diff --git a/go.mod b/go.mod index 79893bfdd3..76af0baa51 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( github.com/linuxkit/virtsock v0.0.0-20241009230534-cb6a20cc0422 github.com/mattn/go-shellwords v1.0.12 github.com/moby/sys/user v0.4.0 - github.com/open-policy-agent/opa v0.70.0 + github.com/open-policy-agent/opa v1.15.2 github.com/opencontainers/cgroups v0.0.6 github.com/opencontainers/runc v1.4.2 github.com/opencontainers/runtime-spec v1.3.0 @@ -71,10 +71,8 @@ require ( require ( cyphar.com/go-pathrs v0.2.4 // indirect - github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/agnivade/levenshtein v1.2.0 // indirect + github.com/agnivade/levenshtein v1.2.1 // indirect github.com/akavel/rsrc v0.10.2 // indirect - github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/checkpoint-restore/go-criu/v7 v7.2.0 // indirect github.com/cilium/ebpf v0.17.3 // indirect @@ -90,24 +88,25 @@ require ( github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-ini/ini v1.67.0 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/gorilla/mux v1.8.1 // indirect github.com/josephspurrier/goversioninfo v1.5.0 // indirect github.com/klauspost/compress v1.18.5 // indirect github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect - github.com/lestrrat-go/blackmagic v1.0.2 // indirect + github.com/lestrrat-go/blackmagic v1.0.4 // indirect + github.com/lestrrat-go/dsig v1.0.0 // indirect + 
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc/v3 v3.0.2 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect github.com/lestrrat-go/jwx v1.2.31 // indirect + github.com/lestrrat-go/jwx/v3 v3.0.13 // indirect github.com/lestrrat-go/option v1.0.1 // indirect + github.com/lestrrat-go/option/v2 v2.0.0 // indirect github.com/mdlayher/socket v0.5.1 // indirect github.com/mdlayher/vsock v1.2.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -115,38 +114,33 @@ require ( github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/mrunalp/fileutils v0.5.1 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/selinux v1.13.1 // indirect - github.com/prometheus/client_golang v1.23.2 // indirect - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.16.1 // indirect - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/seccomp/libseccomp-golang v0.11.1 // indirect + github.com/segmentio/asm v1.2.1 // indirect github.com/tchap/go-patricia/v2 v2.3.3 // indirect + github.com/valyala/fastjson v1.6.7 // indirect github.com/vbatts/tar-split v0.12.2 // indirect + github.com/vektah/gqlparser/v2 v2.5.32 // indirect github.com/veraison/go-cose v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xrash/smetrics 
v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/yashtewari/glob-intersection v0.2.0 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/otel v1.43.0 // indirect - go.opentelemetry.io/otel/metric v1.43.0 // indirect - go.opentelemetry.io/otel/sdk v1.43.0 // indirect - go.opentelemetry.io/otel/trace v1.43.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.50.0 // indirect golang.org/x/mod v0.35.0 // indirect golang.org/x/text v0.36.0 // indirect golang.org/x/tools v0.44.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260120221211-b8f7ae30c516 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index e299a51753..fce7c4ef54 100644 --- a/go.sum +++ b/go.sum @@ -369,10 +369,8 @@ github.com/Microsoft/didx509go v0.0.3/go.mod h1:wWt+iQsLzn3011+VfESzznLIp/Owhuj7 github.com/Microsoft/go-winio v0.6.3-0.20251027160822-ad3df93bed29 h1:0kQAzHq8vLs7Pptv+7TxjdETLf/nIqJpIB4oC6Ba4vY= github.com/Microsoft/go-winio v0.6.3-0.20251027160822-ad3df93bed29/go.mod h1:ZWa7ssZJT30CCDGJ7fk/2SBTq9BIQrrVjrcss0UW2s0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= -github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= -github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod 
h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -384,6 +382,8 @@ github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhk github.com/alecthomas/participle/v2 v2.0.0/go.mod h1:rAKZdJldHu8084ojcWevWAL8KmEU+AT+Olodb+WoN2Y= github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= @@ -397,14 +397,13 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= -github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= +github.com/bytecodealliance/wasmtime-go/v39 v39.0.1 h1:RibaT47yiyCRxMOj/l2cvL8cWiWBSqDXHyqsa9sGcCE= 
+github.com/bytecodealliance/wasmtime-go/v39 v39.0.1/go.mod h1:miR4NYIEBXeDNamZIzpskhJ0z/p8al+lwMWylQ/ZJb4= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -485,14 +484,15 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= 
-github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= -github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgraph-io/badger/v4 v4.9.1 h1:DocZXZkg5JJHJPtUErA0ibyHxOVUDVoXLSCV6t8NC8w= +github.com/dgraph-io/badger/v4 v4.9.1/go.mod h1:5/MEx97uzdPUHR4KtkNt8asfI2T4JiEiQlV7kWUo8c0= +github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= +github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/docker/cli v29.4.0+incompatible h1:+IjXULMetlvWJiuSI0Nbor36lcJ5BTcVpUmB21KBoVM= @@ -541,14 +541,13 @@ github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/foxcpp/go-mockdns 
v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= -github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/foxcpp/go-mockdns v1.2.0 h1:omK3OrHRD1IWJz1FuFBCFquhXslXoF17OvBS6JPzZF0= +github.com/foxcpp/go-mockdns v1.2.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -560,8 +559,6 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= -github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -583,8 +580,8 @@ github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZs github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod 
h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.9.8/go.mod h1:JubOolP3gh0HpiBc4BLRD4YmjEjHAmIIB2aaXKkTfoE= github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -601,8 +598,6 @@ github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwm github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= -github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -640,14 +635,14 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= +github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -748,15 +743,10 @@ github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07 github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod 
h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= github.com/hamba/avro/v2 v2.17.2/go.mod h1:Q9YK+qxAhtVrNqOhwlZTATLgLA8qxG2vtvkhK8fJ7Jo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -799,17 +789,27 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A= github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= -github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= -github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA= +github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= +github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38= +github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc/v3 v3.0.2 h1:7u4HUaD0NQbf2/n5+fyp+T10hNCsAnwKfqn4A4Baif0= +github.com/lestrrat-go/httprc/v3 v3.0.2/go.mod 
h1:mSMtkZW92Z98M5YoNNztbRGxbXHql7tSitCvaxvo9l0= github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= github.com/lestrrat-go/jwx v1.2.31 h1:/OM9oNl/fzyldpv5HKZ9m7bTywa7COUfg8gujd9nJ54= github.com/lestrrat-go/jwx v1.2.31/go.mod h1:eQJKoRwWcLg4PfD5CFA5gIZGxhPgoPYq9pZISdxLf0c= +github.com/lestrrat-go/jwx/v3 v3.0.13 h1:AdHKiPIYeCSnOJtvdpipPg/0SuFh9rdkN+HF3O0VdSk= +github.com/lestrrat-go/jwx/v3 v3.0.13/go.mod h1:2m0PV1A9tM4b/jVLMx8rh6rBl7F6WGb3EG2hufN9OQU= github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss= +github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg= github.com/linuxkit/virtsock v0.0.0-20241009230534-cb6a20cc0422 h1:XvRuyDDRvi+UDxHN/M4MW4HxjmNVMmUKQj/+AbgsYgk= github.com/linuxkit/virtsock v0.0.0-20241009230534-cb6a20cc0422/go.mod h1:JLgfq4XMVbvfNlAXla/41lZnp21O72a/wWHGJefAvgQ= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= @@ -856,8 +856,8 @@ github.com/mrunalp/fileutils v0.5.1 h1:F+S7ZlNKnrwHfSwdlgNSkKo67ReVf8o9fel6C3dkm github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U= -github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI= +github.com/open-policy-agent/opa 
v1.15.2 h1:dS9q+0Yvruq/VNvWJc5qCvCchn715OWc3HLHXn/UCCc= +github.com/open-policy-agent/opa v1.15.2/go.mod h1:c6SN+7jSsUcKJLQc5P4yhwx8YYDRbjpAiGkBOTqxaa4= github.com/opencontainers/cgroups v0.0.6 h1:tfZFWTIIGaUUFImTyuTg+Mr5x8XRiSdZESgEBW7UxuI= github.com/opencontainers/cgroups v0.0.6/go.mod h1:oWVzJsKK0gG9SCRBfTpnn16WcGEqDI8PAcpMGbqWxcs= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -884,8 +884,9 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -899,10 +900,10 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.16.1 
h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -921,6 +922,10 @@ github.com/samber/lo v1.53.0 h1:t975lj2py4kJPQ6haz1QMgtId2gtmfktACxIXArw3HM= github.com/samber/lo v1.53.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= github.com/seccomp/libseccomp-golang v0.11.1 h1:wuk4ZjSx6kyQII4rj6G6fvVzRHQaSiPvccJazDagu4g= github.com/seccomp/libseccomp-golang v0.11.1/go.mod h1:5m1Lk8E9OwgZTTVz4bBOer7JuazaBa+xTkM895tDiWc= +github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= +github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= github.com/sirupsen/logrus v1.9.4/go.mod 
h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -960,8 +965,12 @@ github.com/urfave/cli v1.22.17 h1:SYzXoiPfQjHBbkYxbew5prZHS1TOLT3ierW8SYLqtVQ= github.com/urfave/cli v1.22.17/go.mod h1:b0ht0aqgH/6pBYzzxURyrM4xXNgsoT/n2ZzwQiEhNVo= github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= +github.com/valyala/fastjson v1.6.7 h1:ZE4tRy0CIkh+qDc5McjatheGX2czdn8slQjomexVpBM= +github.com/valyala/fastjson v1.6.7/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vektah/gqlparser/v2 v2.5.32 h1:k9QPJd4sEDTL+qB4ncPLflqTJ3MmjB9SrVzJrawpFSc= +github.com/vektah/gqlparser/v2 v2.5.32/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts= github.com/veraison/go-cose v1.3.0 h1:2/H5w8kdSpQJyVtIhx8gmwPJ2uSz1PkyWFx0idbd7rk= github.com/veraison/go-cose v1.3.0/go.mod h1:df09OV91aHoQWLmy1KsDdYiagtXgyAwAl8vFeFn1gMc= github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= @@ -1028,8 +1037,6 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod 
h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= @@ -1045,10 +1052,6 @@ go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7 go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= @@ -1106,8 +1109,6 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod 
h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= @@ -1781,7 +1782,6 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20250428153025-10db94c68c34 h1:oklGWmm0ZiCw4efmdYZo5MF9t6nRvGzM5+0klSjOmGM= google.golang.org/genproto v0.0.0-20250428153025-10db94c68c34/go.mod h1:hiH/EqX5GBdTyIpkqMqDGUHDiBniln8b4FCw+NzPxQY= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -1836,8 +1836,6 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:c8q6Z6OCqnfVIqUFJkCzKcrj8eCvUrz+K4KRzSTuANg= google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= google.golang.org/genproto/googleapis/api v0.0.0-20250425173222-7b384671a197/go.mod h1:Cd8IzgPo5Akum2c9R6FsXNaZbH3Jpa2gpHlW89FqlyQ= -google.golang.org/genproto/googleapis/api v0.0.0-20260120221211-b8f7ae30c516 h1:vmC/ws+pLzWjj/gzApyoZuSVrDtF1aod4u/+bbj8hgM= -google.golang.org/genproto/googleapis/api v0.0.0-20260120221211-b8f7ae30c516/go.mod h1:p3MLuOwURrGBRoEyFHBT3GjUwaCQVKeNqqWxlcISGdw= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:+34luvCflYKiKylNwGJfn9cFBbcL/WrkciMmDmsTQ/A= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= @@ -1951,8 +1949,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20250409194420-de1ac958c67a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260120221211-b8f7ae30c516 h1:sNrWoksmOyF5bvJUcnmbeAmQi8baNhqg5IWaI3llQqU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260120221211-b8f7ae30c516/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/vendor/github.com/OneOfOne/xxhash/.gitignore b/vendor/github.com/OneOfOne/xxhash/.gitignore deleted file mode 100644 index f4faa7f8f1..0000000000 --- a/vendor/github.com/OneOfOne/xxhash/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.txt -*.pprof -cmap2/ -cache/ diff --git a/vendor/github.com/OneOfOne/xxhash/.travis.yml b/vendor/github.com/OneOfOne/xxhash/.travis.yml deleted file mode 100644 index 1c6dc55bc7..0000000000 --- a/vendor/github.com/OneOfOne/xxhash/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go -sudo: false - -go: - - "1.10" - - "1.11" - - "1.12" - - master - -script: - - go test -tags safe ./... - - go test ./... 
- - diff --git a/vendor/github.com/OneOfOne/xxhash/LICENSE b/vendor/github.com/OneOfOne/xxhash/LICENSE deleted file mode 100644 index 9e30b4f342..0000000000 --- a/vendor/github.com/OneOfOne/xxhash/LICENSE +++ /dev/null @@ -1,187 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. diff --git a/vendor/github.com/OneOfOne/xxhash/README.md b/vendor/github.com/OneOfOne/xxhash/README.md deleted file mode 100644 index 8eea28c394..0000000000 --- a/vendor/github.com/OneOfOne/xxhash/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# xxhash [![GoDoc](https://godoc.org/github.com/OneOfOne/xxhash?status.svg)](https://godoc.org/github.com/OneOfOne/xxhash) [![Build Status](https://travis-ci.org/OneOfOne/xxhash.svg?branch=master)](https://travis-ci.org/OneOfOne/xxhash) [![Coverage](https://gocover.io/_badge/github.com/OneOfOne/xxhash)](https://gocover.io/github.com/OneOfOne/xxhash) - -This is a native Go implementation of the excellent [xxhash](https://github.com/Cyan4973/xxHash)* algorithm, an extremely fast non-cryptographic Hash algorithm, working at speeds close to RAM limits. - -* The C implementation is ([Copyright](https://github.com/Cyan4973/xxHash/blob/master/LICENSE) (c) 2012-2014, Yann Collet) - -## Install - - go get github.com/OneOfOne/xxhash - -## Features - -* On Go 1.7+ the pure go version is faster than CGO for all inputs. -* Supports ChecksumString{32,64} xxhash{32,64}.WriteString, which uses no copies when it can, falls back to copy on appengine. -* The native version falls back to a less optimized version on appengine due to the lack of unsafe. 
-* Almost as fast as the mostly pure assembly version written by the brilliant [cespare](https://github.com/cespare/xxhash), while also supporting seeds. -* To manually toggle the appengine version build with `-tags safe`. - -## Benchmark - -### Core i7-4790 @ 3.60GHz, Linux 4.12.6-1-ARCH (64bit), Go tip (+ff90f4af66 2017-08-19) - -```bash -➤ go test -bench '64' -count 5 -tags cespare | benchstat /dev/stdin -name time/op - -# https://github.com/cespare/xxhash -XXSum64Cespare/Func-8 160ns ± 2% -XXSum64Cespare/Struct-8 173ns ± 1% -XXSum64ShortCespare/Func-8 6.78ns ± 1% -XXSum64ShortCespare/Struct-8 19.6ns ± 2% - -# this package (default mode, using unsafe) -XXSum64/Func-8 170ns ± 1% -XXSum64/Struct-8 182ns ± 1% -XXSum64Short/Func-8 13.5ns ± 3% -XXSum64Short/Struct-8 20.4ns ± 0% - -# this package (appengine, *not* using unsafe) -XXSum64/Func-8 241ns ± 5% -XXSum64/Struct-8 243ns ± 6% -XXSum64Short/Func-8 15.2ns ± 2% -XXSum64Short/Struct-8 23.7ns ± 5% - -CRC64ISO-8 1.23µs ± 1% -CRC64ISOString-8 2.71µs ± 4% -CRC64ISOShort-8 22.2ns ± 3% - -Fnv64-8 2.34µs ± 1% -Fnv64Short-8 74.7ns ± 8% -``` - -## Usage - -```go - h := xxhash.New64() - // r, err := os.Open("......") - // defer f.Close() - r := strings.NewReader(F) - io.Copy(h, r) - fmt.Println("xxhash.Backend:", xxhash.Backend) - fmt.Println("File checksum:", h.Sum64()) -``` - -[playground](https://play.golang.org/p/wHKBwfu6CPV) - -## TODO - -* Rewrite the 32bit version to be more optimized. -* General cleanup as the Go inliner gets smarter. - -## License - -This project is released under the Apache v2. license. See [LICENSE](LICENSE) for more details. 
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash.go b/vendor/github.com/OneOfOne/xxhash/xxhash.go deleted file mode 100644 index af2496b77f..0000000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash.go +++ /dev/null @@ -1,294 +0,0 @@ -package xxhash - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - prime32x1 uint32 = 2654435761 - prime32x2 uint32 = 2246822519 - prime32x3 uint32 = 3266489917 - prime32x4 uint32 = 668265263 - prime32x5 uint32 = 374761393 - - prime64x1 uint64 = 11400714785074694791 - prime64x2 uint64 = 14029467366897019727 - prime64x3 uint64 = 1609587929392839161 - prime64x4 uint64 = 9650029242287828579 - prime64x5 uint64 = 2870177450012600261 - - maxInt32 int32 = (1<<31 - 1) - - // precomputed zero Vs for seed 0 - zero64x1 = 0x60ea27eeadc0b5d6 - zero64x2 = 0xc2b2ae3d27d4eb4f - zero64x3 = 0x0 - zero64x4 = 0x61c8864e7a143579 -) - -const ( - magic32 = "xxh\x07" - magic64 = "xxh\x08" - marshaled32Size = len(magic32) + 4*7 + 16 - marshaled64Size = len(magic64) + 8*6 + 32 + 1 -) - -func NewHash32() hash.Hash { return New32() } -func NewHash64() hash.Hash { return New64() } - -// Checksum32 returns the checksum of the input data with the seed set to 0. -func Checksum32(in []byte) uint32 { - return Checksum32S(in, 0) -} - -// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0. -func ChecksumString32(s string) uint32 { - return ChecksumString32S(s, 0) -} - -type XXHash32 struct { - mem [16]byte - ln, memIdx int32 - v1, v2, v3, v4 uint32 - seed uint32 -} - -// Size returns the number of bytes Sum will return. -func (xx *XXHash32) Size() int { - return 4 -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. 
-func (xx *XXHash32) BlockSize() int { - return 16 -} - -// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed. -func NewS32(seed uint32) (xx *XXHash32) { - xx = &XXHash32{ - seed: seed, - } - xx.Reset() - return -} - -// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0. -func New32() *XXHash32 { - return NewS32(0) -} - -func (xx *XXHash32) Reset() { - xx.v1 = xx.seed + prime32x1 + prime32x2 - xx.v2 = xx.seed + prime32x2 - xx.v3 = xx.seed - xx.v4 = xx.seed - prime32x1 - xx.ln, xx.memIdx = 0, 0 -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xx *XXHash32) Sum(in []byte) []byte { - s := xx.Sum32() - return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (xx *XXHash32) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaled32Size) - b = append(b, magic32...) - b = appendUint32(b, xx.v1) - b = appendUint32(b, xx.v2) - b = appendUint32(b, xx.v3) - b = appendUint32(b, xx.v4) - b = appendUint32(b, xx.seed) - b = appendInt32(b, xx.ln) - b = appendInt32(b, xx.memIdx) - b = append(b, xx.mem[:]...) - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
-func (xx *XXHash32) UnmarshalBinary(b []byte) error { - if len(b) < len(magic32) || string(b[:len(magic32)]) != magic32 { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaled32Size { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic32):] - b, xx.v1 = consumeUint32(b) - b, xx.v2 = consumeUint32(b) - b, xx.v3 = consumeUint32(b) - b, xx.v4 = consumeUint32(b) - b, xx.seed = consumeUint32(b) - b, xx.ln = consumeInt32(b) - b, xx.memIdx = consumeInt32(b) - copy(xx.mem[:], b) - return nil -} - -// Checksum64 an alias for Checksum64S(in, 0) -func Checksum64(in []byte) uint64 { - return Checksum64S(in, 0) -} - -// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0. -func ChecksumString64(s string) uint64 { - return ChecksumString64S(s, 0) -} - -type XXHash64 struct { - v1, v2, v3, v4 uint64 - seed uint64 - ln uint64 - mem [32]byte - memIdx int8 -} - -// Size returns the number of bytes Sum will return. -func (xx *XXHash64) Size() int { - return 8 -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (xx *XXHash64) BlockSize() int { - return 32 -} - -// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed. -func NewS64(seed uint64) (xx *XXHash64) { - xx = &XXHash64{ - seed: seed, - } - xx.Reset() - return -} - -// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0. -func New64() *XXHash64 { - return NewS64(0) -} - -func (xx *XXHash64) Reset() { - xx.ln, xx.memIdx = 0, 0 - xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed) -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. 
-func (xx *XXHash64) Sum(in []byte) []byte { - s := xx.Sum64() - return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (xx *XXHash64) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaled64Size) - b = append(b, magic64...) - b = appendUint64(b, xx.v1) - b = appendUint64(b, xx.v2) - b = appendUint64(b, xx.v3) - b = appendUint64(b, xx.v4) - b = appendUint64(b, xx.seed) - b = appendUint64(b, xx.ln) - b = append(b, byte(xx.memIdx)) - b = append(b, xx.mem[:]...) - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (xx *XXHash64) UnmarshalBinary(b []byte) error { - if len(b) < len(magic64) || string(b[:len(magic64)]) != magic64 { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaled64Size { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic64):] - b, xx.v1 = consumeUint64(b) - b, xx.v2 = consumeUint64(b) - b, xx.v3 = consumeUint64(b) - b, xx.v4 = consumeUint64(b) - b, xx.seed = consumeUint64(b) - b, xx.ln = consumeUint64(b) - xx.memIdx = int8(b[0]) - b = b[1:] - copy(xx.mem[:], b) - return nil -} - -func appendInt32(b []byte, x int32) []byte { return appendUint32(b, uint32(x)) } - -func appendUint32(b []byte, x uint32) []byte { - var a [4]byte - binary.LittleEndian.PutUint32(a[:], x) - return append(b, a[:]...) -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeInt32(b []byte) ([]byte, int32) { bn, x := consumeUint32(b); return bn, int32(x) } -func consumeUint32(b []byte) ([]byte, uint32) { x := u32(b); return b[4:], x } -func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b); return b[8:], x } - -// force the compiler to use ROTL instructions - -func rotl32_1(x uint32) uint32 { return (x << 1) | (x >> (32 - 1)) } -func rotl32_7(x uint32) uint32 { return (x << 7) | (x >> (32 - 7)) } -func rotl32_11(x uint32) uint32 { return (x << 11) | (x >> (32 - 11)) } -func rotl32_12(x uint32) uint32 { return (x << 12) | (x >> (32 - 12)) } -func rotl32_13(x uint32) uint32 { return (x << 13) | (x >> (32 - 13)) } -func rotl32_17(x uint32) uint32 { return (x << 17) | (x >> (32 - 17)) } -func rotl32_18(x uint32) uint32 { return (x << 18) | (x >> (32 - 18)) } - -func rotl64_1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rotl64_7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rotl64_11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rotl64_12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rotl64_18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rotl64_23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rotl64_27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rotl64_31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } - -func mix64(h uint64) uint64 { - h ^= h >> 33 - h *= prime64x2 - h ^= h >> 29 - h *= prime64x3 - h ^= h >> 32 - return h -} - -func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) { - if seed == 0 { - return zero64x1, zero64x2, zero64x3, zero64x4 - } - return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1) -} - -// borrowed from cespare -func round64(h, v uint64) uint64 { - h += v * prime64x2 - h = rotl64_31(h) - h *= prime64x1 - return h -} - -func mergeRound64(h, v uint64) uint64 { - v = round64(0, v) - h ^= v - h = h*prime64x1 + prime64x4 - return h -} diff 
--git a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go b/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go deleted file mode 100644 index ae48e0c5ca..0000000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go +++ /dev/null @@ -1,161 +0,0 @@ -package xxhash - -func u32(in []byte) uint32 { - return uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 -} - -func u64(in []byte) uint64 { - return uint64(in[0]) | uint64(in[1])<<8 | uint64(in[2])<<16 | uint64(in[3])<<24 | uint64(in[4])<<32 | uint64(in[5])<<40 | uint64(in[6])<<48 | uint64(in[7])<<56 -} - -// Checksum32S returns the checksum of the input bytes with the specific seed. -func Checksum32S(in []byte, seed uint32) (h uint32) { - var i int - - if len(in) > 15 { - var ( - v1 = seed + prime32x1 + prime32x2 - v2 = seed + prime32x2 - v3 = seed + 0 - v4 = seed - prime32x1 - ) - for ; i < len(in)-15; i += 16 { - in := in[i : i+16 : len(in)] - v1 += u32(in[0:4:len(in)]) * prime32x2 - v1 = rotl32_13(v1) * prime32x1 - - v2 += u32(in[4:8:len(in)]) * prime32x2 - v2 = rotl32_13(v2) * prime32x1 - - v3 += u32(in[8:12:len(in)]) * prime32x2 - v3 = rotl32_13(v3) * prime32x1 - - v4 += u32(in[12:16:len(in)]) * prime32x2 - v4 = rotl32_13(v4) * prime32x1 - } - - h = rotl32_1(v1) + rotl32_7(v2) + rotl32_12(v3) + rotl32_18(v4) - - } else { - h = seed + prime32x5 - } - - h += uint32(len(in)) - for ; i <= len(in)-4; i += 4 { - in := in[i : i+4 : len(in)] - h += u32(in[0:4:len(in)]) * prime32x3 - h = rotl32_17(h) * prime32x4 - } - - for ; i < len(in); i++ { - h += uint32(in[i]) * prime32x5 - h = rotl32_11(h) * prime32x1 - } - - h ^= h >> 15 - h *= prime32x2 - h ^= h >> 13 - h *= prime32x3 - h ^= h >> 16 - - return -} - -func (xx *XXHash32) Write(in []byte) (n int, err error) { - i, ml := 0, int(xx.memIdx) - n = len(in) - xx.ln += int32(n) - - if d := 16 - ml; ml > 0 && ml+len(in) > 16 { - xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[:d])) - ml, in = 16, in[d:len(in):len(in)] - } else if ml+len(in) < 16 { - 
xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in)) - return - } - - if ml > 0 { - i += 16 - ml - xx.memIdx += int32(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in)) - in := xx.mem[:16:len(xx.mem)] - - xx.v1 += u32(in[0:4:len(in)]) * prime32x2 - xx.v1 = rotl32_13(xx.v1) * prime32x1 - - xx.v2 += u32(in[4:8:len(in)]) * prime32x2 - xx.v2 = rotl32_13(xx.v2) * prime32x1 - - xx.v3 += u32(in[8:12:len(in)]) * prime32x2 - xx.v3 = rotl32_13(xx.v3) * prime32x1 - - xx.v4 += u32(in[12:16:len(in)]) * prime32x2 - xx.v4 = rotl32_13(xx.v4) * prime32x1 - - xx.memIdx = 0 - } - - for ; i <= len(in)-16; i += 16 { - in := in[i : i+16 : len(in)] - xx.v1 += u32(in[0:4:len(in)]) * prime32x2 - xx.v1 = rotl32_13(xx.v1) * prime32x1 - - xx.v2 += u32(in[4:8:len(in)]) * prime32x2 - xx.v2 = rotl32_13(xx.v2) * prime32x1 - - xx.v3 += u32(in[8:12:len(in)]) * prime32x2 - xx.v3 = rotl32_13(xx.v3) * prime32x1 - - xx.v4 += u32(in[12:16:len(in)]) * prime32x2 - xx.v4 = rotl32_13(xx.v4) * prime32x1 - } - - if len(in)-i != 0 { - xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)])) - } - - return -} - -func (xx *XXHash32) Sum32() (h uint32) { - var i int32 - if xx.ln > 15 { - h = rotl32_1(xx.v1) + rotl32_7(xx.v2) + rotl32_12(xx.v3) + rotl32_18(xx.v4) - } else { - h = xx.seed + prime32x5 - } - - h += uint32(xx.ln) - - if xx.memIdx > 0 { - for ; i < xx.memIdx-3; i += 4 { - in := xx.mem[i : i+4 : len(xx.mem)] - h += u32(in[0:4:len(in)]) * prime32x3 - h = rotl32_17(h) * prime32x4 - } - - for ; i < xx.memIdx; i++ { - h += uint32(xx.mem[i]) * prime32x5 - h = rotl32_11(h) * prime32x1 - } - } - h ^= h >> 15 - h *= prime32x2 - h ^= h >> 13 - h *= prime32x3 - h ^= h >> 16 - - return -} - -// Checksum64S returns the 64bit xxhash checksum for a single input -func Checksum64S(in []byte, seed uint64) uint64 { - if len(in) == 0 && seed == 0 { - return 0xef46db3751d8e999 - } - - if len(in) > 31 { - return checksum64(in, seed) - } - - return checksum64Short(in, seed) -} diff --git 
a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go deleted file mode 100644 index e92ec29e02..0000000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go +++ /dev/null @@ -1,183 +0,0 @@ -// +build appengine safe ppc64le ppc64be mipsle mips s390x - -package xxhash - -// Backend returns the current version of xxhash being used. -const Backend = "GoSafe" - -func ChecksumString32S(s string, seed uint32) uint32 { - return Checksum32S([]byte(s), seed) -} - -func (xx *XXHash32) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - return xx.Write([]byte(s)) -} - -func ChecksumString64S(s string, seed uint64) uint64 { - return Checksum64S([]byte(s), seed) -} - -func (xx *XXHash64) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - return xx.Write([]byte(s)) -} - -func checksum64(in []byte, seed uint64) (h uint64) { - var ( - v1, v2, v3, v4 = resetVs64(seed) - - i int - ) - - for ; i < len(in)-31; i += 32 { - in := in[i : i+32 : len(in)] - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - } - - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - - h += uint64(len(in)) - - for ; i < len(in)-7; i += 8 { - h ^= round64(0, u64(in[i:len(in):len(in)])) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - for ; i < len(in)-3; i += 4 { - h ^= uint64(u32(in[i:len(in):len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < len(in); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func checksum64Short(in []byte, seed uint64) uint64 { - var ( - h = seed + prime64x5 + uint64(len(in)) - i int - ) - - for ; i < len(in)-7; i += 8 { - k := u64(in[i : i+8 : len(in)]) - h ^= 
round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - for ; i < len(in)-3; i += 4 { - h ^= uint64(u32(in[i:i+4:len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < len(in); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func (xx *XXHash64) Write(in []byte) (n int, err error) { - var ( - ml = int(xx.memIdx) - d = 32 - ml - ) - - n = len(in) - xx.ln += uint64(n) - - if ml+len(in) < 32 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in)) - return - } - - i, v1, v2, v3, v4 := 0, xx.v1, xx.v2, xx.v3, xx.v4 - if ml > 0 && ml+len(in) > 32 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in[:d:len(in)])) - in = in[d:len(in):len(in)] - - in := xx.mem[0:32:len(xx.mem)] - - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - - xx.memIdx = 0 - } - - for ; i < len(in)-31; i += 32 { - in := in[i : i+32 : len(in)] - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - } - - if len(in)-i != 0 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)])) - } - - xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4 - - return -} - -func (xx *XXHash64) Sum64() (h uint64) { - var i int - if xx.ln > 31 { - v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4 - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - } else { - h = xx.seed + prime64x5 - } - - h += uint64(xx.ln) - if xx.memIdx > 0 { - in := xx.mem[:xx.memIdx] - for ; i < int(xx.memIdx)-7; i += 8 { - in := in[i : i+8 : len(in)] - k := u64(in[0:8:len(in)]) - k *= prime64x2 - k = rotl64_31(k) - k *= prime64x1 - h ^= k - h = rotl64_27(h)*prime64x1 + prime64x4 
- } - - for ; i < int(xx.memIdx)-3; i += 4 { - in := in[i : i+4 : len(in)] - h ^= uint64(u32(in[0:4:len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < int(xx.memIdx); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - } - - return mix64(h) -} diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go deleted file mode 100644 index 1e2b5e8f1f..0000000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,240 +0,0 @@ -// +build !safe -// +build !appengine -// +build !ppc64le -// +build !mipsle -// +build !ppc64be -// +build !mips -// +build !s390x - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Backend returns the current version of xxhash being used. -const Backend = "GoUnsafe" - -// ChecksumString32S returns the checksum of the input data, without creating a copy, with the specific seed. -func ChecksumString32S(s string, seed uint32) uint32 { - if len(s) == 0 { - return Checksum32S(nil, seed) - } - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return Checksum32S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed) -} - -func (xx *XXHash32) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)]) -} - -// ChecksumString64S returns the checksum of the input data, without creating a copy, with the specific seed. 
-func ChecksumString64S(s string, seed uint64) uint64 { - if len(s) == 0 { - return Checksum64S(nil, seed) - } - - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return Checksum64S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed) -} - -func (xx *XXHash64) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)]) -} - -//go:nocheckptr -func checksum64(in []byte, seed uint64) uint64 { - var ( - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - - v1, v2, v3, v4 = resetVs64(seed) - - h uint64 - i int - ) - - for ; i < len(words)-3; i += 4 { - words := (*[4]uint64)(unsafe.Pointer(&words[i])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - } - - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - - h += uint64(len(in)) - - for _, k := range words[i:] { - h ^= round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -//go:nocheckptr -func checksum64Short(in []byte, seed uint64) uint64 { - var ( - h = seed + prime64x5 + uint64(len(in)) - i int - ) - - if len(in) > 7 { - var ( - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - ) - - for i := range words { - h ^= round64(0, words[i]) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - i = wordsLen << 3 - } - - if in = 
in[i:len(in):len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func (xx *XXHash64) Write(in []byte) (n int, err error) { - mem, idx := xx.mem[:], int(xx.memIdx) - - xx.ln, n = xx.ln+uint64(len(in)), len(in) - - if idx+len(in) < 32 { - xx.memIdx += int8(copy(mem[idx:len(mem):len(mem)], in)) - return - } - - var ( - v1, v2, v3, v4 = xx.v1, xx.v2, xx.v3, xx.v4 - - i int - ) - - if d := 32 - int(idx); d > 0 && int(idx)+len(in) > 31 { - copy(mem[idx:len(mem):len(mem)], in[:len(in):len(in)]) - - words := (*[4]uint64)(unsafe.Pointer(&mem[0])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - - if in, xx.memIdx = in[d:len(in):len(in)], 0; len(in) == 0 { - goto RET - } - } - - for ; i < len(in)-31; i += 32 { - words := (*[4]uint64)(unsafe.Pointer(&in[i])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - } - - if len(in)-i != 0 { - xx.memIdx += int8(copy(mem[xx.memIdx:len(mem):len(mem)], in[i:len(in):len(in)])) - } - -RET: - xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4 - - return -} - -func (xx *XXHash64) Sum64() (h uint64) { - if seed := xx.seed; xx.ln > 31 { - v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4 - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - } else if seed == 0 { - h = prime64x5 - } else { - h = seed + prime64x5 - } - - h += uint64(xx.ln) - - if xx.memIdx == 0 { - return mix64(h) - } - - var ( - in = xx.mem[:xx.memIdx:xx.memIdx] - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - ) - - for _, k := 
range words { - h ^= round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be66..0000000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7c..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 
-3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 
-10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 
-4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 
-3 -2 -4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8eb6..0000000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. 
- targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. 
This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? 
- } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/go-ini/ini/.editorconfig b/vendor/github.com/go-ini/ini/.editorconfig deleted file mode 100644 index 4a2d9180f9..0000000000 --- a/vendor/github.com/go-ini/ini/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -# http://editorconfig.org - -root = true - -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true - -[*_test.go] -trim_trailing_whitespace = false diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 588388bda2..0000000000 --- a/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea -/.vscode -.DS_Store diff --git a/vendor/github.com/go-ini/ini/.golangci.yml b/vendor/github.com/go-ini/ini/.golangci.yml deleted file mode 100644 index 631e369254..0000000000 --- 
a/vendor/github.com/go-ini/ini/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - staticcheck: - checks: [ - "all", - "-SA1019" # There are valid use cases of strings.Title - ] - nakedret: - max-func-lines: 0 # Disallow any unnamed return statement - -linters: - enable: - - deadcode - - errcheck - - gosimple - - govet - - ineffassign - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - nakedret - - gofmt - - rowserrcheck - - unconvert - - goimports - - unparam diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index d361bbcdf5..0000000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. 
- -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." 
- -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. 
- -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. 
- - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index f3b0dae2d2..0000000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -test.bench=. -test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index 30606d9700..0000000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# INI - -[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) -[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) -[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) -[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides 
INI file read and write functionality in Go. - -## Features - -- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -The minimum requirement of Go is **1.13**. - -```sh -$ go get gopkg.in/ini.v1 -``` - -Please add `-u` flag to update in the future. - -## Getting Help - -- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- 中国大陆镜像:https://ini.unknwon.cn - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/codecov.yml b/vendor/github.com/go-ini/ini/codecov.yml deleted file mode 100644 index e02ec84bc0..0000000000 --- a/vendor/github.com/go-ini/ini/codecov.yml +++ /dev/null @@ -1,16 +0,0 @@ -coverage: - range: "60...95" - status: - project: - default: - threshold: 1% - informational: true - patch: - defualt: - only_pulls: true - informational: true - -comment: - layout: 'diff' - -github_checks: false diff --git a/vendor/github.com/go-ini/ini/data_source.go b/vendor/github.com/go-ini/ini/data_source.go deleted file mode 100644 index c3a541f1d1..0000000000 --- a/vendor/github.com/go-ini/ini/data_source.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" -) - -var ( - _ dataSource = (*sourceFile)(nil) - _ dataSource = (*sourceData)(nil) - _ dataSource = (*sourceReadCloser)(nil) -) - -// dataSource is an interface that returns object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. 
-type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - case io.Reader: - return &sourceReadCloser{ioutil.NopCloser(s)}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type %q", s) - } -} diff --git a/vendor/github.com/go-ini/ini/deprecated.go b/vendor/github.com/go-ini/ini/deprecated.go deleted file mode 100644 index 48b8e66d6d..0000000000 --- a/vendor/github.com/go-ini/ini/deprecated.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -var ( - // Deprecated: Use "DefaultSection" instead. - DEFAULT_SECTION = DefaultSection - // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore = SnackCase -) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index f66bc94b8b..0000000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. -type ErrDelimiterNotFound struct { - Line string -} - -// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} - -// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. -type ErrEmptyKeyName struct { - Line string -} - -// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. -func IsErrEmptyKeyName(err error) bool { - _, ok := err.(ErrEmptyKeyName) - return ok -} - -func (err ErrEmptyKeyName) Error() string { - return fmt.Sprintf("empty key name: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go deleted file mode 100644 index f8b22408be..0000000000 --- a/vendor/github.com/go-ini/ini/file.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of one or more INI files in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // To keep track of the index of a section with same name. - // This meta list is only used with non-unique section names are allowed. - sectionIndexes []int - - // Actual data is stored here. - sections map[string][]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - if len(opts.KeyValueDelimiters) == 0 { - opts.KeyValueDelimiters = "=:" - } - if len(opts.KeyValueDelimiterOnWrite) == 0 { - opts.KeyValueDelimiterOnWrite = "=" - } - if len(opts.ChildSectionDelimiter) == 0 { - opts.ChildSectionDelimiter = "." - } - - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string][]*Section), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty(opts ...LoadOptions) *File { - var opt LoadOptions - if len(opts) > 0 { - opt = opts[0] - } - - // Ignore error here, we are sure our data is good. - f, _ := LoadSources(opt, []byte("")) - return f -} - -// NewSection creates a new section. 
-func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("empty section name") - } - - if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { - return f.sections[name][0], nil - } - - f.sectionList = append(f.sectionList, name) - - // NOTE: Append to indexes must happen before appending to sections, - // otherwise index will have off-by-one problem. - f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) - - sec := newSection(f, name) - f.sections[name] = append(f.sections[name], sec) - - return sec, nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - secs, err := f.SectionsByName(name) - if err != nil { - return nil, err - } - - return secs[0], err -} - -// HasSection returns true if the file contains a section with given name. -func (f *File) HasSection(name string) bool { - section, _ := f.GetSection(name) - return section != nil -} - -// SectionsByName returns all sections with given name. 
-func (f *File) SectionsByName(name string) ([]*Section, error) { - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - secs := f.sections[name] - if len(secs) == 0 { - return nil, fmt.Errorf("section %q does not exist", name) - } - - return secs, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - if name == "" { - name = DefaultSection - } - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// SectionWithIndex assumes named section exists and returns a new section when not. -func (f *File) SectionWithIndex(name string, index int) *Section { - secs, err := f.SectionsByName(name) - if err != nil || len(secs) <= index { - // NOTE: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - newSec, _ := f.NewSection(name) - return newSec - } - - return secs[index] -} - -// Sections returns a list of Section stored in the current instance. -func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name][f.sectionIndexes[i]] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section or all sections with given name. 
-func (f *File) DeleteSection(name string) { - secs, err := f.SectionsByName(name) - if err != nil { - return - } - - for i := 0; i < len(secs); i++ { - // For non-unique sections, it is always needed to remove the first one so - // in the next iteration, the subsequent section continue having index 0. - // Ignoring the error as index 0 never returns an error. - _ = f.DeleteSectionWithIndex(name, 0) - } -} - -// DeleteSectionWithIndex deletes a section with given name and index. -func (f *File) DeleteSectionWithIndex(name string, index int) error { - if !f.options.AllowNonUniqueSections && index != 0 { - return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") - } - - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - // Count occurrences of the sections - occurrences := 0 - - sectionListCopy := make([]string, len(f.sectionList)) - copy(sectionListCopy, f.sectionList) - - for i, s := range sectionListCopy { - if s != name { - continue - } - - if occurrences == index { - if len(f.sections[name]) <= 1 { - delete(f.sections, name) // The last one in the map - } else { - f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) - } - - // Fix section lists - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) - - } else if occurrences > index { - // Fix the indices of all following sections with this name. - f.sectionIndexes[i-1]-- - } - - occurrences++ - } - - return nil -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. 
-func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - _ = f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - if f.options.ShortCircuit { - return nil - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight - - if PrettyFormat || PrettyEqual { - equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) - } - - // Use buffer to make sure target is safe until finish encoding. 
- buf := bytes.NewBuffer(nil) - lastSectionIdx := len(f.sectionList) - 1 - for i, sname := range f.sectionList { - sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) - if len(sec.Comment) > 0 { - // Support multiline comments - lines := strings.Split(sec.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + lines[i] - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - isLastSection := i == lastSectionIdx - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modified if they contain certain characters so - // we need to take that into account in our calculation. 
- alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KeyList: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - // Support multiline comments - lines := strings.Split(key.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + strings.TrimSpace(lines[i]) - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - writeKeyValue := func(val string) (bool, error) { - if _, err := buf.WriteString(kname); err != nil { - return false, err - } - - if key.isBooleanType { - buf.WriteString(LineBreak) - return true, nil - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } else if len(strings.TrimSpace(val)) != len(val) { - val = `"` 
+ val + `"` - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return false, err - } - return false, nil - } - - shadows := key.ValueWithShadows() - if len(shadows) == 0 { - if _, err := writeKeyValue(""); err != nil { - return nil, err - } - } - - for _, val := range shadows { - exitLoop, err := writeKeyValue(val) - if err != nil { - return nil, err - } else if exitLoop { - continue KeyList - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename after done. - buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. 
-func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/helper.go b/vendor/github.com/go-ini/ini/helper.go deleted file mode 100644 index f9d80a682a..0000000000 --- a/vendor/github.com/go-ini/ini/helper.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index 99e7f86511..0000000000 --- a/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. 
-package ini - -import ( - "os" - "regexp" - "runtime" - "strings" -) - -const ( - // Maximum allowed depth when recursively substituing variable names. - depthValues = 99 -) - -var ( - // DefaultSection is the name of default section. You can use this var or the string literal. - // In most of cases, an empty string is all you need to access the section. - DefaultSection = "DEFAULT" - - // LineBreak is the delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows at package init time. - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) - - // DefaultHeader explicitly writes default section header. - DefaultHeader = false - - // PrettySection indicates whether to put a line between sections. - PrettySection = true - // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. - PrettyEqual = false - // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatLeft = "" - // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatRight = "" -) - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -func init() { - if runtime.GOOS == "windows" && !inTest { - LineBreak = "\r\n" - } -} - -// LoadOptions contains all customized options used for load data source(s). -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // InsensitiveSections indicates whether the parser forces all section to lowercase. 
- InsensitiveSections bool - // InsensitiveKeys indicates whether the parser forces all key names to lowercase. - InsensitiveKeys bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. - SkipUnrecognizableLines bool - // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. - ShortCircuit bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. - AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. 
- SpaceBeforeInlineComment bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string - // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". - KeyValueDelimiters string - // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". - KeyValueDelimiterOnWrite string - // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". - ChildSectionDelimiter string - // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). - PreserveSurroundedQuote bool - // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). - DebugFunc DebugFunc - // ReaderBufferSize is the buffer size of the reader in bytes. - ReaderBufferSize int - // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. - AllowNonUniqueSections bool - // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. - AllowDuplicateShadowValues bool -} - -// DebugFunc is the type of function called to log parse events. 
-type DebugFunc func(message string) - -// LoadSources allows caller to apply customized options for loading from data source(s). -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) -} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// ShadowLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) 
-} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go deleted file mode 100644 index a19d9f38ef..0000000000 --- a/vendor/github.com/go-ini/ini/key.go +++ /dev/null @@ -1,837 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - nestedValues []string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - if !k.s.f.options.AllowDuplicateShadowValues { - // Deduplicate shadows based on their values. - if k.value == val { - return nil - } - for i := range k.shadows { - if k.shadows[i].value == val { - return nil - } - } - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. 
-func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -// AddNestedValue adds a nested value to the key. -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. Shadow -// keys with empty values are ignored from the returned list. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - if k.value == "" { - return []string{} - } - return []string{k.value} - } - - vals := make([]string, 0, len(k.shadows)+1) - if k.value != "" { - vals = append(vals, k.value) - } - for _, s := range k.shadows { - if s.value != "" { - vals = append(vals, s.value) - } - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. 
-func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < depthValues; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := vr[2 : len(vr)-2] - - // Search in the same section. - // If not found or found the key itself, then search again in default section. - nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - nk, _ = k.s.f.Section("").GetKey(noption) - if nk == nil { - // Stop when no results found in the default section, - // and returns the value as-is. - break - } - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. 
-func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - v, err := strconv.ParseInt(k.String(), 0, 64) - return int(v), err -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 0, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 0, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 0, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. 
-func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. 
-func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. 
-func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx++ - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. 
-func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Bools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
-func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. 
-func (k *Key) ValidBools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. -func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictBools returns list of bool divided by given delimiter or error on first invalid input. 
-func (k *Key) StrictBools(delim string) ([]bool, error) { - return k.parseBools(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseBools transforms strings to bools. -func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { - vals := make([]bool, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := parseBool(str) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(bool)) - } - } - return vals, err -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseFloat(str, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(float64)) - } - } - return vals, err -} - -// parseInts transforms strings to ints. 
-func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseInt(str, 0, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, int(val.(int64))) - } - } - return vals, err -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseInt(str, 0, 64) - return val, err - } - - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(int64)) - } - } - return vals, err -} - -// parseUints transforms strings to uints. -func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseUint(str, 0, 64) - return val, err - } - - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, uint(val.(uint64))) - } - } - return vals, err -} - -// parseUint64s transforms strings to uint64s. 
-func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseUint(str, 0, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(uint64)) - } - } - return vals, err -} - -type Parser func(str string) (interface{}, error) - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := time.Parse(format, str) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(time.Time)) - } - } - return vals, err -} - -// doParse transforms strings to different types -func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { - vals := make([]interface{}, 0, len(strs)) - for _, str := range strs { - val, err := parser(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. 
-func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 44fc526c2c..0000000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" -) - -const minReaderBufferSize = 4096 - -var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) - -type parserOptions struct { - IgnoreContinuation bool - IgnoreInlineComment bool - AllowPythonMultilineValues bool - SpaceBeforeInlineComment bool - UnescapeValueDoubleQuotes bool - UnescapeValueCommentSymbols bool - PreserveSurroundedQuote bool - DebugFunc DebugFunc - ReaderBufferSize int -} - -type parser struct { - buf *bufio.Reader - options parserOptions - - isEOF bool - count int - comment *bytes.Buffer -} - -func (p *parser) debug(format string, args ...interface{}) { - if p.options.DebugFunc != nil { - p.options.DebugFunc(fmt.Sprintf(format, args...)) - } -} - -func newParser(r io.Reader, opts parserOptions) *parser { - size := opts.ReaderBufferSize - if size < minReaderBufferSize { - size = minReaderBufferSize - } - - return &parser{ - buf: bufio.NewReaderSize(r, size), - options: opts, - 
count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - _, err = p.buf.Read(mask) - if err != nil { - return err - } - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - _, err = p.buf.Read(mask) - if err != nil { - return err - } - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(delimiters string, in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - var endIdx int - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], delimiters) - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, delimiters) - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - if endIdx == 0 { - return "", -1, ErrEmptyKeyName{line} - } - - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. 
-// It returns false if any other parts also contain same kind of quotes. -func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, bufferSize int) (string, error) { - - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { - return p.readPythonMultilines(line, bufferSize) - } - return "", nil - } - - var valQuote string - if len(line) > 3 && line[0:3] == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { - valQuote = `"` - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { - return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil - } - return line[startIdx : pos+startIdx], nil - } - - lastChar := line[len(line)-1] - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - trimmedLastChar := line[len(line)-1] - - // Check continuation lines when desired - if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !p.options.IgnoreInlineComment { - var i int - if p.options.SpaceBeforeInlineComment { - i = strings.Index(line, " #") - if i == -1 { - i = strings.Index(line, " ;") - } - - } else { - i = strings.IndexAny(line, "#;") - } - - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - } - - // Trim single and double quotes - if (hasSurroundedQuote(line, '\'') || - 
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { - line = line[1 : len(line)-1] - } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { - line = strings.ReplaceAll(line, `\;`, ";") - line = strings.ReplaceAll(line, `\#`, "#") - } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { - return p.readPythonMultilines(line, bufferSize) - } - - return line, nil -} - -func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { - parserBufferPeekResult, _ := p.buf.Peek(bufferSize) - peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - - for { - peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil && peekErr != io.EOF { - p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) - return "", peekErr - } - - p.debug("readPythonMultilines: parsing %q", string(peekData)) - - peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) - p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) - for n, v := range peekMatches { - p.debug(" %d: %q", n, v) - } - - // Return if not a Python multiline value. - if len(peekMatches) != 3 { - p.debug("readPythonMultilines: end of value, got: %q", line) - return line, nil - } - - // Advance the parser reader (buffer) in-sync with the peek buffer. - _, err := p.buf.Discard(len(peekData)) - if err != nil { - p.debug("readPythonMultilines: failed to skip to the end, returning error") - return "", err - } - - line += "\n" + peekMatches[0] - } -} - -// parse parses data through an io.Reader. 
-func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader, parserOptions{ - IgnoreContinuation: f.options.IgnoreContinuation, - IgnoreInlineComment: f.options.IgnoreInlineComment, - AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, - SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, - UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, - UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, - PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, - DebugFunc: f.options.DebugFunc, - ReaderBufferSize: f.options.ReaderBufferSize, - }) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. - name := DefaultSection - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(DefaultSection) - } - section, _ := f.NewSection(name) - - // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key - var isLastValueEmpty bool - var lastRegularKey *Key - - var line []byte - var inUnparseableSection bool - - // NOTE: Iterate and increase `currentPeekSize` until - // the size of the parser buffer is found. - // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. - parserBufferSize := 0 - // NOTE: Peek 4kb at a time. 
- currentPeekSize := minReaderBufferSize - - if f.options.AllowPythonMultilineValues { - for { - peekBytes, _ := p.buf.Peek(currentPeekSize) - peekBytesLength := len(peekBytes) - - if parserBufferSize >= peekBytesLength { - break - } - - currentPeekSize *= 2 - parserBufferSize = peekBytesLength - } - } - - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - if f.options.AllowNestedValues && - isLastValueEmpty && len(line) > 0 { - if line[0] == ' ' || line[0] == '\t' { - err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) - if err != nil { - return err - } - continue - } - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. - p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - closeIdx := bytes.LastIndexByte(line, ']') - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset auto-counter and comments - p.comment.Reset() - p.count = 1 - // Nested values can't span sections - isLastValueEmpty = false - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue 
- } - - kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) - if err != nil { - switch { - // Treat as boolean key when desired, and whole line is key name. - case IsErrDelimiterNotFound(err): - switch { - case f.options.AllowBooleanKeys: - kname, err := p.readValue(line, parserBufferSize) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - - case f.options.SkipUnrecognizableLines: - continue - } - case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: - continue - } - return err - } - - // Auto increment. - isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], parserBufferSize) - if err != nil { - return err - } - isLastValueEmpty = len(value) == 0 - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - lastRegularKey = key - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index a3615d820b..0000000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// SetBody updates body content only if section is raw. -func (s *Section) SetBody(body string) { - if !s.isRawSection { - return - } - s.rawBody = body -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - s.keysHash[name] = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. 
-func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive || s.f.options.InsensitiveKeys { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } - break - } - return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Deprecated: Use "HasKey" instead. -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. 
-func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := make(map[string]string, len(s.keysHash)) - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - delete(s.keysHash, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + s.f.options.ChildSectionDelimiter - children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]...) 
- } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index a486b2fe0f..0000000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,747 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // SnackCase converts to format SNACK_CASE. - SnackCase NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. 
- TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= 'A' - 'a' - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. -func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflect.Bool: - vals, err = key.parseBools(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if err != nil && isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case 
reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflect.Bool: - slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return error for failing parsing, -// because we want to use default value that is already assigned to struct. -func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - vt := t - isPtr := t.Kind() == reflect.Ptr - if isPtr { - vt = t.Elem() - } - switch vt.Kind() { - case reflect.String: - stringVal := key.String() - if isPtr { - field.Set(reflect.ValueOf(&stringVal)) - } else if len(stringVal) > 0 { - field.SetString(key.String()) - } - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&boolVal)) - } else { - field.SetBool(boolVal) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // ParseDuration will not return err for `0`, so check the type name - if vt.Name() == "Duration" { - durationVal, err := key.Duration() - if err != nil { - if intVal, err := key.Int64(); err == nil { - field.SetInt(intVal) - return nil - } - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&durationVal)) - } else if int64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - } 
- return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetInt(intVal) - field.Set(pv) - } else { - field.SetInt(intVal) - } - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && uint64(durationVal) > 0 { - if isPtr { - field.Set(reflect.ValueOf(&durationVal)) - } else { - field.Set(reflect.ValueOf(durationVal)) - } - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetUint(uintVal) - field.Set(pv) - } else { - field.SetUint(uintVal) - } - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetFloat(floatVal) - field.Set(pv) - } else { - field.SetFloat(floatVal) - } - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&timeVal)) - } else { - field.Set(reflect.ValueOf(timeVal)) - } - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type %q", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { - opts := strings.SplitN(tag, ",", 5) - rawName = opts[0] - for _, opt := range opts[1:] { - omitEmpty = omitEmpty || (opt == "omitempty") - allowShadow = allowShadow || (opt == "allowshadow") - allowNonUnique = allowNonUnique || (opt == "nonunique") - extends = extends || (opt == "extends") - } - return rawName, omitEmpty, allowShadow, allowNonUnique, extends -} - -// 
mapToField maps the given value to the matching field of the given section. -// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. -func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isStruct := tpField.Type.Kind() == reflect.Struct - isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct - isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - if isAnonymousPtr { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { - if isStructPtr && field.IsNil() { - field.Set(reflect.New(tpField.Type.Elem())) - } - fieldSection := s - if rawName != "" { - sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName - if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { - fieldSection = secs[sectionIndex] - } - } - if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { - return fmt.Errorf("map to field %q: %v", fieldName, err) - } - } else if isAnonymousPtr || isStruct || isStructPtr { - if secs, err := s.f.SectionsByName(fieldName); err == nil { - if len(secs) <= sectionIndex { - return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) - } - // Only set the field to non-nil struct value if we have a section for it. 
- // Otherwise, we end up with a non-nil struct ptr even though there is no data. - if isStructPtr && field.IsNil() { - field.Set(reflect.New(tpField.Type.Elem())) - } - if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { - return fmt.Errorf("map to field %q: %v", fieldName, err) - } - continue - } - } - - // Map non-unique sections - if allowNonUnique && tpField.Type.Kind() == reflect.Slice { - newField, err := s.mapToSlice(fieldName, field, isStrict) - if err != nil { - return fmt.Errorf("map to slice %q: %v", fieldName, err) - } - - field.Set(newField) - continue - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("set field %q: %v", fieldName, err) - } - } - } - return nil -} - -// mapToSlice maps all sections with the same name and returns the new value. -// The type of the Value must be a slice. -func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { - secs, err := s.f.SectionsByName(secName) - if err != nil { - return reflect.Value{}, err - } - - typ := val.Type().Elem() - for i, sec := range secs { - elem := reflect.New(typ) - if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { - return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) - } - - val = reflect.Append(val, elem.Elem()) - } - return val, nil -} - -// mapTo maps a section to object v. 
-func (s *Section) mapTo(v interface{}, isStrict bool) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("not a pointer to a struct") - } - - if typ.Kind() == reflect.Slice { - newField, err := s.mapToSlice(s.name, val, isStrict) - if err != nil { - return err - } - - val.Set(newField) - return nil - } - - return s.mapToField(val, isStrict, 0, s.name) -} - -// MapTo maps section to given struct. -func (s *Section) MapTo(v interface{}) error { - return s.mapTo(v, false) -} - -// StrictMapTo maps section to given struct in strict mode, -// which returns all possible error including value parsing error. -func (s *Section) StrictMapTo(v interface{}) error { - return s.mapTo(v, true) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// StrictMapTo maps file to given struct in strict mode, -// which returns all possible error including value parsing error. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - -// MapToWithMapper maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible error including value parsing error. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - -// MapTo maps data sources to given struct. 
-func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible error including value parsing error. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. -func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - sliceOf := field.Type().Elem().Kind() - - if allowShadow { - var keyWithShadows *Key - for i := 0; i < field.Len(); i++ { - var val string - switch sliceOf { - case reflect.String: - val = slice.Index(i).String() - case reflect.Int, reflect.Int64: - val = fmt.Sprint(slice.Index(i).Int()) - case reflect.Uint, reflect.Uint64: - val = fmt.Sprint(slice.Index(i).Uint()) - case reflect.Float64: - val = fmt.Sprint(slice.Index(i).Float()) - case reflect.Bool: - val = fmt.Sprint(slice.Index(i).Bool()) - case reflectTime: - val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - - if i == 0 { - keyWithShadows = newKey(key.s, key.name, val) - } else { - _ = keyWithShadows.AddShadow(val) - } - } - *key = *keyWithShadows - return nil - } - - var buf bytes.Buffer - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflect.Bool: - buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) - case reflectTime: - 
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-len(delim)]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. -func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim, allowShadow) - case reflect.Ptr: - if !field.IsNil() { - return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) - } - default: - return fmt.Errorf("unsupported type %q", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - -// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. -type StructReflector interface { - ReflectINIStruct(*File) error -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - if !val.Field(i).CanInterface() { - continue - } - - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) - if omitEmpty && isEmptyValue(field) { - continue - } - - if r, ok := field.Interface().(StructReflector); ok { - return r.ReflectINIStruct(s.f) - } - - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { - if err := s.reflectFrom(field); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is section doesn't 
exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - continue - } - - if allowNonUnique && tpField.Type.Kind() == reflect.Slice { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - sliceOf := field.Type().Elem().Kind() - - for i := 0; i < field.Len(); i++ { - if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { - return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) - } - - sec, err := s.f.NewSection(fieldName) - if err != nil { - return err - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err := sec.reflectFrom(slice.Index(i)); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - } - continue - } - - // Note: Same reason as section. - key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - - // Add comment from comment tag - if len(key.Comment) == 0 { - key.Comment = tpField.Tag.Get("comment") - } - - delim := parseDelim(tpField.Tag.Get("delim")) - if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { - return fmt.Errorf("reflect field %q: %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects section from given struct. It overwrites existing ones. 
-func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - - if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && - (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { - // Clear sections to make sure none exists before adding the new ones - s.f.DeleteSection(s.name) - - if typ.Kind() == reflect.Ptr { - sec, err := s.f.NewSection(s.name) - if err != nil { - return err - } - return sec.reflectFrom(val.Elem()) - } - - slice := val.Slice(0, val.Len()) - sliceOf := val.Type().Elem().Kind() - if sliceOf != reflect.Ptr { - return fmt.Errorf("not a slice of pointers") - } - - for i := 0; i < slice.Len(); i++ { - sec, err := s.f.NewSection(s.name) - if err != nil { - return err - } - - err = sec.reflectFrom(slice.Index(i)) - if err != nil { - return fmt.Errorf("reflect from %dth field: %v", i, err) - } - } - - return nil - } - - if typ.Kind() == reflect.Ptr { - val = val.Elem() - } else { - return errors.New("not a pointer to a struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFromWithMapper reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. 
-func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml deleted file mode 100644 index 0ed62c1a18..0000000000 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ /dev/null @@ -1,28 +0,0 @@ -version: "2" - -run: - timeout: 1m - tests: true - -linters: - default: none - enable: # please keep this alphabetized - - asasalint - - asciicheck - - copyloopvar - - dupl - - errcheck - - forcetypeassert - - goconst - - gocritic - - govet - - ineffassign - - misspell - - musttag - - revive - - staticcheck - - unused - -issues: - max-issues-per-linter: 0 - max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md deleted file mode 100644 index c356960046..0000000000 --- a/vendor/github.com/go-logr/logr/CHANGELOG.md +++ /dev/null @@ -1,6 +0,0 @@ -# CHANGELOG - -## v1.0.0-rc1 - -This is the first logged release. Major changes (including breaking changes) -have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md deleted file mode 100644 index 5d37e294c5..0000000000 --- a/vendor/github.com/go-logr/logr/CONTRIBUTING.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contributing - -Logr is open to pull-requests, provided they fit within the intended scope of -the project. Specifically, this library aims to be VERY small and minimalist, -with no external dependencies. - -## Compatibility - -This project intends to follow [semantic versioning](http://semver.org) and -is very strict about compatibility. Any proposed changes MUST follow those -rules. - -## Performance - -As a logging library, logr must be as light-weight as possible. Any proposed -code change must include results of running the [benchmark](./benchmark) -before and after the change. 
diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE deleted file mode 100644 index 8dada3edaf..0000000000 --- a/vendor/github.com/go-logr/logr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md deleted file mode 100644 index 7c7f0c69cd..0000000000 --- a/vendor/github.com/go-logr/logr/README.md +++ /dev/null @@ -1,407 +0,0 @@ -# A minimal logging API for Go - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) - -logr offers an(other) opinion on how Go programs and libraries can do logging -without becoming coupled to a particular logging implementation. This is not -an implementation of logging - it is an API. In fact it is two APIs with two -different sets of users. - -The `Logger` type is intended for application and library authors. It provides -a relatively small API which can be used everywhere you want to emit logs. It -defers the actual act of writing logs (to files, to stdout, or whatever) to the -`LogSink` interface. - -The `LogSink` interface is intended for logging library implementers. It is a -pure interface which can be implemented by logging frameworks to provide the actual logging -functionality. - -This decoupling allows application and library developers to write code in -terms of `logr.Logger` (which has very low dependency fan-out) while the -implementation of logging is managed "up stack" (e.g. in or near `main()`.) -Application developers can then switch out implementations as necessary. - -Many people assert that libraries should not be logging, and as such efforts -like this are pointless. Those people are welcome to convince the authors of -the tens-of-thousands of libraries that *DO* write logs that they are all -wrong. 
In the meantime, logr takes a more practical approach. - -## Typical usage - -Somewhere, early in an application's life, it will make a decision about which -logging library (implementation) it actually wants to use. Something like: - -``` - func main() { - // ... other setup code ... - - // Create the "root" logger. We have chosen the "logimpl" implementation, - // which takes some initial parameters and returns a logr.Logger. - logger := logimpl.New(param1, param2) - - // ... other setup code ... -``` - -Most apps will call into other libraries, create structures to govern the flow, -etc. The `logr.Logger` object can be passed to these other libraries, stored -in structs, or even used as a package-global variable, if needed. For example: - -``` - app := createTheAppObject(logger) - app.Run() -``` - -Outside of this early setup, no other packages need to know about the choice of -implementation. They write logs in terms of the `logr.Logger` that they -received: - -``` - type appObject struct { - // ... other fields ... - logger logr.Logger - // ... other fields ... - } - - func (app *appObject) Run() { - app.logger.Info("starting up", "timestamp", time.Now()) - - // ... app code ... -``` - -## Background - -If the Go standard library had defined an interface for logging, this project -probably would not be needed. Alas, here we are. 
- -When the Go developers started developing such an interface with -[slog](https://github.com/golang/go/issues/56345), they adopted some of the -logr design but also left out some parts and changed others: - -| Feature | logr | slog | -|---------|------|------| -| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | -| Low-level API | `LogSink` | `Handler` | -| Stack unwinding | done by `LogSink` | done by `Logger` | -| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | -| Generating a value for logging on demand | `Marshaler` | `LogValuer` | -| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | -| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | -| Passing logger via context | `NewContext`, `FromContext` | no API | -| Adding a name to a logger | `WithName` | no API | -| Modify verbosity of log entries in a call chain | `V` | no API | -| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | -| Pass context for extracting additional values | no API | API variants like `InfoCtx` | - -The high-level slog API is explicitly meant to be one of many different APIs -that can be layered on top of a shared `slog.Handler`. logr is one such -alternative API, with [interoperability](#slog-interoperability) provided by -some conversion functions. - -### Inspiration - -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what -he has to say, and it largely aligns with our own experiences. - -### Differences from Dave's ideas - -The main differences are: - -1. Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. 
We disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. This -package restricts the logging API to just 2 types of logs: info and error. - -Info logs are things you want to tell the user which are not errors. Error -logs are, well, errors. If your code receives an `error` from a subordinate -function call and is logging that `error` *and not returning it*, use error -logs. - -2. Verbosity-levels on info logs. This gives developers a chance to indicate -arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug." Superficially this -may feel very similar, but the primary difference is the lack of semantics. -Because verbosity is a numerical value, it's safe to assume that an app running -with higher verbosity means more (and less important) logs will be generated. - -## Implementations (non-exhaustive) - -There are implementations for the following logging libraries: - -- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) -- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) -- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) -- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) -- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) -- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) -- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) -- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) -- 
**github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) -- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) -- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) - -## slog interoperability - -Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` -and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and -`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. -As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level -slog API. - -### Using a `logr.LogSink` as backend for slog - -Ideally, a logr sink implementation should support both logr and slog by -implementing both the normal logr interface(s) and `SlogSink`. Because -of a conflict in the parameters of the common `Enabled` method, it is [not -possible to implement both slog.Handler and logr.Sink in the same -type](https://github.com/golang/go/issues/59110). - -If both are supported, log calls can go from the high-level APIs to the backend -without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can -convert back and forth without adding additional wrappers, with one exception: -when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then -`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future -log calls. - -Such an implementation should also support values that implement specific -interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, -`slog.GroupValue`). logr does not convert those. - -Not supporting slog has several drawbacks: -- Recording source code locations works correctly if the handler gets called - through `slog.Logger`, but may be wrong in other cases. 
That's because a - `logr.Sink` does its own stack unwinding instead of using the program counter - provided by the high-level API. -- slog levels <= 0 can be mapped to logr levels by negating the level without a - loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as - used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink - because logr does not support "more important than info" levels. -- The slog group concept is supported by prefixing each key in a key/value - pair with the group names, separated by a dot. For structured output like - JSON it would be better to group the key/value pairs inside an object. -- Special slog values and interfaces don't work as expected. -- The overhead is likely to be higher. - -These drawbacks are severe enough that applications using a mixture of slog and -logr should switch to a different backend. - -### Using a `slog.Handler` as backend for logr - -Using a plain `slog.Handler` without support for logr works better than the -other direction: -- All logr verbosity levels can be mapped 1:1 to their corresponding slog level - by negating them. -- Stack unwinding is done by the `SlogSink` and the resulting program - counter is passed to the `slog.Handler`. -- Names added via `Logger.WithName` are gathered and recorded in an additional - attribute with `logger` as key and the names separated by slash as value. -- `Logger.Error` is turned into a log record with `slog.LevelError` as level - and an additional attribute with `err` as key, if an error was provided. - -The main drawback is that `logr.Marshaler` will not be supported. Types should -ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility -with logr implementations without slog support is not important, then -`slog.Valuer` is sufficient. - -### Context support for slog - -Storing a logger in a `context.Context` is not supported by -slog. 
`NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be -used to fill this gap. They store and retrieve a `slog.Logger` pointer -under the same context key that is also used by `NewContext` and -`FromContext` for `logr.Logger` value. - -When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will -automatically convert the `slog.Logger` to a -`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction. - -With this approach, binaries which use either slog or logr are as efficient as -possible with no unnecessary allocations. This is also why the API stores a -`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` -on retrieval would need to allocate one. - -The downside is that switching back and forth needs more allocations. Because -logr is the API that is already in use by different packages, in particular -Kubernetes, the recommendation is to use the `logr.Logger` API in code which -uses contextual logging. - -An alternative to adding values to a logger and storing that logger in the -context is to store the values in the context and to configure a logging -backend to extract those values when emitting log entries. This only works when -log calls are passed the context, which is not supported by the logr API. - -With the slog API, it is possible, but not -required. https://github.com/veqryn/slog-context is a package for slog which -provides additional support code for this approach. It also contains wrappers -for the context functions in logr, so developers who prefer to not use the logr -APIs directly can use those instead and the resulting code will still be -interoperable with logr. - -## FAQ - -### Conceptual - -#### Why structured logging? 
- -- **Structured logs are more easily queryable**: Since you've got - key-value pairs, it's much easier to query your structured logs for - particular values by filtering on the contents of a particular key -- - think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc. - -- **Structured logging makes it easier to have cross-referenceable logs**: - Similarly to searchability, if you maintain conventions around your - keys, it becomes easy to gather all log lines related to a particular - concept. - -- **Structured logs allow better dimensions of filtering**: if you have - structure to your logs, you've got more precise control over how much - information is logged -- you might choose in a particular configuration - to log certain keys but not others, only log lines where a certain key - matches a certain value, etc., instead of just having v-levels and names - to key off of. - -- **Structured logs better represent structured data**: sometimes, the - data that you want to log is inherently structured (think tuple-link - objects.) Structured logs allow you to preserve that structure when - outputting. - -#### Why V-levels? - -**V-levels give operators an easy way to control the chattiness of log -operations**. V-levels provide a way for a given package to distinguish -the relative importance or verbosity of a given log message. Then, if -a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. - -#### Why not named levels, like Info/Warning/Error? - -Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences -from Dave's ideas](#differences-from-daves-ideas). - -#### Why not allow format strings, too? - -**Format strings negate many of the benefits of structured logs**: - -- They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc. 
- -- They don't store structured data well, since contents are flattened into - a string. - -- They're not cross-referenceable. - -- They don't compress easily, since the message is not constant. - -(Unless you turn positional parameters into key-value pairs with numerical -keys, at which point you've gotten key-value logging with meaningless -keys.) - -### Practical - -#### Why key-value pairs, and not a map? - -Key-value pairs are *much* easier to optimize, especially around -allocations. Zap (a structured logger that inspired logr's interface) has -[performance measurements](https://github.com/uber-go/zap#performance) -that show this quite nicely. - -While the interface ends up being a little less obvious, you get -potentially better performance, plus avoid making users type -`map[string]string{}` every time they want to log. - -#### What if my V-levels differ between libraries? - -That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` method to pass different loggers to different libraries. - -Generally, you should take care to ensure that you have relatively -consistent V-levels within a given logger, however, as this makes deciding -on what verbosity of logs to request easier. - -#### But I really want to use a format string! - -That's not actually a question. Assuming your question is "how do -I convert my mental model of logging with format strings to logging with -constant messages": - -1. Figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message. - -2. For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair. 
- -For instance, consider the following examples (all taken from spots in the -Kubernetes codebase): - -- `klog.V(4).Infof("Client is returning errors: code %v, error %v", - responseCode, err)` becomes `logger.Error(err, "client returned an - error", "code", responseCode)` - -- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", - seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after - response when requesting url", "attempt", retries, "after - seconds", seconds, "url", url)` - -If you *really* must use a format string, use it in a key's value, and -call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to -reflect over type %T")` becomes `logger.Info("unable to reflect over -type", "type", fmt.Sprintf("%T"))`. In general though, the cases where -this is necessary should be few and far between. - -#### How do I choose my V-levels? - -This is basically the only hard constraint: increase V-levels to denote -more verbose or more debug-y logs. - -Otherwise, you can start out with `0` as "you always want to see this", -`1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack." - -Then gradually choose levels in between as you need them, working your way -down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). For reference, slog pre-defines -4 for debug logs -(corresponds to 4 in logr), which matches what is -[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). - -#### How do I choose my keys? - -Keys are fairly flexible, and can hold more or less any string -value. For best compatibility with implementations and consistency -with existing code in other projects, there are a few conventions you -should consider. - -- Make your keys human-readable. -- Constant keys are generally a good idea. 
-- Be consistent across your codebase. -- Keys should naturally match parts of the message string. -- Use lower case for simple keys and - [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for - more complex ones. Kubernetes is one example of a project that has - [adopted that - convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). - -While key names are mostly unrestricted (and spaces are acceptable), -it's generally a good idea to stick to printable ascii characters, or at -least match the general character set of your log lines. - -#### Why should keys be constant values? - -The point of structured logging is to make later log processing easier. Your -keys are, effectively, the schema of each log message. If you use different -keys across instances of the same log line, you will make your structured logs -much harder to use. `Sprintf()` is for values, not for keys! - -#### Why is this not a pure interface? - -The Logger type is implemented as a struct in order to allow the Go compiler to -optimize things like high-V `Info` logs that are not triggered. Not all of -these implementations are implemented yet, but this structure was suggested as -a way to ensure they *can* be implemented. All of the real work is behind the -`LogSink` interface. - -[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md deleted file mode 100644 index 1ca756fc7b..0000000000 --- a/vendor/github.com/go-logr/logr/SECURITY.md +++ /dev/null @@ -1,18 +0,0 @@ -# Security Policy - -If you have discovered a security vulnerability in this project, please report it -privately. 
**Do not disclose it as a public issue.** This gives us time to work with you -to fix the issue before public exposure, reducing the chance that the exploit will be -used before a patch is released. - -You may submit the report in the following ways: - -- send an email to go-logr-security@googlegroups.com -- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) - -Please provide the following information in your report: - -- A description of the vulnerability and its impact -- How to reproduce the issue - -We ask that you give us 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go deleted file mode 100644 index de8bcc3ad8..0000000000 --- a/vendor/github.com/go-logr/logr/context.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -// contextKey is how we find Loggers in a context.Context. With Go < 1.21, -// the value is always a Logger value. With Go >= 1.21, the value can be a -// Logger value or a slog.Logger pointer. -type contextKey struct{} - -// notFoundError exists to carry an IsNotFound method. 
-type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go deleted file mode 100644 index f012f9a18e..0000000000 --- a/vendor/github.com/go-logr/logr/context_noslog.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !go1.21 -// +build !go1.21 - -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" -) - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. 
-func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go deleted file mode 100644 index 065ef0b828..0000000000 --- a/vendor/github.com/go-logr/logr/context_slog.go +++ /dev/null @@ -1,83 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" - "fmt" - "log/slog" -) - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - v := ctx.Value(contextKey{}) - if v == nil { - return Logger{}, notFoundError{} - } - - switch v := v.(type) { - case Logger: - return v, nil - case *slog.Logger: - return FromSlogHandler(v.Handler()), nil - default: - // Not reached. - panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) - } -} - -// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. -func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { - v := ctx.Value(contextKey{}) - if v == nil { - return nil - } - - switch v := v.(type) { - case Logger: - return slog.New(ToSlogHandler(v)) - case *slog.Logger: - return v - default: - // Not reached. 
- panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) - } -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if logger, err := FromContext(ctx); err == nil { - return logger - } - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - -// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the -// provided slog.Logger. -func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go deleted file mode 100644 index 99fe8be93c..0000000000 --- a/vendor/github.com/go-logr/logr/discard.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2020 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -// Discard returns a Logger that discards all messages logged to it. It can be -// used whenever the caller is not interested in the logs. Logger instances -// produced by this function always compare as equal. 
-func Discard() Logger { - return New(nil) -} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go deleted file mode 100644 index b22c57d713..0000000000 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ /dev/null @@ -1,914 +0,0 @@ -/* -Copyright 2021 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package funcr implements formatting of structured log messages and -// optionally captures the call site and timestamp. -// -// The simplest way to use it is via its implementation of a -// github.com/go-logr/logr.LogSink with output through an arbitrary -// "write" function. See New and NewJSON for details. -// -// # Custom LogSinks -// -// For users who need more control, a funcr.Formatter can be embedded inside -// your own custom LogSink implementation. This is useful when the LogSink -// needs to implement additional methods, for example. -// -// # Formatting -// -// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for -// values which are being logged. When rendering a struct, funcr will use Go's -// standard JSON tags (all except "string"). -package funcr - -import ( - "bytes" - "encoding" - "encoding/json" - "fmt" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/go-logr/logr" -) - -// New returns a logr.Logger which is implemented by an arbitrary function. 
-func New(fn func(prefix, args string), opts Options) logr.Logger { - return logr.New(newSink(fn, NewFormatter(opts))) -} - -// NewJSON returns a logr.Logger which is implemented by an arbitrary function -// and produces JSON output. -func NewJSON(fn func(obj string), opts Options) logr.Logger { - fnWrapper := func(_, obj string) { - fn(obj) - } - return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) -} - -// Underlier exposes access to the underlying logging function. Since -// callers only have a logr.Logger, they have to know which -// implementation is in use, so this interface is less of an -// abstraction and more of a way to test type conversion. -type Underlier interface { - GetUnderlying() func(prefix, args string) -} - -func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { - l := &fnlogger{ - Formatter: formatter, - write: fn, - } - // For skipping fnlogger.Info and fnlogger.Error. - l.AddCallDepth(1) // via Formatter - return l -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // LogCaller tells funcr to add a "caller" key to some or all log lines. - // This has some overhead, so some users might not want it. - LogCaller MessageClass - - // LogCallerFunc tells funcr to also log the calling function name. This - // has no effect if caller logging is not enabled (see Options.LogCaller). - LogCallerFunc bool - - // LogTimestamp tells funcr to add a "ts" key to log lines. This has some - // overhead, so some users might not want it. - LogTimestamp bool - - // TimestampFormat tells funcr how to render timestamps when LogTimestamp - // is enabled. If not specified, a default format will be used. For more - // details, see docs for Go's time.Layout. - TimestampFormat string - - // LogInfoLevel tells funcr what key to use to log the info level. - // If not specified, the info level will be logged as "level". - // If this is set to "", the info level will not be logged at all. 
- LogInfoLevel *string - - // Verbosity tells funcr which V logs to produce. Higher values enable - // more logs. Info logs at or below this level will be written, while logs - // above this level will be discarded. - Verbosity int - - // RenderBuiltinsHook allows users to mutate the list of key-value pairs - // while a log line is being rendered. The kvList argument follows logr - // conventions - each pair of slice elements is comprised of a string key - // and an arbitrary value (verified and sanitized before calling this - // hook). The value returned must follow the same conventions. This hook - // can be used to audit or modify logged data. For example, you might want - // to prefix all of funcr's built-in keys with some string. This hook is - // only called for built-in (provided by funcr itself) key-value pairs. - // Equivalent hooks are offered for key-value pairs saved via - // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and - // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []any) []any - - // RenderValuesHook is the same as RenderBuiltinsHook, except that it is - // only called for key-value pairs saved via logr.Logger.WithValues. See - // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []any) []any - - // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only - // called for key-value pairs passed directly to Info and Error. See - // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []any) []any - - // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct - // that contains a struct, etc.) it may log. Every time it finds a struct, - // slice, array, or map the depth is increased by one. When the maximum is - // reached, the value will be converted to a string indicating that the max - // depth has been exceeded. If this field is not specified, a default - // value will be used. 
- MaxLogDepth int -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. - Error -) - -// fnlogger inherits some of its LogSink implementation from Formatter -// and just needs to add some glue code. -type fnlogger struct { - Formatter - write func(prefix, args string) -} - -func (l fnlogger) WithName(name string) logr.LogSink { - l.AddName(name) // via Formatter - return &l -} - -func (l fnlogger) WithValues(kvList ...any) logr.LogSink { - l.AddValues(kvList) // via Formatter - return &l -} - -func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.AddCallDepth(depth) // via Formatter - return &l -} - -func (l fnlogger) Info(level int, msg string, kvList ...any) { - prefix, args := l.FormatInfo(level, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) Error(err error, msg string, kvList ...any) { - prefix, args := l.FormatError(err, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) GetUnderlying() func(prefix, args string) { - return l.write -} - -// Assert conformance to the interfaces. -var _ logr.LogSink = &fnlogger{} -var _ logr.CallDepthLogSink = &fnlogger{} -var _ Underlier = &fnlogger{} - -// NewFormatter constructs a Formatter which emits a JSON-like key=value format. -func NewFormatter(opts Options) Formatter { - return newFormatter(opts, outputKeyValue) -} - -// NewFormatterJSON constructs a Formatter which emits strict JSON. -func NewFormatterJSON(opts Options) Formatter { - return newFormatter(opts, outputJSON) -} - -// Defaults for Options. 
-const defaultTimestampFormat = "2006-01-02 15:04:05.000000" -const defaultMaxLogDepth = 16 - -func newFormatter(opts Options, outfmt outputFormat) Formatter { - if opts.TimestampFormat == "" { - opts.TimestampFormat = defaultTimestampFormat - } - if opts.MaxLogDepth == 0 { - opts.MaxLogDepth = defaultMaxLogDepth - } - if opts.LogInfoLevel == nil { - opts.LogInfoLevel = new(string) - *opts.LogInfoLevel = "level" - } - f := Formatter{ - outputFormat: outfmt, - prefix: "", - values: nil, - depth: 0, - opts: &opts, - } - return f -} - -// Formatter is an opaque struct which can be embedded in a LogSink -// implementation. It should be constructed with NewFormatter. Some of -// its methods directly implement logr.LogSink. -type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - depth int - opts *Options - groupName string // for slog groups - groups []groupDef -} - -// outputFormat indicates which outputFormat to use. -type outputFormat int - -const ( - // outputKeyValue emits a JSON-like key=value format, but not strict JSON. - outputKeyValue outputFormat = iota - // outputJSON emits strict JSON. - outputJSON -) - -// groupDef represents a saved group. The values may be empty, but we don't -// know if we need to render the group until the final record is rendered. -type groupDef struct { - name string - values string -} - -// PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []any - -// render produces a log line, ready to use. -func (f Formatter) render(builtins, args []any) string { - // Empirically bytes.Buffer is faster than strings.Builder for this. 
- buf := bytes.NewBuffer(make([]byte, 0, 1024)) - - if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole record - } - - // Render builtins - vals := builtins - if hook := f.opts.RenderBuiltinsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, false) // keys are ours, no need to escape - continuing := len(builtins) > 0 - - // Turn the inner-most group into a string - argsStr := func() string { - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, true) // escape user-provided keys - - return buf.String() - }() - - // Render the stack of groups from the inside out. - bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) - for i := len(f.groups) - 1; i >= 0; i-- { - grp := &f.groups[i] - if grp.values == "" && bodyStr == "" { - // no contents, so we must elide the whole group - continue - } - bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) - } - - if bodyStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(bodyStr) - } - - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole record - } - - return buf.String() -} - -// renderGroup returns a string representation of the named group with rendered -// values and args. If the name is empty, this will return the values and args, -// joined. If the name is not empty, this will return a single key-value pair, -// where the value is a grouping of the values and args. If the values and -// args are both empty, this will return an empty string, even if the name was -// specified. 
-func (f Formatter) renderGroup(name string, values string, args string) string { - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - - needClosingBrace := false - if name != "" && (values != "" || args != "") { - buf.WriteString(f.quoted(name, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') - needClosingBrace = true - } - - continuing := false - if values != "" { - buf.WriteString(values) - continuing = true - } - - if args != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(args) - } - - if needClosingBrace { - buf.WriteByte('}') - } - - return buf.String() -} - -// flatten renders a list of key-value pairs into a buffer. If escapeKeys is -// true, the keys are assumed to have non-JSON-compatible characters in them -// and must be evaluated for escapes. -// -// This function returns a potentially modified version of kvList, which -// ensures that there is a value for every key (adding a value if needed) and -// that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { - // This logic overlaps with sanitize() but saves one type-cast per key, - // which can be measurable. - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - copied := false - for i := 0; i < len(kvList); i += 2 { - k, ok := kvList[i].(string) - if !ok { - if !copied { - newList := make([]any, len(kvList)) - copy(newList, kvList) - kvList = newList - copied = true - } - k = f.nonStringKey(kvList[i]) - kvList[i] = k - } - v := kvList[i+1] - - if i > 0 { - if f.outputFormat == outputJSON { - buf.WriteByte(f.comma()) - } else { - // In theory the format could be something we don't understand. In - // practice, we control it, so it won't be. 
- buf.WriteByte(' ') - } - } - - buf.WriteString(f.quoted(k, escapeKeys)) - buf.WriteByte(f.colon()) - buf.WriteString(f.pretty(v)) - } - return kvList -} - -func (f Formatter) quoted(str string, escape bool) string { - if escape { - return prettyString(str) - } - // this is faster - return `"` + str + `"` -} - -func (f Formatter) comma() byte { - if f.outputFormat == outputJSON { - return ',' - } - return ' ' -} - -func (f Formatter) colon() byte { - if f.outputFormat == outputJSON { - return ':' - } - return '=' -} - -func (f Formatter) pretty(value any) string { - return f.prettyWithFlags(value, 0, 0) -} - -const ( - flagRawStruct = 0x1 // do not print braces on structs -) - -// TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { - if depth > f.opts.MaxLogDepth { - return `""` - } - - // Handle types that take full control of logging. - if v, ok := value.(logr.Marshaler); ok { - // Replace the value with what the type wants to get logged. - // That then gets handled below via reflection. - value = invokeMarshaler(v) - } - - // Handle types that want to format themselves. - switch v := value.(type) { - case fmt.Stringer: - value = invokeStringer(v) - case error: - value = invokeError(v) - } - - // Handling the most common types without reflect is a small perf win. 
- switch v := value.(type) { - case bool: - return strconv.FormatBool(v) - case string: - return prettyString(v) - case int: - return strconv.FormatInt(int64(v), 10) - case int8: - return strconv.FormatInt(int64(v), 10) - case int16: - return strconv.FormatInt(int64(v), 10) - case int32: - return strconv.FormatInt(int64(v), 10) - case int64: - return strconv.FormatInt(int64(v), 10) - case uint: - return strconv.FormatUint(uint64(v), 10) - case uint8: - return strconv.FormatUint(uint64(v), 10) - case uint16: - return strconv.FormatUint(uint64(v), 10) - case uint32: - return strconv.FormatUint(uint64(v), 10) - case uint64: - return strconv.FormatUint(v, 10) - case uintptr: - return strconv.FormatUint(uint64(v), 10) - case float32: - return strconv.FormatFloat(float64(v), 'f', -1, 32) - case float64: - return strconv.FormatFloat(v, 'f', -1, 64) - case complex64: - return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` - case complex128: - return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` - case PseudoStruct: - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - v = f.sanitize(v) - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - for i := 0; i < len(v); i += 2 { - if i > 0 { - buf.WriteByte(f.comma()) - } - k, _ := v[i].(string) // sanitize() above means no need to check success - // arbitrary keys might need escaping - buf.WriteString(prettyString(k)) - buf.WriteByte(f.colon()) - buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - } - - buf := bytes.NewBuffer(make([]byte, 0, 256)) - t := reflect.TypeOf(value) - if t == nil { - return "null" - } - v := reflect.ValueOf(value) - switch t.Kind() { - case reflect.Bool: - return strconv.FormatBool(v.Bool()) - case reflect.String: - return prettyString(v.String()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(int64(v.Int()), 10) - case reflect.Uint, 
reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(uint64(v.Uint()), 10) - case reflect.Float32: - return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) - case reflect.Float64: - return strconv.FormatFloat(v.Float(), 'f', -1, 64) - case reflect.Complex64: - return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` - case reflect.Complex128: - return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` - case reflect.Struct: - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - printComma := false // testing i>0 is not enough because of JSON omitted fields - for i := 0; i < t.NumField(); i++ { - fld := t.Field(i) - if fld.PkgPath != "" { - // reflect says this field is only defined for non-exported fields. - continue - } - if !v.Field(i).CanInterface() { - // reflect isn't clear exactly what this means, but we can't use it. - continue - } - name := "" - omitempty := false - if tag, found := fld.Tag.Lookup("json"); found { - if tag == "-" { - continue - } - if comma := strings.Index(tag, ","); comma != -1 { - if n := tag[:comma]; n != "" { - name = n - } - rest := tag[comma:] - if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { - omitempty = true - } - } else { - name = tag - } - } - if omitempty && isEmpty(v.Field(i)) { - continue - } - if printComma { - buf.WriteByte(f.comma()) - } - printComma = true // if we got here, we are rendering a field - if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) - continue - } - if name == "" { - name = fld.Name - } - // field names can't contain characters which need escaping - buf.WriteString(f.quoted(name, false)) - buf.WriteByte(f.colon()) - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return 
buf.String() - case reflect.Slice, reflect.Array: - // If this is outputing as JSON make sure this isn't really a json.RawMessage. - // If so just emit "as-is" and don't pretty it as that will just print - // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. - if f.outputFormat == outputJSON { - if rm, ok := value.(json.RawMessage); ok { - // If it's empty make sure we emit an empty value as the array style would below. - if len(rm) > 0 { - buf.Write(rm) - } else { - buf.WriteString("null") - } - return buf.String() - } - } - buf.WriteByte('[') - for i := 0; i < v.Len(); i++ { - if i > 0 { - buf.WriteByte(f.comma()) - } - e := v.Index(i) - buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) - } - buf.WriteByte(']') - return buf.String() - case reflect.Map: - buf.WriteByte('{') - // This does not sort the map keys, for best perf. - it := v.MapRange() - i := 0 - for it.Next() { - if i > 0 { - buf.WriteByte(f.comma()) - } - // If a map key supports TextMarshaler, use it. - keystr := "" - if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { - txt, err := m.MarshalText() - if err != nil { - keystr = fmt.Sprintf("", err.Error()) - } else { - keystr = string(txt) - } - keystr = prettyString(keystr) - } else { - // prettyWithFlags will produce already-escaped values - keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) - if t.Key().Kind() != reflect.String { - // JSON only does string keys. Unlike Go's standard JSON, we'll - // convert just about anything to a string. 
- keystr = prettyString(keystr) - } - } - buf.WriteString(keystr) - buf.WriteByte(f.colon()) - buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) - i++ - } - buf.WriteByte('}') - return buf.String() - case reflect.Ptr, reflect.Interface: - if v.IsNil() { - return "null" - } - return f.prettyWithFlags(v.Elem().Interface(), 0, depth) - } - return fmt.Sprintf(`""`, t.Kind().String()) -} - -func prettyString(s string) string { - // Avoid escaping (which does allocations) if we can. - if needsEscape(s) { - return strconv.Quote(s) - } - b := bytes.NewBuffer(make([]byte, 0, 1024)) - b.WriteByte('"') - b.WriteString(s) - b.WriteByte('"') - return b.String() -} - -// needsEscape determines whether the input string needs to be escaped or not, -// without doing any allocations. -func needsEscape(s string) bool { - for _, r := range s { - if !strconv.IsPrint(r) || r == '\\' || r == '"' { - return true - } - } - return false -} - -func isEmpty(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func invokeMarshaler(m logr.Marshaler) (ret any) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return m.MarshalLog() -} - -func invokeStringer(s fmt.Stringer) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return s.String() -} - -func invokeError(e error) (ret string) { - defer func() { - if r := recover(); r != nil 
{ - ret = fmt.Sprintf("", r) - } - }() - return e.Error() -} - -// Caller represents the original call site for a log line, after considering -// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and -// Line fields will always be provided, while the Func field is optional. -// Users can set the render hook fields in Options to examine logged key-value -// pairs, one of which will be {"caller", Caller} if the Options.LogCaller -// field is enabled for the given MessageClass. -type Caller struct { - // File is the basename of the file for this call site. - File string `json:"file"` - // Line is the line number in the file for this call site. - Line int `json:"line"` - // Func is the function name for this call site, or empty if - // Options.LogCallerFunc is not enabled. - Func string `json:"function,omitempty"` -} - -func (f Formatter) caller() Caller { - // +1 for this frame, +1 for Info/Error. - pc, file, line, ok := runtime.Caller(f.depth + 2) - if !ok { - return Caller{"", 0, ""} - } - fn := "" - if f.opts.LogCallerFunc { - if fp := runtime.FuncForPC(pc); fp != nil { - fn = fp.Name() - } - } - - return Caller{filepath.Base(file), line, fn} -} - -const noValue = "" - -func (f Formatter) nonStringKey(v any) string { - return fmt.Sprintf("", f.snippet(v)) -} - -// snippet produces a short snippet string of an arbitrary value. -func (f Formatter) snippet(v any) string { - const snipLen = 16 - - snip := f.pretty(v) - if len(snip) > snipLen { - snip = snip[:snipLen] - } - return snip -} - -// sanitize ensures that a list of key-value pairs has a value for every key -// (adding a value if needed) and that each key is a string (substituting a key -// if needed). 
-func (f Formatter) sanitize(kvList []any) []any { - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - for i := 0; i < len(kvList); i += 2 { - _, ok := kvList[i].(string) - if !ok { - kvList[i] = f.nonStringKey(kvList[i]) - } - } - return kvList -} - -// startGroup opens a new group scope (basically a sub-struct), which locks all -// the current saved values and starts them anew. This is needed to satisfy -// slog. -func (f *Formatter) startGroup(name string) { - // Unnamed groups are just inlined. - if name == "" { - return - } - - n := len(f.groups) - f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) - - // Start collecting new values. - f.groupName = name - f.valuesStr = "" - f.values = nil -} - -// Init configures this Formatter from runtime info, such as the call depth -// imposed by logr itself. -// Note that this receiver is a pointer, so depth can be saved. -func (f *Formatter) Init(info logr.RuntimeInfo) { - f.depth += info.CallDepth -} - -// Enabled checks whether an info message at the given level should be logged. -func (f Formatter) Enabled(level int) bool { - return level <= f.opts.Verbosity -} - -// GetDepth returns the current depth of this Formatter. This is useful for -// implementations which do their own caller attribution. -func (f Formatter) GetDepth() int { - return f.depth -} - -// FormatInfo renders an Info log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. 
-func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { - args := make([]any, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Info { - args = append(args, "caller", f.caller()) - } - if key := *f.opts.LogInfoLevel; key != "" { - args = append(args, key, level) - } - args = append(args, "msg", msg) - return prefix, f.render(args, kvList) -} - -// FormatError renders an Error log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { - args := make([]any, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Error { - args = append(args, "caller", f.caller()) - } - args = append(args, "msg", msg) - var loggableErr any - if err != nil { - loggableErr = err.Error() - } - args = append(args, "error", loggableErr) - return prefix, f.render(args, kvList) -} - -// AddName appends the specified name. funcr uses '/' characters to separate -// name elements. Callers should not pass '/' in the provided name string, but -// this library does not actually enforce that. -func (f *Formatter) AddName(name string) { - if len(f.prefix) > 0 { - f.prefix += "/" - } - f.prefix += name -} - -// AddValues adds key-value pairs to the set of saved values to be logged with -// each log line. 
-func (f *Formatter) AddValues(kvList []any) { - // Three slice args forces a copy. - n := len(f.values) - f.values = append(f.values[:n:n], kvList...) - - vals := f.values - if hook := f.opts.RenderValuesHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - - // Pre-render values, so we don't have to do it on each Info/Error call. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, true) // escape user-provided keys - f.valuesStr = buf.String() -} - -// AddCallDepth increases the number of stack-frames to skip when attributing -// the log line to a file and line. -func (f *Formatter) AddCallDepth(depth int) { - f.depth += depth -} diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go deleted file mode 100644 index 7bd84761e2..0000000000 --- a/vendor/github.com/go-logr/logr/funcr/slogsink.go +++ /dev/null @@ -1,105 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package funcr - -import ( - "context" - "log/slog" - - "github.com/go-logr/logr" -) - -var _ logr.SlogSink = &fnlogger{} - -const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink - -func (l fnlogger) Handle(_ context.Context, record slog.Record) error { - kvList := make([]any, 0, 2*record.NumAttrs()) - record.Attrs(func(attr slog.Attr) bool { - kvList = attrToKVs(attr, kvList) - return true - }) - - if record.Level >= slog.LevelError { - l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...) - } else { - level := l.levelFromSlog(record.Level) - l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...) - } - return nil -} - -func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { - kvList := make([]any, 0, 2*len(attrs)) - for _, attr := range attrs { - kvList = attrToKVs(attr, kvList) - } - l.AddValues(kvList) - return &l -} - -func (l fnlogger) WithGroup(name string) logr.SlogSink { - l.startGroup(name) - return &l -} - -// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups -// and other details of slog. -func attrToKVs(attr slog.Attr, kvList []any) []any { - attrVal := attr.Value.Resolve() - if attrVal.Kind() == slog.KindGroup { - groupVal := attrVal.Group() - grpKVs := make([]any, 0, 2*len(groupVal)) - for _, attr := range groupVal { - grpKVs = attrToKVs(attr, grpKVs) - } - if attr.Key == "" { - // slog says we have to inline these - kvList = append(kvList, grpKVs...) - } else { - kvList = append(kvList, attr.Key, PseudoStruct(grpKVs)) - } - } else if attr.Key != "" { - kvList = append(kvList, attr.Key, attrVal.Any()) - } - - return kvList -} - -// levelFromSlog adjusts the level by the logger's verbosity and negates it. -// It ensures that the result is >= 0. This is necessary because the result is -// passed to a LogSink and that API did not historically document whether -// levels could be negative or what that meant. 
-// -// Some example usage: -// -// logrV0 := getMyLogger() -// logrV2 := logrV0.V(2) -// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) -// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) -// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) -// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) -func (l fnlogger) levelFromSlog(level slog.Level) int { - result := -level - if result < 0 { - result = 0 // because LogSink doesn't expect negative V levels - } - return int(result) -} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go deleted file mode 100644 index b4428e105b..0000000000 --- a/vendor/github.com/go-logr/logr/logr.go +++ /dev/null @@ -1,520 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This design derives from Dave Cheney's blog: -// http://dave.cheney.net/2015/11/05/lets-talk-about-logging - -// Package logr defines a general-purpose logging API and abstract interfaces -// to back that API. Packages in the Go ecosystem can depend on this package, -// while callers can implement logging with whatever backend is appropriate. -// -// # Usage -// -// Logging is done using a Logger instance. Logger is a concrete type with -// methods, which defers the actual logging to a LogSink interface. The main -// methods of Logger are Info() and Error(). 
Arguments to Info() and Error() -// are key/value pairs rather than printf-style formatted strings, emphasizing -// "structured logging". -// -// With Go's standard log package, we might write: -// -// log.Printf("setting target value %s", targetValue) -// -// With logr's structured logging, we'd write: -// -// logger.Info("setting target", "value", targetValue) -// -// Errors are much the same. Instead of: -// -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) -// -// We'd write: -// -// logger.Error(err, "failed to open the pod bay door", "user", user) -// -// Info() and Error() are very similar, but they are separate methods so that -// LogSink implementations can choose to do things like attach additional -// information (such as stack traces) on calls to Error(). Error() messages are -// always logged, regardless of the current verbosity. If there is no error -// instance available, passing nil is valid. -// -// # Verbosity -// -// Often we want to log information only when the application in "verbose -// mode". To write log lines that are more verbose, Logger has a V() method. -// The higher the V-level of a log line, the less critical it is considered. -// Log-lines with V-levels that are not enabled (as per the LogSink) will not -// be written. Level V(0) is the default, and logger.V(0).Info() has the same -// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). -// Error messages do not have a verbosity level and are always logged. -// -// Where we might have written: -// -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } -// -// We can write: -// -// logger.V(2).Info("an unusual thing happened") -// -// # Logger Names -// -// Logger instances can have name strings so that all messages logged through -// that instance have additional context. 
For example, you might want to add -// a subsystem name: -// -// logger.WithName("compactor").Info("started", "time", time.Now()) -// -// The WithName() method returns a new Logger, which can be passed to -// constructors or other functions for further use. Repeated use of WithName() -// will accumulate name "segments". These name segments will be joined in some -// way by the LogSink implementation. It is strongly recommended that name -// segments contain simple identifiers (letters, digits, and hyphen), and do -// not contain characters that could muddle the log output or confuse the -// joining operation (e.g. whitespace, commas, periods, slashes, brackets, -// quotes, etc). -// -// # Saved Values -// -// Logger instances can store any number of key/value pairs, which will be -// logged alongside all messages logged through that instance. For example, -// you might want to create a Logger instance per managed object: -// -// With the standard log package, we might write: -// -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) -// -// With logr we'd write: -// -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) -// -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) -// -// # Best Practices -// -// Logger has very few hard rules, with the goal that LogSink implementations -// might have a lot of freedom to differentiate. There are, however, some -// things to consider. -// -// The log message consists of a constant message attached to the log line. -// This should generally be a simple description of what's occurring, and should -// never be a format string. Variable information can then be attached using -// named values. -// -// Keys are arbitrary strings, but should generally be constant values. 
Values -// may be any Go value, but how the value is formatted is determined by the -// LogSink implementation. -// -// Logger instances are meant to be passed around by value. Code that receives -// such a value can call its methods without having to check whether the -// instance is ready for use. -// -// The zero logger (= Logger{}) is identical to Discard() and discards all log -// entries. Code that receives a Logger by value can simply call it, the methods -// will never crash. For cases where passing a logger is optional, a pointer to Logger -// should be used. -// -// # Key Naming Conventions -// -// Keys are not strictly required to conform to any specification or regex, but -// it is recommended that they: -// - be human-readable and meaningful (not auto-generated or simple ordinals) -// - be constant (not dependent on input data) -// - contain only printable characters -// - not contain whitespace or punctuation -// - use lower case for simple keys and lowerCamelCase for more complex ones -// -// These guidelines help ensure that log data is processed properly regardless -// of the log implementation. For example, log implementations will try to -// output JSON data or will store data for later database (e.g. SQL) queries. 
-// -// While users are generally free to use key names of their choice, it's -// generally best to avoid using the following keys, as they're frequently used -// by implementations: -// - "caller": the calling information (file/line) of a particular log line -// - "error": the underlying error value in the `Error` method -// - "level": the log level -// - "logger": the name of the associated logger -// - "msg": the log message -// - "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// - "ts": the timestamp for a log line -// -// Implementations are encouraged to make use of these keys to represent the -// above concepts, when necessary (for example, in a pure-JSON output form, it -// would be necessary to represent at least message and timestamp as ordinary -// named values). -// -// # Break Glass -// -// Implementations may choose to give callers access to the underlying -// logging implementation. The recommended pattern for this is: -// -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } -// -// Logger grants access to the sink to enable type assertions like this: -// -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink().(impl.Underlier); ok { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } -// -// Custom `With*` functions can be implemented by copying the complete -// Logger struct and replacing the sink in the copy: -// -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. 
-// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } -// -// Don't use New to construct a new Logger with a LogSink retrieved from an -// existing Logger. Source code attribution might not work correctly and -// unexported fields in Logger get lost. -// -// Beware that the same LogSink instance may be shared by different logger -// instances. Calling functions that modify the LogSink will affect all of -// those. -package logr - -// New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. Passing a nil sink will create -// a Logger which discards all log lines. -func New(sink LogSink) Logger { - logger := Logger{} - logger.setSink(sink) - if sink != nil { - sink.Init(runtimeInfo) - } - return logger -} - -// setSink stores the sink and updates any related fields. It mutates the -// logger and thus is only safe to use for loggers that are not currently being -// used concurrently. -func (l *Logger) setSink(sink LogSink) { - l.sink = sink -} - -// GetSink returns the stored sink. -func (l Logger) GetSink() LogSink { - return l.sink -} - -// WithSink returns a copy of the logger with the new sink. -func (l Logger) WithSink(sink LogSink) Logger { - l.setSink(sink) - return l -} - -// Logger is an interface to an abstract logging implementation. This is a -// concrete type for performance reasons, but all the real work is passed on to -// a LogSink. Implementations of LogSink should provide their own constructors -// that return Logger, not LogSink. -// -// The underlying sink can be accessed through GetSink and be modified through -// WithSink. This enables the implementation of custom extensions (see "Break -// Glass" in the package documentation). Normally the sink should be used only -// indirectly. 
-type Logger struct { - sink LogSink - level int -} - -// Enabled tests whether this Logger is enabled. For example, commandline -// flags might be used to set the logging verbosity and disable some info logs. -func (l Logger) Enabled() bool { - // Some implementations of LogSink look at the caller in Enabled (e.g. - // different verbosity levels per package or file), but we only pass one - // CallDepth in (via Init). This means that all calls from Logger to the - // LogSink's Enabled, Info, and Error methods must have the same number of - // frames. In other words, Logger methods can't call other Logger methods - // which call these LogSink methods unless we do it the same in all paths. - return l.sink != nil && l.sink.Enabled(l.level) -} - -// Info logs a non-error message with the given key/value pairs as context. -// -// The msg argument should be used to add some constant description to the log -// line. The key/value pairs can then be used to add additional variable -// information. The key/value pairs must alternate string keys and arbitrary -// values. -func (l Logger) Info(msg string, keysAndValues ...any) { - if l.sink == nil { - return - } - if l.sink.Enabled(l.level) { // see comment in Enabled - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - withHelper.GetCallStackHelper()() - } - l.sink.Info(l.level, msg, keysAndValues...) - } -} - -// Error logs an error, with the given message and key/value pairs as context. -// It functions similarly to Info, but may have unique behavior, and should be -// preferred for logging errors (see the package documentations for more -// information). The log message will always be emitted, regardless of -// verbosity level. -// -// The msg argument should be used to add context to any underlying error, -// while the err argument should be used to attach the actual error that -// triggered this log line, if present. The err parameter is optional -// and nil may be passed instead of an error instance. 
-func (l Logger) Error(err error, msg string, keysAndValues ...any) { - if l.sink == nil { - return - } - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - withHelper.GetCallStackHelper()() - } - l.sink.Error(err, msg, keysAndValues...) -} - -// V returns a new Logger instance for a specific verbosity level, relative to -// this Logger. In other words, V-levels are additive. A higher verbosity -// level means a log message is less important. Negative V-levels are treated -// as 0. -func (l Logger) V(level int) Logger { - if l.sink == nil { - return l - } - if level < 0 { - level = 0 - } - l.level += level - return l -} - -// GetV returns the verbosity level of the logger. If the logger's LogSink is -// nil as in the Discard logger, this will always return 0. -func (l Logger) GetV() int { - // 0 if l.sink nil because of the if check in V above. - return l.level -} - -// WithValues returns a new Logger instance with additional key/value pairs. -// See Info for documentation on how key/value pairs work. -func (l Logger) WithValues(keysAndValues ...any) Logger { - if l.sink == nil { - return l - } - l.setSink(l.sink.WithValues(keysAndValues...)) - return l -} - -// WithName returns a new Logger instance with the specified name element added -// to the Logger's name. Successive calls with WithName append additional -// suffixes to the Logger's name. It's strongly recommended that name segments -// contain only letters, digits, and hyphens (see the package documentation for -// more information). -func (l Logger) WithName(name string) Logger { - if l.sink == nil { - return l - } - l.setSink(l.sink.WithName(name)) - return l -} - -// WithCallDepth returns a Logger instance that offsets the call stack by the -// specified number of frames when logging call site information, if possible. -// This is useful for users who have helper functions between the "real" call -// site and the actual calls to Logger methods. 
If depth is 0 the attribution -// should be to the direct caller of this function. If depth is 1 the -// attribution should skip 1 call frame, and so on. Successive calls to this -// are additive. -// -// If the underlying log implementation supports a WithCallDepth(int) method, -// it will be called and the result returned. If the implementation does not -// support CallDepthLogSink, the original Logger will be returned. -// -// To skip one level, WithCallStackHelper() should be used instead of -// WithCallDepth(1) because it works with implementions that support the -// CallDepthLogSink and/or CallStackHelperLogSink interfaces. -func (l Logger) WithCallDepth(depth int) Logger { - if l.sink == nil { - return l - } - if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { - l.setSink(withCallDepth.WithCallDepth(depth)) - } - return l -} - -// WithCallStackHelper returns a new Logger instance that skips the direct -// caller when logging call site information, if possible. This is useful for -// users who have helper functions between the "real" call site and the actual -// calls to Logger methods and want to support loggers which depend on marking -// each individual helper function, like loggers based on testing.T. -// -// In addition to using that new logger instance, callers also must call the -// returned function. -// -// If the underlying log implementation supports a WithCallDepth(int) method, -// WithCallDepth(1) will be called to produce a new logger. If it supports a -// WithCallStackHelper() method, that will be also called. If the -// implementation does not support either of these, the original Logger will be -// returned. 
-func (l Logger) WithCallStackHelper() (func(), Logger) { - if l.sink == nil { - return func() {}, l - } - var helper func() - if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { - l.setSink(withCallDepth.WithCallDepth(1)) - } - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - helper = withHelper.GetCallStackHelper() - } else { - helper = func() {} - } - return helper, l -} - -// IsZero returns true if this logger is an uninitialized zero value -func (l Logger) IsZero() bool { - return l.sink == nil -} - -// RuntimeInfo holds information that the logr "core" library knows which -// LogSinks might want to know. -type RuntimeInfo struct { - // CallDepth is the number of call frames the logr library adds between the - // end-user and the LogSink. LogSink implementations which choose to print - // the original logging site (e.g. file & line) should climb this many - // additional frames to find it. - CallDepth int -} - -// runtimeInfo is a static global. It must not be changed at run time. -var runtimeInfo = RuntimeInfo{ - CallDepth: 1, -} - -// LogSink represents a logging implementation. End-users will generally not -// interact with this type. -type LogSink interface { - // Init receives optional information about the logr library for LogSink - // implementations that need it. - Init(info RuntimeInfo) - - // Enabled tests whether this LogSink is enabled at the specified V-level. - // For example, commandline flags might be used to set the logging - // verbosity and disable some info logs. - Enabled(level int) bool - - // Info logs a non-error message with the given key/value pairs as context. - // The level argument is provided for optional logging. This method will - // only be called when Enabled(level) is true. See Logger.Info for more - // details. - Info(level int, msg string, keysAndValues ...any) - - // Error logs an error, with the given message and key/value pairs as - // context. See Logger.Error for more details. 
- Error(err error, msg string, keysAndValues ...any) - - // WithValues returns a new LogSink with additional key/value pairs. See - // Logger.WithValues for more details. - WithValues(keysAndValues ...any) LogSink - - // WithName returns a new LogSink with the specified name appended. See - // Logger.WithName for more details. - WithName(name string) LogSink -} - -// CallDepthLogSink represents a LogSink that knows how to climb the call stack -// to identify the original call site and can offset the depth by a specified -// number of frames. This is useful for users who have helper functions -// between the "real" call site and the actual calls to Logger methods. -// Implementations that log information about the call site (such as file, -// function, or line) would otherwise log information about the intermediate -// helper functions. -// -// This is an optional interface and implementations are not required to -// support it. -type CallDepthLogSink interface { - // WithCallDepth returns a LogSink that will offset the call - // stack by the specified number of frames when logging call - // site information. - // - // If depth is 0, the LogSink should skip exactly the number - // of call frames defined in RuntimeInfo.CallDepth when Info - // or Error are called, i.e. the attribution should be to the - // direct caller of Logger.Info or Logger.Error. - // - // If depth is 1 the attribution should skip 1 call frame, and so on. - // Successive calls to this are additive. - WithCallDepth(depth int) LogSink -} - -// CallStackHelperLogSink represents a LogSink that knows how to climb -// the call stack to identify the original call site and can skip -// intermediate helper functions if they mark themselves as -// helper. Go's testing package uses that approach. -// -// This is useful for users who have helper functions between the -// "real" call site and the actual calls to Logger methods. 
-// Implementations that log information about the call site (such as -// file, function, or line) would otherwise log information about the -// intermediate helper functions. -// -// This is an optional interface and implementations are not required -// to support it. Implementations that choose to support this must not -// simply implement it as WithCallDepth(1), because -// Logger.WithCallStackHelper will call both methods if they are -// present. This should only be implemented for LogSinks that actually -// need it, as with testing.T. -type CallStackHelperLogSink interface { - // GetCallStackHelper returns a function that must be called - // to mark the direct caller as helper function when logging - // call site information. - GetCallStackHelper() func() -} - -// Marshaler is an optional interface that logged values may choose to -// implement. Loggers with structured output, such as JSON, should -// log the object return by the MarshalLog method instead of the -// original value. -type Marshaler interface { - // MarshalLog can be used to: - // - ensure that structs are not logged as strings when the original - // value has a String method: return a different type without a - // String method - // - select which fields of a complex type should get logged: - // return a simpler struct with fewer fields - // - log unexported fields: return a different struct - // with exported fields - // - // It may return any value of any type. - MarshalLog() any -} diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go deleted file mode 100644 index 82d1ba4948..0000000000 --- a/vendor/github.com/go-logr/logr/sloghandler.go +++ /dev/null @@ -1,192 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" - "log/slog" -) - -type slogHandler struct { - // May be nil, in which case all logs get discarded. - sink LogSink - // Non-nil if sink is non-nil and implements SlogSink. - slogSink SlogSink - - // groupPrefix collects values from WithGroup calls. It gets added as - // prefix to value keys when handling a log record. - groupPrefix string - - // levelBias can be set when constructing the handler to influence the - // slog.Level of log records. A positive levelBias reduces the - // slog.Level value. slog has no API to influence this value after the - // handler got created, so it can only be set indirectly through - // Logger.V. - levelBias slog.Level -} - -var _ slog.Handler = &slogHandler{} - -// groupSeparator is used to concatenate WithGroup names and attribute keys. -const groupSeparator = "." - -// GetLevel is used for black box unit testing. -func (l *slogHandler) GetLevel() slog.Level { - return l.levelBias -} - -func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { - return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) -} - -func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { - if l.slogSink != nil { - // Only adjust verbosity level of log entries < slog.LevelError. - if record.Level < slog.LevelError { - record.Level -= l.levelBias - } - return l.slogSink.Handle(ctx, record) - } - - // No need to check for nil sink here because Handle will only be called - // when Enabled returned true. 
- - kvList := make([]any, 0, 2*record.NumAttrs()) - record.Attrs(func(attr slog.Attr) bool { - kvList = attrToKVs(attr, l.groupPrefix, kvList) - return true - }) - if record.Level >= slog.LevelError { - l.sinkWithCallDepth().Error(nil, record.Message, kvList...) - } else { - level := l.levelFromSlog(record.Level) - l.sinkWithCallDepth().Info(level, record.Message, kvList...) - } - return nil -} - -// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info -// are called by Handle, code in slog gets skipped. -// -// This offset currently (Go 1.21.0) works for calls through -// slog.New(ToSlogHandler(...)). There's no guarantee that the call -// chain won't change. Wrapping the handler will also break unwinding. It's -// still better than not adjusting at all.... -// -// This cannot be done when constructing the handler because FromSlogHandler needs -// access to the original sink without this adjustment. A second copy would -// work, but then WithAttrs would have to be called for both of them. -func (l *slogHandler) sinkWithCallDepth() LogSink { - if sink, ok := l.sink.(CallDepthLogSink); ok { - return sink.WithCallDepth(2) - } - return l.sink -} - -func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { - if l.sink == nil || len(attrs) == 0 { - return l - } - - clone := *l - if l.slogSink != nil { - clone.slogSink = l.slogSink.WithAttrs(attrs) - clone.sink = clone.slogSink - } else { - kvList := make([]any, 0, 2*len(attrs)) - for _, attr := range attrs { - kvList = attrToKVs(attr, l.groupPrefix, kvList) - } - clone.sink = l.sink.WithValues(kvList...) 
- } - return &clone -} - -func (l *slogHandler) WithGroup(name string) slog.Handler { - if l.sink == nil { - return l - } - if name == "" { - // slog says to inline empty groups - return l - } - clone := *l - if l.slogSink != nil { - clone.slogSink = l.slogSink.WithGroup(name) - clone.sink = clone.slogSink - } else { - clone.groupPrefix = addPrefix(clone.groupPrefix, name) - } - return &clone -} - -// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups -// and other details of slog. -func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { - attrVal := attr.Value.Resolve() - if attrVal.Kind() == slog.KindGroup { - groupVal := attrVal.Group() - grpKVs := make([]any, 0, 2*len(groupVal)) - prefix := groupPrefix - if attr.Key != "" { - prefix = addPrefix(groupPrefix, attr.Key) - } - for _, attr := range groupVal { - grpKVs = attrToKVs(attr, prefix, grpKVs) - } - kvList = append(kvList, grpKVs...) - } else if attr.Key != "" { - kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) - } - - return kvList -} - -func addPrefix(prefix, name string) string { - if prefix == "" { - return name - } - if name == "" { - return prefix - } - return prefix + groupSeparator + name -} - -// levelFromSlog adjusts the level by the logger's verbosity and negates it. -// It ensures that the result is >= 0. This is necessary because the result is -// passed to a LogSink and that API did not historically document whether -// levels could be negative or what that meant. 
-// -// Some example usage: -// -// logrV0 := getMyLogger() -// logrV2 := logrV0.V(2) -// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) -// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) -// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) -// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) -func (l *slogHandler) levelFromSlog(level slog.Level) int { - result := -level - result += l.levelBias // in case the original Logger had a V level - if result < 0 { - result = 0 // because LogSink doesn't expect negative V levels - } - return int(result) -} diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go deleted file mode 100644 index 28a83d0243..0000000000 --- a/vendor/github.com/go-logr/logr/slogr.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" - "log/slog" -) - -// FromSlogHandler returns a Logger which writes to the slog.Handler. -// -// The logr verbosity level is mapped to slog levels such that V(0) becomes -// slog.LevelInfo and V(4) becomes slog.LevelDebug. 
-func FromSlogHandler(handler slog.Handler) Logger { - if handler, ok := handler.(*slogHandler); ok { - if handler.sink == nil { - return Discard() - } - return New(handler.sink).V(int(handler.levelBias)) - } - return New(&slogSink{handler: handler}) -} - -// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. -// -// The returned logger writes all records with level >= slog.LevelError as -// error log entries with LogSink.Error, regardless of the verbosity level of -// the Logger: -// -// logger := -// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) -// -// The level of all other records gets reduced by the verbosity -// level of the Logger and the result is negated. If it happens -// to be negative, then it gets replaced by zero because a LogSink -// is not expected to handled negative levels: -// -// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) -// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) -func ToSlogHandler(logger Logger) slog.Handler { - if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { - return sink.handler - } - - handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} - if slogSink, ok := handler.sink.(SlogSink); ok { - handler.slogSink = slogSink - } - return handler -} - -// SlogSink is an optional interface that a LogSink can implement to support -// logging through the slog.Logger or slog.Handler APIs better. It then should -// also support special slog values like slog.Group. 
When used as a -// slog.Handler, the advantages are: -// -// - stack unwinding gets avoided in favor of logging the pre-recorded PC, -// as intended by slog -// - proper grouping of key/value pairs via WithGroup -// - verbosity levels > slog.LevelInfo can be recorded -// - less overhead -// -// Both APIs (Logger and slog.Logger/Handler) then are supported equally -// well. Developers can pick whatever API suits them better and/or mix -// packages which use either API in the same binary with a common logging -// implementation. -// -// This interface is necessary because the type implementing the LogSink -// interface cannot also implement the slog.Handler interface due to the -// different prototype of the common Enabled method. -// -// An implementation could support both interfaces in two different types, but then -// additional interfaces would be needed to convert between those types in FromSlogHandler -// and ToSlogHandler. -type SlogSink interface { - LogSink - - Handle(ctx context.Context, record slog.Record) error - WithAttrs(attrs []slog.Attr) SlogSink - WithGroup(name string) SlogSink -} diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go deleted file mode 100644 index 4060fcbc2b..0000000000 --- a/vendor/github.com/go-logr/logr/slogsink.go +++ /dev/null @@ -1,120 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package logr - -import ( - "context" - "log/slog" - "runtime" - "time" -) - -var ( - _ LogSink = &slogSink{} - _ CallDepthLogSink = &slogSink{} - _ Underlier = &slogSink{} -) - -// Underlier is implemented by the LogSink returned by NewFromLogHandler. -type Underlier interface { - // GetUnderlying returns the Handler used by the LogSink. - GetUnderlying() slog.Handler -} - -const ( - // nameKey is used to log the `WithName` values as an additional attribute. - nameKey = "logger" - - // errKey is used to log the error parameter of Error as an additional attribute. - errKey = "err" -) - -type slogSink struct { - callDepth int - name string - handler slog.Handler -} - -func (l *slogSink) Init(info RuntimeInfo) { - l.callDepth = info.CallDepth -} - -func (l *slogSink) GetUnderlying() slog.Handler { - return l.handler -} - -func (l *slogSink) WithCallDepth(depth int) LogSink { - newLogger := *l - newLogger.callDepth += depth - return &newLogger -} - -func (l *slogSink) Enabled(level int) bool { - return l.handler.Enabled(context.Background(), slog.Level(-level)) -} - -func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { - l.log(nil, msg, slog.Level(-level), kvList...) -} - -func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { - l.log(err, msg, slog.LevelError, kvList...) -} - -func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { - var pcs [1]uintptr - // skip runtime.Callers, this function, Info/Error, and all helper functions above that. - runtime.Callers(3+l.callDepth, pcs[:]) - - record := slog.NewRecord(time.Now(), level, msg, pcs[0]) - if l.name != "" { - record.AddAttrs(slog.String(nameKey, l.name)) - } - if err != nil { - record.AddAttrs(slog.Any(errKey, err)) - } - record.Add(kvList...) 
- _ = l.handler.Handle(context.Background(), record) -} - -func (l slogSink) WithName(name string) LogSink { - if l.name != "" { - l.name += "/" - } - l.name += name - return &l -} - -func (l slogSink) WithValues(kvList ...interface{}) LogSink { - l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) - return &l -} - -func kvListToAttrs(kvList ...interface{}) []slog.Attr { - // We don't need the record itself, only its Add method. - record := slog.NewRecord(time.Time{}, 0, "", 0) - record.Add(kvList...) - attrs := make([]slog.Attr, 0, record.NumAttrs()) - record.Attrs(func(attr slog.Attr) bool { - attrs = append(attrs, attr) - return true - }) - return attrs -} diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/go-logr/stdr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md deleted file mode 100644 index 5158667890..0000000000 --- a/vendor/github.com/go-logr/stdr/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Minimal Go logging using logr and Go's standard library - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) - -This package implements the [logr interface](https://github.com/go-logr/logr) -in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go deleted file mode 100644 index 93a8aab51b..0000000000 --- a/vendor/github.com/go-logr/stdr/stdr.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package stdr implements github.com/go-logr/logr.Logger in terms of -// Go's standard log package. -package stdr - -import ( - "log" - "os" - - "github.com/go-logr/logr" - "github.com/go-logr/logr/funcr" -) - -// The global verbosity level. See SetVerbosity(). -var globalVerbosity int - -// SetVerbosity sets the global level against which all info logs will be -// compared. If this is greater than or equal to the "V" of the logger, the -// message will be logged. A higher value here means more logs will be written. -// The previous verbosity value is returned. This is not concurrent-safe - -// callers must be sure to call it from only one goroutine. -func SetVerbosity(v int) int { - old := globalVerbosity - globalVerbosity = v - return old -} - -// New returns a logr.Logger which is implemented by Go's standard log package, -// or something like it. If std is nil, this will use a default logger -// instead. -// -// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -func New(std StdLogger) logr.Logger { - return NewWithOptions(std, Options{}) -} - -// NewWithOptions returns a logr.Logger which is implemented by Go's standard -// log package, or something like it. See New for details. -func NewWithOptions(std StdLogger, opts Options) logr.Logger { - if std == nil { - // Go's log.Default() is only available in 1.16 and higher. - std = log.New(os.Stderr, "", log.LstdFlags) - } - - if opts.Depth < 0 { - opts.Depth = 0 - } - - fopts := funcr.Options{ - LogCaller: funcr.MessageClass(opts.LogCaller), - } - - sl := &logger{ - Formatter: funcr.NewFormatter(fopts), - std: std, - } - - // For skipping our own logger.Info/Error. - sl.Formatter.AddCallDepth(1 + opts.Depth) - - return logr.New(sl) -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // Depth biases the assumed number of call frames to the "true" caller. 
- // This is useful when the calling code calls a function which then calls - // stdr (e.g. a logging shim to another API). Values less than zero will - // be treated as zero. - Depth int - - // LogCaller tells stdr to add a "caller" key to some or all log lines. - // Go's log package has options to log this natively, too. - LogCaller MessageClass - - // TODO: add an option to log the date/time -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. - Error -) - -// StdLogger is the subset of the Go stdlib log.Logger API that is needed for -// this adapter. -type StdLogger interface { - // Output is the same as log.Output and log.Logger.Output. - Output(calldepth int, logline string) error -} - -type logger struct { - funcr.Formatter - std StdLogger -} - -var _ logr.LogSink = &logger{} -var _ logr.CallDepthLogSink = &logger{} - -func (l logger) Enabled(level int) bool { - return globalVerbosity >= level -} - -func (l logger) Info(level int, msg string, kvList ...interface{}) { - prefix, args := l.FormatInfo(level, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) Error(err error, msg string, kvList ...interface{}) { - prefix, args := l.FormatError(err, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l logger) WithValues(kvList ...interface{}) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l logger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -// Underlier exposes access 
to the underlying logging implementation. Since -// callers only have a logr.Logger, they have to know which implementation is -// in use, so this interface is less of an abstraction and more of way to test -// type conversion. -type Underlier interface { - GetUnderlying() StdLogger -} - -// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger -// is itself an interface, the result may or may not be a Go log.Logger. -func (l logger) GetUnderlying() StdLogger { - return l.std -} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go index fab6437647..8ad50936c0 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "strings" + "sync" "sync/atomic" "unicode" "unsafe" @@ -17,22 +18,27 @@ var ( typeAddr *runtime.TypeAddr cachedDecoderMap unsafe.Pointer // map[uintptr]decoder cachedDecoder []Decoder + initOnce sync.Once ) -func init() { - typeAddr = runtime.AnalyzeTypeAddr() - if typeAddr == nil { - typeAddr = &runtime.TypeAddr{} - } - cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) +func initDecoder() { + initOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) } func loadDecoderMap() map[uintptr]Decoder { + initDecoder() p := atomic.LoadPointer(&cachedDecoderMap) return *(*map[uintptr]Decoder)(unsafe.Pointer(&p)) } func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) { + initDecoder() newDecoderMap := make(map[uintptr]Decoder, len(m)+1) newDecoderMap[typ] = dec diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go index eb7e2b1345..025ca85b5e 100644 --- 
a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go @@ -10,6 +10,7 @@ import ( ) func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go index 49cdda4a17..023b817c36 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go @@ -13,6 +13,7 @@ import ( var decMu sync.RWMutex func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go index 37b7aa38e2..b107636890 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -5,6 +5,7 @@ import ( "encoding" "encoding/json" "reflect" + "sync" "sync/atomic" "unsafe" @@ -24,14 +25,17 @@ var ( cachedOpcodeSets []*OpcodeSet cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet typeAddr *runtime.TypeAddr + initEncoderOnce sync.Once ) -func init() { - typeAddr = runtime.AnalyzeTypeAddr() - if typeAddr == nil { - typeAddr = &runtime.TypeAddr{} - } - cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) +func initEncoder() { + initEncoderOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) } func loadOpcodeMap() 
map[uintptr]*OpcodeSet { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go index 20c93cbf70..b6f45a49b0 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go @@ -4,6 +4,7 @@ package encoder func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if err != nil { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go index 13ba23fdff..47b482f7fb 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go @@ -10,6 +10,7 @@ import ( var setsMu sync.RWMutex func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if err != nil { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go index 14eb6a0d64..b436f5b21f 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go @@ -406,6 +406,11 @@ func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{ rv = newV } } + + if rv.Kind() == reflect.Ptr && rv.IsNil() { + return AppendNull(ctx, b), nil + } + v = rv.Interface() var bb []byte if (code.Flags & MarshalerContextFlags) != 0 { diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go index 0167cd2c01..4b693cb0bb 
100644 --- a/vendor/github.com/goccy/go-json/internal/runtime/type.go +++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go @@ -2,6 +2,7 @@ package runtime import ( "reflect" + "sync" "unsafe" ) @@ -23,8 +24,8 @@ type TypeAddr struct { } var ( - typeAddr *TypeAddr - alreadyAnalyzed bool + typeAddr *TypeAddr + once sync.Once ) //go:linkname typelinks reflect.typelinks @@ -34,67 +35,64 @@ func typelinks() ([]unsafe.Pointer, [][]int32) func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer func AnalyzeTypeAddr() *TypeAddr { - defer func() { - alreadyAnalyzed = true - }() - if alreadyAnalyzed { - return typeAddr - } - sections, offsets := typelinks() - if len(sections) != 1 { - return nil - } - if len(offsets) != 1 { - return nil - } - section := sections[0] - offset := offsets[0] - var ( - min uintptr = uintptr(^uint(0)) - max uintptr = 0 - isAligned64 = true - isAligned32 = true - ) - for i := 0; i < len(offset); i++ { - typ := (*Type)(rtypeOff(section, offset[i])) - addr := uintptr(unsafe.Pointer(typ)) - if min > addr { - min = addr + once.Do(func() { + sections, offsets := typelinks() + if len(sections) != 1 { + return } - if max < addr { - max = addr + if len(offsets) != 1 { + return } - if typ.Kind() == reflect.Ptr { - addr = uintptr(unsafe.Pointer(typ.Elem())) + section := sections[0] + offset := offsets[0] + var ( + min uintptr = uintptr(^uint(0)) + max uintptr = 0 + isAligned64 = true + isAligned32 = true + ) + for i := 0; i < len(offset); i++ { + typ := (*Type)(rtypeOff(section, offset[i])) + addr := uintptr(unsafe.Pointer(typ)) if min > addr { min = addr } if max < addr { max = addr } + if typ.Kind() == reflect.Ptr { + addr = uintptr(unsafe.Pointer(typ.Elem())) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + } + isAligned64 = isAligned64 && (addr-min)&63 == 0 + isAligned32 = isAligned32 && (addr-min)&31 == 0 + } + addrRange := max - min + if addrRange == 0 { + return + } + var addrShift uintptr + if isAligned64 { + addrShift = 6 
+ } else if isAligned32 { + addrShift = 5 } - isAligned64 = isAligned64 && (addr-min)&63 == 0 - isAligned32 = isAligned32 && (addr-min)&31 == 0 - } - addrRange := max - min - if addrRange == 0 { - return nil - } - var addrShift uintptr - if isAligned64 { - addrShift = 6 - } else if isAligned32 { - addrShift = 5 - } - cacheSize := addrRange >> addrShift - if cacheSize > maxAcceptableTypeAddrRange { - return nil - } - typeAddr = &TypeAddr{ - BaseTypeAddr: min, - MaxTypeAddr: max, - AddrRange: addrRange, - AddrShift: addrShift, - } + cacheSize := addrRange >> addrShift + if cacheSize > maxAcceptableTypeAddrRange { + return + } + typeAddr = &TypeAddr{ + BaseTypeAddr: min, + MaxTypeAddr: max, + AddrRange: addrRange, + AddrShift: addrShift, + } + }) + return typeAddr } diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig deleted file mode 100644 index c6b74c3e0d..0000000000 --- a/vendor/github.com/gorilla/mux/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] -indent_style = tab -indent_size = 4 - -[*.md] -indent_size = 4 -trim_trailing_whitespace = false - -eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore deleted file mode 100644 index 84039fec68..0000000000 --- a/vendor/github.com/gorilla/mux/.gitignore +++ /dev/null @@ -1 +0,0 @@ -coverage.coverprofile diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE deleted file mode 100644 index bb9d80bc9b..0000000000 --- a/vendor/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2023 The Gorilla Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile deleted file mode 100644 index 98f5ab75f9..0000000000 --- a/vendor/github.com/gorilla/mux/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') -GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -GO_SEC=$(shell which gosec 2> /dev/null || echo '') -GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest - -GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') -GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest - -.PHONY: golangci-lint -golangci-lint: - $(if $(GO_LINT), ,go install $(GO_LINT_URI)) - @echo "##### Running golangci-lint" - golangci-lint run -v - -.PHONY: gosec -gosec: - $(if $(GO_SEC), ,go install $(GO_SEC_URI)) - @echo "##### Running gosec" - gosec ./... - -.PHONY: govulncheck -govulncheck: - $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) - @echo "##### Running govulncheck" - govulncheck ./... - -.PHONY: verify -verify: golangci-lint gosec govulncheck - -.PHONY: test -test: - @echo "##### Running tests" - go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
\ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index 382513d57c..0000000000 --- a/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,812 +0,0 @@ -# gorilla/mux - -![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg) -[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux) -[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) - - -![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) - -Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to -their respective handler. - -The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - -* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. -* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. -* URL hosts, paths and query values can have variables with an optional regular expression. -* Registered URLs can be built, or "reversed", which helps maintaining references to resources. -* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. 
- ---- - -* [Install](#install) -* [Examples](#examples) -* [Matching Routes](#matching-routes) -* [Static Files](#static-files) -* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) -* [Registered URLs](#registered-urls) -* [Walking Routes](#walking-routes) -* [Graceful Shutdown](#graceful-shutdown) -* [Middleware](#middleware) -* [Handling CORS Requests](#handling-cors-requests) -* [Testing Handlers](#testing-handlers) -* [Full Example](#full-example) - ---- - -## Install - -With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: - -```sh -go get -u github.com/gorilla/mux -``` - -## Examples - -Let's start registering a couple of URL paths and handlers: - -```go -func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) -} -``` - -Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. - -Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/products/{key}", ProductHandler) -r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: - -```go -func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, "Category: %v\n", vars["category"]) -} -``` - -And this is all you need to know about the basic usage. 
More advanced options are explained below. - -### Matching Routes - -Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: - -```go -r := mux.NewRouter() -// Only matches if domain is "www.example.com". -r.Host("www.example.com") -// Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.example.com") -``` - -There are several other matchers that can be added. To match path prefixes: - -```go -r.PathPrefix("/products/") -``` - -...or HTTP methods: - -```go -r.Methods("GET", "POST") -``` - -...or URL schemes: - -```go -r.Schemes("https") -``` - -...or header values: - -```go -r.Headers("X-Requested-With", "XMLHttpRequest") -``` - -...or query values: - -```go -r.Queries("key", "value") -``` - -...or to use a custom matcher function: - -```go -r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 -}) -``` - -...and finally, it is possible to combine several matchers in a single route: - -```go -r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") -``` - -Routes are tested in the order they were added to the router. If two routes match, the first one wins: - -```go -r := mux.NewRouter() -r.HandleFunc("/specific", specificHandler) -r.PathPrefix("/").Handler(catchAllHandler) -``` - -Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". - -For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: - -```go -r := mux.NewRouter() -s := r.Host("www.example.com").Subrouter() -``` - -Then register routes in the subrouter: - -```go -s.HandleFunc("/products/", ProductsHandler) -s.HandleFunc("/products/{key}", ProductHandler) -s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: - -```go -r := mux.NewRouter() -s := r.PathPrefix("/products").Subrouter() -// "/products/" -s.HandleFunc("/", ProductsHandler) -// "/products/{key}/" -s.HandleFunc("/{key}/", ProductHandler) -// "/products/{key}/details" -s.HandleFunc("/{key}/details", ProductDetailsHandler) -``` - - -### Static Files - -Note that the path provided to `PathPrefix()` represents a "wildcard": calling -`PathPrefix("/static/").Handler(...)` means that the handler will be passed any -request that matches "/static/\*". This makes it easy to serve static files with mux: - -```go -func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! 
- WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Serving Single Page Applications - -Most of the time it makes sense to serve your SPA on a separate web server from your API, -but sometimes it's desirable to serve them both from one place. It's possible to write a simple -handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage -mux's powerful routing for your API endpoints. - -```go -package main - -import ( - "encoding/json" - "log" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/gorilla/mux" -) - -// spaHandler implements the http.Handler interface, so we can use it -// to respond to HTTP requests. The path to the static directory and -// path to the index file within that static directory are used to -// serve the SPA in the given static directory. -type spaHandler struct { - staticPath string - indexPath string -} - -// ServeHTTP inspects the URL path to locate a file within the static dir -// on the SPA handler. If a file is found, it will be served. If not, the -// file located at the index path on the SPA handler will be served. This -// is suitable behavior for serving an SPA (single page application). 
-func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Join internally call path.Clean to prevent directory traversal - path := filepath.Join(h.staticPath, r.URL.Path) - - // check whether a file exists or is a directory at the given path - fi, err := os.Stat(path) - if os.IsNotExist(err) || fi.IsDir() { - // file does not exist or path is a directory, serve index.html - http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) - return - } - - if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stating the - // file, return a 500 internal server error and stop - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // otherwise, use http.FileServer to serve the static file - http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) -} - -func main() { - router := mux.NewRouter() - - router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { - // an example API handler - json.NewEncoder(w).Encode(map[string]bool{"ok": true}) - }) - - spa := spaHandler{staticPath: "build", indexPath: "index.html"} - router.PathPrefix("/").Handler(spa) - - srv := &http.Server{ - Handler: router, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Registered URLs - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") -``` - -To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. 
For the previous route, we would do: - -```go -url, err := r.Get("article").URL("category", "technology", "id", "42") -``` - -...and the result will be a `url.URL` with the following path: - -``` -"/articles/technology/42" -``` - -This also works for host and query value variables: - -```go -r := mux.NewRouter() -r.Host("{subdomain}.example.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - -// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") -``` - -All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - -```go -r.HeadersRegexp("Content-Type", "application/(text|json)") -``` - -...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` - -There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: - -```go -// "http://news.example.com/" -host, err := r.Get("article").URLHost("subdomain", "news") - -// "/articles/technology/42" -path, err := r.Get("article").URLPath("category", "technology", "id", "42") -``` - -And if you use subrouters, host and path defined separately can be built as well: - -```go -r := mux.NewRouter() -s := r.Host("{subdomain}.example.com").Subrouter() -s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). 
- Name("article") - -// "http://news.example.com/articles/technology/42" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -``` - -To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available: -```go -r := mux.NewRouter() -r.Host("{domain}"). - Path("/{group}/{item_id}"). - Queries("some_data1", "{some_data1}"). - Queries("some_data2", "{some_data2}"). - Name("article") - -// Will print [domain group item_id some_data1 some_data2] -fmt.Println(r.Get("article").GetVarNames()) - -``` -### Walking Routes - -The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, -the following prints all of the registered routes: - -```go -package main - -import ( - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" -) - -func handler(w http.ResponseWriter, r *http.Request) { - return -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.HandleFunc("/products", handler).Methods("POST") - r.HandleFunc("/articles", handler).Methods("GET") - r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") - r.HandleFunc("/authors", handler).Queries("surname", "{surname}") - err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - pathTemplate, err := route.GetPathTemplate() - if err == nil { - fmt.Println("ROUTE:", pathTemplate) - } - pathRegexp, err := route.GetPathRegexp() - if err == nil { - fmt.Println("Path regexp:", pathRegexp) - } - queriesTemplates, err := route.GetQueriesTemplates() - if err == nil { - fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) - } - queriesRegexps, err := route.GetQueriesRegexp() - if err == nil { - fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) - } - methods, err := route.GetMethods() - if err == nil { - fmt.Println("Methods:", strings.Join(methods, ",")) - } - fmt.Println() - return nil - 
}) - - if err != nil { - fmt.Println(err) - } - - http.Handle("/", r) -} -``` - -### Graceful Shutdown - -Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: - -```go -package main - -import ( - "context" - "flag" - "log" - "net/http" - "os" - "os/signal" - "time" - - "github.com/gorilla/mux" -) - -func main() { - var wait time.Duration - flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m") - flag.Parse() - - r := mux.NewRouter() - // Add your routes as needed - - srv := &http.Server{ - Addr: "0.0.0.0:8080", - // Good practice to set timeouts to avoid Slowloris attacks. - WriteTimeout: time.Second * 15, - ReadTimeout: time.Second * 15, - IdleTimeout: time.Second * 60, - Handler: r, // Pass our instance of gorilla/mux in. - } - - // Run our server in a goroutine so that it doesn't block. - go func() { - if err := srv.ListenAndServe(); err != nil { - log.Println(err) - } - }() - - c := make(chan os.Signal, 1) - // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) - // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. - signal.Notify(c, os.Interrupt) - - // Block until we receive our signal. - <-c - - // Create a deadline to wait for. - ctx, cancel := context.WithTimeout(context.Background(), wait) - defer cancel() - // Doesn't block if no connections, but will otherwise wait - // until the timeout deadline. - srv.Shutdown(ctx) - // Optionally, you could run srv.Shutdown in a goroutine and block on - // <-ctx.Done() if your application should wait for other services - // to finalize based on context cancellation. 
- log.Println("shutting down") - os.Exit(0) -} -``` - -### Middleware - -Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. -Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. - -Mux middlewares are defined using the de facto standard type: - -```go -type MiddlewareFunc func(http.Handler) http.Handler -``` - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers. - -A very basic middleware which logs the URI of the request being handled could be written as: - -```go -func loggingMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. 
- next.ServeHTTP(w, r) - }) -} -``` - -Middlewares can be added to a router using `Router.Use()`: - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) -r.Use(loggingMiddleware) -``` - -A more complex authentication middleware, which maps session token to users, could be written as: - -```go -// Define our struct -type authenticationMiddleware struct { - tokenUsers map[string]string -} - -// Initialize it somewhere -func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" -} - -// Middleware function, which will be called for each request -func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - // Pass down the request to the next middleware (or final handler) - next.ServeHTTP(w, r) - } else { - // Write an error and stop the handler chain - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) -} -``` - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) - -amw := authenticationMiddleware{tokenUsers: make(map[string]string)} -amw.Populate() - -r.Use(amw.Middleware) -``` - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. 
- -### Handling CORS Requests - -[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. - -* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` -* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route -* If you do not specify any methods, then: -> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. - -Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers: - -```go -package main - -import ( - "net/http" - "github.com/gorilla/mux" -) - -func main() { - r := mux.NewRouter() - - // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers - r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions) - r.Use(mux.CORSMethodMiddleware(r)) - - http.ListenAndServe(":8080", r) -} - -func fooHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - if r.Method == http.MethodOptions { - return - } - - w.Write([]byte("foo")) -} -``` - -And an request to `/foo` using something like: - -```bash -curl localhost:8080/foo -v -``` - -Would look like: - -```bash -* Trying ::1... 
-* TCP_NODELAY set -* Connected to localhost (::1) port 8080 (#0) -> GET /foo HTTP/1.1 -> Host: localhost:8080 -> User-Agent: curl/7.59.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS -< Access-Control-Allow-Origin: * -< Date: Fri, 28 Jun 2019 20:13:30 GMT -< Content-Length: 3 -< Content-Type: text/plain; charset=utf-8 -< -* Connection #0 to host localhost left intact -foo -``` - -### Testing Handlers - -Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. - -First, our simple HTTP handler: - -```go -// endpoints.go -package main - -func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { - // A very simple health check. - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - // In the future we could report back on the status of our DB, or our cache - // (e.g. Redis) by performing a simple PING, and include them in the response. - io.WriteString(w, `{"alive": true}`) -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/health", HealthCheckHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test code: - -```go -// endpoints_test.go -package main - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestHealthCheckHandler(t *testing.T) { - // Create a request to pass to our handler. We don't have any query parameters for now, so we'll - // pass 'nil' as the third parameter. - req, err := http.NewRequest("GET", "/health", nil) - if err != nil { - t.Fatal(err) - } - - // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. 
- rr := httptest.NewRecorder() - handler := http.HandlerFunc(HealthCheckHandler) - - // Our handlers satisfy http.Handler, so we can call their ServeHTTP method - // directly and pass in our Request and ResponseRecorder. - handler.ServeHTTP(rr, req) - - // Check the status code is what we expect. - if status := rr.Code; status != http.StatusOK { - t.Errorf("handler returned wrong status code: got %v want %v", - status, http.StatusOK) - } - - // Check the response body is what we expect. - expected := `{"alive": true}` - if rr.Body.String() != expected { - t.Errorf("handler returned unexpected body: got %v want %v", - rr.Body.String(), expected) - } -} -``` - -In the case that our routes have [variables](#examples), we can pass those in the request. We could write -[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple -possible route variables as needed. - -```go -// endpoints.go -func main() { - r := mux.NewRouter() - // A route with a route variable: - r.HandleFunc("/metrics/{type}", MetricsHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test file, with a table-driven test of `routeVariables`: - -```go -// endpoints_test.go -func TestMetricsHandler(t *testing.T) { - tt := []struct{ - routeVariable string - shouldPass bool - }{ - {"goroutines", true}, - {"heap", true}, - {"counters", true}, - {"queries", true}, - {"adhadaeqm3k", false}, - } - - for _, tc := range tt { - path := fmt.Sprintf("/metrics/%s", tc.routeVariable) - req, err := http.NewRequest("GET", path, nil) - if err != nil { - t.Fatal(err) - } - - rr := httptest.NewRecorder() - - // To add the vars to the context, - // we need to create a router through which we can pass the request. - router := mux.NewRouter() - router.HandleFunc("/metrics/{type}", MetricsHandler) - router.ServeHTTP(rr, req) - - // In this case, our MetricsHandler returns a non-200 response - // for a route variable it doesn't know about. 
- if rr.Code == http.StatusOK && !tc.shouldPass { - t.Errorf("handler should have failed on routeVariable %s: got %v want %v", - tc.routeVariable, rr.Code, http.StatusOK) - } - } -} -``` - -## Full Example - -Here's a complete, runnable example of a small `mux` based server: - -```go -package main - -import ( - "net/http" - "log" - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - log.Fatal(http.ListenAndServe(":8000", r)) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index 80601351fd..0000000000 --- a/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - - Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - - URL hosts, paths and query values can have variables with an optional - regular expression. - - Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - - Routes can be used as subrouters: nested routes are only tested if the - parent route matches. 
This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - - It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: - - r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -Note that if any capturing groups are present, mux will panic() during parsing. To prevent -this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to -"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably -when capturing groups were present. - -And this is all you need to know about the basic usage. More advanced options -are explained below. 
- -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. 
- -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Note that the path provided to PathPrefix() represents a "wildcard": calling -PathPrefix("/static/").Handler(...) means that the handler will be passed any -request that matches "/static/*". This makes it easy to serve static files with mux: - - func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) - } - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. 
For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host and query value variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). 
- Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. - - type MiddlewareFunc func(http.Handler) http.Handler - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). - -A very basic middleware which logs the URI of the request being handled could be written as: - - func simpleMw(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. 
- next.ServeHTTP(w, r) - }) - } - -Middlewares can be added to a router using `Router.Use()`: - - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.Use(simpleMw) - -A more complex authentication middleware, which maps session token to users, could be written as: - - // Define our struct - type authenticationMiddleware struct { - tokenUsers map[string]string - } - - // Initialize it somewhere - func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" - } - - // Middleware function, which will be called for each request - func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - next.ServeHTTP(w, r) - } else { - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) - } - - r := mux.NewRouter() - r.HandleFunc("/", handler) - - amw := authenticationMiddleware{tokenUsers: make(map[string]string)} - amw.Populate() - - r.Use(amw.Middleware) - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. -*/ -package mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go deleted file mode 100644 index cb51c565eb..0000000000 --- a/vendor/github.com/gorilla/mux/middleware.go +++ /dev/null @@ -1,74 +0,0 @@ -package mux - -import ( - "net/http" - "strings" -) - -// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. 
-// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed -// to it, and then calls the handler passed as parameter to the MiddlewareFunc. -type MiddlewareFunc func(http.Handler) http.Handler - -// middleware interface is anything which implements a MiddlewareFunc named Middleware. -type middleware interface { - Middleware(handler http.Handler) http.Handler -} - -// Middleware allows MiddlewareFunc to implement the middleware interface. -func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { - return mw(handler) -} - -// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) Use(mwf ...MiddlewareFunc) { - for _, fn := range mwf { - r.middlewares = append(r.middlewares, fn) - } -} - -// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) useInterface(mw middleware) { - r.middlewares = append(r.middlewares, mw) -} - -// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header -// on requests for routes that have an OPTIONS method matcher to all the method matchers on -// the route. Routes that do not explicitly handle OPTIONS requests will not be processed -// by the middleware. See examples for usage. 
-func CORSMethodMiddleware(r *Router) MiddlewareFunc { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - allMethods, err := getAllMethodsForRoute(r, req) - if err == nil { - for _, v := range allMethods { - if v == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) - } - } - } - - next.ServeHTTP(w, req) - }) - } -} - -// getAllMethodsForRoute returns all the methods from method matchers matching a given -// request. -func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { - var allMethods []string - - for _, route := range r.routes { - var match RouteMatch - if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { - methods, err := route.GetMethods() - if err != nil { - return nil, err - } - - allMethods = append(allMethods, methods...) - } - } - - return allMethods, nil -} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index 1e089906fa..0000000000 --- a/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,608 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "context" - "errors" - "fmt" - "net/http" - "path" - "regexp" -) - -var ( - // ErrMethodMismatch is returned when the method in the request does not match - // the method defined against the route. - ErrMethodMismatch = errors.New("method is not allowed") - // ErrNotFound is returned when no route match is found. - ErrNotFound = errors.New("no matching route was found") -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route)} -} - -// Router registers routes to be matched and dispatches a handler. 
-// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - // This can be used to render your own 404 Not Found errors. - NotFoundHandler http.Handler - - // Configurable Handler to be used when the request method does not match the route. - // This can be used to render your own 405 Method Not Allowed errors. - MethodNotAllowedHandler http.Handler - - // Routes to be matched, in order. - routes []*Route - - // Routes by name for URL building. - namedRoutes map[string]*Route - - // If true, do not clear the request context after handling the request. - // - // Deprecated: No effect, since the context is stored on the request itself. - KeepContext bool - - // Slice of middlewares to be called after a match is found - middlewares []middleware - - // configuration shared with `Route` - routeConf -} - -// common route configuration shared between `Router` and `Route` -type routeConf struct { - // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" - useEncodedPath bool - - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - - // If true, when the path pattern is "/path//to", accessing "/path//to" - // will not redirect - skipClean bool - - // Manager for the variables from host and path. - regexp routeRegexpGroup - - // List of matchers. - matchers []matcher - - // The scheme used when building URLs. 
- buildScheme string - - buildVarsFunc BuildVarsFunc -} - -// returns an effective deep copy of `routeConf` -func copyRouteConf(r routeConf) routeConf { - c := r - - if r.regexp.path != nil { - c.regexp.path = copyRouteRegexp(r.regexp.path) - } - - if r.regexp.host != nil { - c.regexp.host = copyRouteRegexp(r.regexp.host) - } - - c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) - for _, q := range r.regexp.queries { - c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) - } - - c.matchers = make([]matcher, len(r.matchers)) - copy(c.matchers, r.matchers) - - return c -} - -func copyRouteRegexp(r *routeRegexp) *routeRegexp { - c := *r - return &c -} - -// Match attempts to match the given request against the router's registered routes. -// -// If the request matches a route of this router or one of its subrouters the Route, -// Handler, and Vars fields of the the match argument are filled and this function -// returns true. -// -// If the request does not match any of this router's or its subrouters' routes -// then this function returns false. If available, a reason for the match failure -// will be filled in the match argument's MatchErr field. If the match failure type -// (eg: not found) has a registered handler, the handler is assigned to the Handler -// field of the match argument. 
-func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - // Build middleware chain if no error was found - if match.MatchErr == nil { - for i := len(r.middlewares) - 1; i >= 0; i-- { - match.Handler = r.middlewares[i].Middleware(match.Handler) - } - } - return true - } - } - - if match.MatchErr == ErrMethodMismatch { - if r.MethodNotAllowedHandler != nil { - match.Handler = r.MethodNotAllowedHandler - return true - } - - return false - } - - // Closest match for a router (includes sub-routers) - if r.NotFoundHandler != nil { - match.Handler = r.NotFoundHandler - match.MatchErr = ErrNotFound - return true - } - - match.MatchErr = ErrNotFound - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if !r.skipClean { - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Clean path to canonical form and redirect. - if p := cleanPath(path); p != path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - req = requestWithVars(req, match.Vars) - req = requestWithRoute(req, match.Route) - } - - if handler == nil && match.MatchErr == ErrMethodMismatch { - handler = methodNotAllowedHandler() - } - - if handler == nil { - handler = http.NotFoundHandler() - } - - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.namedRoutes[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.namedRoutes[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will perform a redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for -// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed -// request will be made as a GET by most clients. Use middleware or client settings -// to modify this behaviour as needed. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. 
-func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// SkipClean defines the path cleaning behaviour for new routes. The initial -// value is false. Users should be careful about which routes are not cleaned -// -// When true, if the route path is "/path//to", it will remain with the double -// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ -// -// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will -// become /fetch/http/xkcd.com/534 -func (r *Router) SkipClean(value bool) *Router { - r.skipClean = value - return r -} - -// UseEncodedPath tells the router to match the encoded original path -// to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". -// -// If not called, the router will match the unencoded path to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" -func (r *Router) UseEncodedPath() *Router { - r.useEncodedPath = true - return r -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - // initialize a route with a copy of the parent router's configuration - route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.routes = append(r.routes, route) - return route -} - -// Name registers a new route with a name. -// See Route.Name(). -func (r *Router) Name(name string) *Route { - return r.NewRoute().Name(name) -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). 
-func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVarsFunc registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. 
The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. -type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - if err != nil { - return err - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string - - // MatchErr is set to appropriate matching error - // It is set to ErrMethodMismatch if there is a mismatch in - // the request method and route method - MatchErr error -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. 
-func Vars(r *http.Request) map[string]string { - if rv := r.Context().Value(varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns. -func CurrentRoute(r *http.Request) *Route { - if rv := r.Context().Value(routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func requestWithVars(r *http.Request, vars map[string]string) *http.Request { - ctx := context.WithValue(r.Context(), varsKey, vars) - return r.WithContext(ctx) -} - -func requestWithRoute(r *http.Request, route *Route) *http.Request { - ctx := context.WithValue(r.Context(), routeKey, route) - return r.WithContext(ctx) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. 
-func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// mapFromPairsToRegex converts variadic string parameters to a -// string to regex map. -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. 
- valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// methodNotAllowed replies to the request with an HTTP status code 405. -func methodNotAllowed(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusMethodNotAllowed) -} - -// methodNotAllowedHandler returns a simple request handler -// that replies to each request with a status code 405. -func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 5d05cfa0e9..0000000000 --- a/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" -) - -type routeRegexpOptions struct { - strictSlash bool - useEncodedPath bool -} - -type regexpType int - -const ( - regexpTypePath regexpType = iota - regexpTypeHost - regexpTypePrefix - regexpTypeQuery -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if typ == regexpTypeQuery { - defaultPattern = ".*" - } else if typ == regexpTypeHost { - defaultPattern = "[^.]+" - } - // Only match strict slash if not matching - if typ != regexpTypePath { - options.strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if options.strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. 
- raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) - - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if options.strictSlash { - pattern.WriteString("[/]?") - } - if typ == regexpTypeQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if typ != regexpTypePrefix { - pattern.WriteByte('$') - } - - var wildcardHostPort bool - if typ == regexpTypeHost { - if !strings.Contains(pattern.String(), ":") { - wildcardHostPort = true - } - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - - // Check for capturing groups which used to work in older versions - if reg.NumSubexp() != len(idxs)/2 { - panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + - "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") - } - - // Done! 
- return &routeRegexp{ - template: template, - regexpType: typ, - options: options, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - wildcardHostPort: wildcardHostPort, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // The type of match - regexpType regexpType - // Options for matching - options routeRegexpOptions - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp - // Wildcard host-port (no strict port match in hostname) - wildcardHostPort bool -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType == regexpTypeHost { - host := getHost(req) - if r.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - return r.regexp.MatchString(host) - } - - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() - } - return r.regexp.MatchString(path) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - if r.regexpType == regexpTypeQuery { - value = url.QueryEscape(value) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. 
This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getURLQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getURLQuery(req *http.Request) string { - if r.regexpType != regexpTypeQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) - if ok { - return templateKey + "=" + val - } - return "" -} - -// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. -// If key was not found, empty string and false is returned. -func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { - query := []byte(rawQuery) - for len(query) > 0 { - foundKey := query - if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { - foundKey, query = foundKey[:i], foundKey[i+1:] - } else { - query = query[:0] - } - if len(foundKey) == 0 { - continue - } - var value []byte - if i := bytes.IndexByte(foundKey, '='); i >= 0 { - foundKey, value = foundKey[:i], foundKey[i+1:] - } - if len(foundKey) < len(key) { - // Cannot possibly be key. - continue - } - keyString, err := url.QueryUnescape(string(foundKey)) - if err != nil { - continue - } - if keyString != key { - continue - } - valueString, err := url.QueryUnescape(string(value)) - if err != nil { - continue - } - return valueString, true - } - return "", false -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getURLQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. 
-func braceIndices(s string) ([]int, error) { - var level, idx int - var idxs []int - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - host := getHost(req) - if v.host.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - matches := v.host.regexp.FindStringSubmatchIndex(host) - if len(matches) > 0 { - extractVars(host, matches, v.host.varsN, m.Vars) - } - } - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Store path variables. - if v.path != nil { - matches := v.path.regexp.FindStringSubmatchIndex(path) - if len(matches) > 0 { - extractVars(path, matches, v.path.varsN, m.Vars) - // Check if we should redirect. 
- if v.path.options.strictSlash { - p1 := strings.HasSuffix(path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryURL := q.getURLQuery(req) - matches := q.regexp.FindStringSubmatchIndex(queryURL) - if len(matches) > 0 { - extractVars(queryURL, matches, q.varsN, m.Vars) - } - } -} - -// getHost tries its best to return the request host. -// According to section 14.23 of RFC 2616 the Host header -// can include the port number if the default value of 80 is not used. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - return r.Host -} - -func extractVars(input string, matches []int, names []string, output map[string]string) { - for i, name := range names { - output[name] = input[matches[2*i+2]:matches[2*i+3]] - } -} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index e8f11df221..0000000000 --- a/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,765 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Request handler for the route. - handler http.Handler - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. 
- err error - - // "global" reference to all named routes - namedRoutes map[string]*Route - - // config possibly passed in from `Router` - routeConf -} - -// SkipClean reports whether path cleaning is enabled for this route via -// Router.SkipClean. -func (r *Route) SkipClean() bool { - return r.skipClean -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - - var matchErr error - - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - if _, ok := m.(methodMatcher); ok { - matchErr = ErrMethodMismatch - continue - } - - // Ignore ErrNotFound errors. These errors arise from match call - // to Subrouters. - // - // This prevents subsequent matching subrouters from failing to - // run middleware. If not ignored, the middleware would see a - // non-nil MatchErr and be skipped, even when there was a - // matching route. - if match.MatchErr == ErrNotFound { - match.MatchErr = nil - } - - matchErr = nil // nolint:ineffassign - return false - } else { - // Multiple routes may share the same path but use different HTTP methods. For instance: - // Route 1: POST "/users/{id}". - // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+". - // - // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2", - // The router should return a "Not Found" error as no route fully matches this request. - if match.MatchErr == ErrMethodMismatch { - match.MatchErr = nil - } - } - } - - if matchErr != nil { - match.MatchErr = matchErr - return false - } - - if match.MatchErr == ErrMethodMismatch && r.handler != nil { - // We found a route which matches request method, clear MatchErr - match.MatchErr = nil - // Then override the mis-matched handler - match.Handler = r.handler - } - - // Yay, we have a match. Let's collect some info about it. 
- if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - - // Set variables. - r.regexp.setMatch(req, match, r) - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// It is an error to call Name more than once on a route. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.namedRoutes[name] = r - } - return r -} - -// GetName returns the name for the route, if any. 
-func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { - if r.err != nil { - return r.err - } - if typ == regexpTypePath || typ == regexpTypePrefix { - if len(tpl) > 0 && tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ - strictSlash: r.strictSlash, - useEncodedPath: r.useEncodedPath, - }) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if typ == regexpTypeHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if typ == regexpTypeQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. 
-type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithString(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter().NewRoute() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// If the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairsToString(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// headerRegexMatcher matches the request against the route given a regex for the header -type headerRegexMatcher map[string]*regexp.Regexp - -func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithRegex(m, r.Header, true) -} - -// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex -// support. For example: -// -// r := mux.NewRouter().NewRoute() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both the request header matches both regular expressions. -// If the value is an empty string, it will match any value if the key is set. -// Use the start and end of string anchors (^ and $) to match an exact value. -func (r *Route) HeadersRegexp(pairs ...string) *Route { - if r.err == nil { - var headers map[string]*regexp.Regexp - headers, r.err = mapFromPairsToRegex(pairs...) - return r.addMatcher(headerRegexMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. 
-// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter().NewRoute() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypeHost) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -// Match returns the match for a given request. -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". 
-// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter().NewRoute() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePath) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter().NewRoute() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&id=42. -// -// If the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. 
-func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - scheme := r.URL.Scheme - // https://golang.org/pkg/net/http/#Request - // "For [most] server requests, fields other than Path and RawQuery will be - // empty." - // Since we're an http muxer, the scheme is either going to be http or https - // though, so we can just set it based on the tls termination state. - if scheme == "" { - if r.TLS == nil { - scheme = "http" - } else { - scheme = "https" - } - } - return matchInArray(m, scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". -// If the request's URL has a scheme set, it will be matched against. -// Generally, the URL scheme will only be set if a previous handler set it, -// such as the ProxyHeaders handler from gorilla/handlers. -// If unset, the scheme will be determined based on the request's TLS -// termination state. -// The first argument to Schemes will be used when constructing a route URL. 
-func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - if len(schemes) > 0 { - r.buildScheme = schemes[0] - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// BuildVarsFunc -------------------------------------------------------------- - -// BuildVarsFunc is the function signature used by custom build variable -// functions (which can modify route variables before a route's URL is built). -type BuildVarsFunc func(map[string]string) map[string]string - -// BuildVarsFunc adds a custom function to be used to modify build variables -// before a route's URL is built. -func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - if r.buildVarsFunc != nil { - // compose the old and new functions - old := r.buildVarsFunc - r.buildVarsFunc = func(m map[string]string) map[string]string { - return f(old(m)) - } - } else { - r.buildVarsFunc = f - } - return r -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. For example: -// -// r := mux.NewRouter().NewRoute() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - // initialize a subrouter with a copy of the parent route's configuration - router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. 
-// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Host("{subdomain}.domain.com"). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// The scheme of the resulting url will be the first argument that was passed to Schemes: -// -// // url.String() will be "https://example.com" -// r := mux.NewRouter().NewRoute() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - values, err := r.prepareVars(pairs...) 
- if err != nil { - return nil, err - } - var scheme, host, path string - queries := make([]string, 0, len(r.regexp.queries)) - if r.regexp.host != nil { - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - scheme = "http" - if r.buildScheme != "" { - scheme = r.buildScheme - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - for _, q := range r.regexp.queries { - var query string - if query, err = q.url(values); err != nil { - return nil, err - } - queries = append(queries, query) - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - RawQuery: strings.Join(queries, "&"), - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - u := &url.URL{ - Scheme: "http", - Host: host, - } - if r.buildScheme != "" { - u.Scheme = r.buildScheme - } - return u, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// GetPathTemplate returns the template used to build the -// route match. 
-// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route doesn't have a path") - } - return r.regexp.path.template, nil -} - -// GetPathRegexp returns the expanded regular expression used to match route path. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathRegexp() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route does not have a path") - } - return r.regexp.path.regexp.String(), nil -} - -// GetQueriesRegexp returns the expanded regular expressions used to match the -// route queries. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not have queries. -func (r *Route) GetQueriesRegexp() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - queries := make([]string, 0, len(r.regexp.queries)) - for _, query := range r.regexp.queries { - queries = append(queries, query.regexp.String()) - } - return queries, nil -} - -// GetQueriesTemplates returns the templates used to build the -// query matching. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define queries. 
-func (r *Route) GetQueriesTemplates() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - queries := make([]string, 0, len(r.regexp.queries)) - for _, query := range r.regexp.queries { - queries = append(queries, query.template) - } - return queries, nil -} - -// GetMethods returns the methods the route matches against -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if route does not have methods. -func (r *Route) GetMethods() ([]string, error) { - if r.err != nil { - return nil, r.err - } - for _, m := range r.matchers { - if methods, ok := m.(methodMatcher); ok { - return []string(methods), nil - } - } - return nil, errors.New("mux: route doesn't have methods") -} - -// GetHostTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a host. -func (r *Route) GetHostTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.host == nil { - return "", errors.New("mux: route doesn't have a host") - } - return r.regexp.host.template, nil -} - -// GetVarNames returns the names of all variables added by regexp matchers -// These can be used to know which route variables should be passed into r.URL() -func (r *Route) GetVarNames() ([]string, error) { - if r.err != nil { - return nil, r.err - } - var varNames []string - if r.regexp.host != nil { - varNames = append(varNames, r.regexp.host.varsN...) - } - if r.regexp.path != nil { - varNames = append(varNames, r.regexp.path.varsN...) - } - for _, regx := range r.regexp.queries { - varNames = append(varNames, regx.varsN...) 
- } - return varNames, nil -} - -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go deleted file mode 100644 index 5f5c496de0..0000000000 --- a/vendor/github.com/gorilla/mux/test_helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import "net/http" - -// SetURLVars sets the URL variables for the given request, to be accessed via -// mux.Vars for testing route behaviour. Arguments are not modified, a shallow -// copy is returned. -// -// This API should only be used for testing purposes; it provides a way to -// inject variables into the request context. Alternatively, URL variables -// can be set by making a route that captures the required variables, -// starting a server and sending the request to that server. 
-func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return requestWithVars(r, val) -} diff --git a/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go b/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go index aa5704a21a..9c98ac8483 100644 --- a/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go +++ b/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go @@ -5,6 +5,20 @@ import ( "reflect" ) +type errInvalidValue struct{} + +func (*errInvalidValue) Error() string { + return "invalid value (probably an untyped nil)" +} + +// InvalidValueError is a sentinel error that can be used to +// indicate that a value is invalid. This can happen when the +// source value is an untyped nil, and we have no further information +// about the type of the value, obstructing the assignment. +func InvalidValueError() error { + return &errInvalidValue{} +} + // AssignField is a convenience function to assign a value to // an optional struct field. In Go, an optional struct field is // usually denoted by a pointer to T instead of T: @@ -32,7 +46,7 @@ func AssignOptionalField(dst, src interface{}) error { if !dstRV.Elem().CanSet() { return fmt.Errorf(`dst (%T) is not assignable`, dstRV.Elem().Interface()) } - if !reflect.PtrTo(srcRV.Type()).AssignableTo(dstRV.Elem().Type()) { + if !reflect.PointerTo(srcRV.Type()).AssignableTo(dstRV.Elem().Type()) { return fmt.Errorf(`cannot assign src (%T) to dst (%T)`, src, dst) } @@ -50,15 +64,15 @@ func AssignIfCompatible(dst, src interface{}) error { orv := reflect.ValueOf(src) // save this value for error reporting result := orv - // t can be a pointer or a slice, and the code will slightly change + // src can be a pointer or a slice, and the code will slightly change // depending on this - var isPtr bool - var isSlice bool + var srcIsPtr bool + var srcIsSlice bool switch result.Kind() { case reflect.Ptr: - isPtr = true + srcIsPtr = true case reflect.Slice: - isSlice = true + srcIsSlice = true } rv := 
reflect.ValueOf(dst) @@ -66,17 +80,38 @@ func AssignIfCompatible(dst, src interface{}) error { return fmt.Errorf(`destination argument to AssignIfCompatible() must be a pointer: %T`, dst) } - actualDst := rv.Elem() + actualDst := rv + for { + if !actualDst.IsValid() { + return fmt.Errorf(`could not find a valid destination for AssignIfCompatible() (%T)`, dst) + } + if actualDst.CanSet() { + break + } + actualDst = actualDst.Elem() + } + switch actualDst.Kind() { case reflect.Interface: // If it's an interface, we can just assign the pointer to the interface{} default: // If it's a pointer to the struct we're looking for, we need to set // the de-referenced struct - if !isSlice && isPtr { + if !srcIsSlice && srcIsPtr { result = result.Elem() } } + + if !result.IsValid() { + // At this point there's nothing we can do. return an error + return fmt.Errorf(`source value is invalid (%T): %w`, src, InvalidValueError()) + } + + if actualDst.Kind() == reflect.Ptr { + actualDst.Set(result.Addr()) + return nil + } + if !result.Type().AssignableTo(actualDst.Type()) { return fmt.Errorf(`argument to AssignIfCompatible() must be compatible with %T (was %T)`, orv.Interface(), dst) } diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore b/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore new file mode 100644 index 0000000000..aaadf736e5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/.gitignore @@ -0,0 +1,32 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Code coverage profiles and other test artifacts +*.out +coverage.* +*.coverprofile +profile.cov + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env 
file +.env + +# Editor/IDE +# .idea/ +# .vscode/ diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes b/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes new file mode 100644 index 0000000000..4dd588a006 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/Changes @@ -0,0 +1,5 @@ +Changes +======= + +v1.0.0 18 Aug 2025 +* Initial release \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE b/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE new file mode 100644 index 0000000000..1e1f5d199e --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go b/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go new file mode 100644 index 0000000000..85650efa1c --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig-secp256k1/secp256k1.go @@ -0,0 +1,29 @@ +package dsigsecp256k1 + +import ( + "crypto" + + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/lestrrat-go/dsig" +) + +const ECDSAWithSecp256k1AndSHA256 = "ECDSA_WITH_SECP256K1_AND_SHA256" + +// init adds secp256k1 support when the dsig_secp256k1 build tag is used. +func init() { + // Register ES256K (secp256k1 + SHA256) support using the new API + err := dsig.RegisterAlgorithm(ECDSAWithSecp256k1AndSHA256, dsig.AlgorithmInfo{ + Family: dsig.ECDSA, + Meta: dsig.ECDSAFamilyMeta{ + Hash: crypto.SHA256, + }, + }) + if err != nil { + panic("failed to register secp256k1 algorithm: " + err.Error()) + } +} + +// secp256k1Curve returns the secp256k1 curve. +func Curve() *secp256k1.KoblitzCurve { + return secp256k1.S256() +} diff --git a/vendor/github.com/lestrrat-go/dsig/.gitignore b/vendor/github.com/lestrrat-go/dsig/.gitignore new file mode 100644 index 0000000000..aaadf736e5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/.gitignore @@ -0,0 +1,32 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Code coverage profiles and other test artifacts +*.out +coverage.* +*.coverprofile +profile.cov + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env file +.env + +# Editor/IDE +# .idea/ +# .vscode/ diff --git a/vendor/github.com/lestrrat-go/dsig/Changes b/vendor/github.com/lestrrat-go/dsig/Changes new file mode 100644 index 0000000000..bccce97613 --- 
/dev/null +++ b/vendor/github.com/lestrrat-go/dsig/Changes @@ -0,0 +1,5 @@ +Changes +======= + +v1.0.0 - 18 Aug 2025 +* Initial release \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/dsig/LICENSE b/vendor/github.com/lestrrat-go/dsig/LICENSE new file mode 100644 index 0000000000..1e1f5d199e --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/lestrrat-go/dsig/README.md b/vendor/github.com/lestrrat-go/dsig/README.md new file mode 100644 index 0000000000..37c194579e --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/README.md @@ -0,0 +1,163 @@ +# github.com/lestrrat-go/dsig [![CI](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml/badge.svg)](https://github.com/lestrrat-go/dsig/actions/workflows/ci.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/dsig.svg)](https://pkg.go.dev/github.com/lestrrat-go/dsig) [![codecov.io](https://codecov.io/github/lestrrat-go/dsig/coverage.svg?branch=v1)](https://codecov.io/github/lestrrat-go/dsig?branch=v1) + +Go module providing low-level digital signature operations. + +While there are many standards for generating and verifying digital signatures, the core operations are virtually the same. This module implements the core functionality of digital signature generation / verifications in a framework agnostic way. + +# Features + +* RSA signatures (PKCS1v15 and PSS) +* ECDSA signatures (P-256, P-384, P-521) +* EdDSA signatures (Ed25519, Ed448) +* HMAC signatures (SHA-256, SHA-384, SHA-512) +* Support for crypto.Signer interface +* Allows for dynamic additions of algorithms in limited cases. 
+ +# SYNOPSIS + + +```go +package examples_test + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "fmt" + + "github.com/lestrrat-go/dsig" +) + +func Example() { + payload := []byte("hello world") + + // RSA signing and verification + { + privKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + fmt.Printf("failed to generate RSA key: %s\n", err) + return + } + + // Sign with RSA-PSS SHA256 + signature, err := dsig.Sign(privKey, dsig.RSAPSSWithSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with RSA: %s\n", err) + return + } + + // Verify with RSA-PSS SHA256 + err = dsig.Verify(&privKey.PublicKey, dsig.RSAPSSWithSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify RSA signature: %s\n", err) + return + } + } + + // ECDSA signing and verification + { + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + fmt.Printf("failed to generate ECDSA key: %s\n", err) + return + } + + // Sign with ECDSA P-256 SHA256 + signature, err := dsig.Sign(privKey, dsig.ECDSAWithP256AndSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with ECDSA: %s\n", err) + return + } + + // Verify with ECDSA P-256 SHA256 + err = dsig.Verify(&privKey.PublicKey, dsig.ECDSAWithP256AndSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify ECDSA signature: %s\n", err) + return + } + } + + // EdDSA signing and verification + { + pubKey, privKey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + fmt.Printf("failed to generate Ed25519 key: %s\n", err) + return + } + + // Sign with EdDSA + signature, err := dsig.Sign(privKey, dsig.EdDSA, payload, nil) + if err != nil { + fmt.Printf("failed to sign with EdDSA: %s\n", err) + return + } + + // Verify with EdDSA + err = dsig.Verify(pubKey, dsig.EdDSA, payload, signature) + if err != nil { + fmt.Printf("failed to verify EdDSA signature: %s\n", err) + return + } + } + + // HMAC 
signing and verification + { + key := []byte("secret-key") + + // Sign with HMAC SHA256 + signature, err := dsig.Sign(key, dsig.HMACWithSHA256, payload, nil) + if err != nil { + fmt.Printf("failed to sign with HMAC: %s\n", err) + return + } + + // Verify with HMAC SHA256 + err = dsig.Verify(key, dsig.HMACWithSHA256, payload, signature) + if err != nil { + fmt.Printf("failed to verify HMAC signature: %s\n", err) + return + } + } + // OUTPUT: +} +``` +source: [examples/dsig_readme_example_test.go](https://github.com/lestrrat-go/dsig/blob/v1/examples/dsig_readme_example_test.go) + + +# Supported Algorithms + +| Constant | Algorithm | Key Type | +|----------|-----------|----------| +| `HMACWithSHA256` | HMAC using SHA-256 | []byte | +| `HMACWithSHA384` | HMAC using SHA-384 | []byte | +| `HMACWithSHA512` | HMAC using SHA-512 | []byte | +| `RSAPKCS1v15WithSHA256` | RSA PKCS#1 v1.5 using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPKCS1v15WithSHA384` | RSA PKCS#1 v1.5 using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPKCS1v15WithSHA512` | RSA PKCS#1 v1.5 using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA256` | RSA PSS using SHA-256 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA384` | RSA PSS using SHA-384 | *rsa.PrivateKey / *rsa.PublicKey | +| `RSAPSSWithSHA512` | RSA PSS using SHA-512 | *rsa.PrivateKey / *rsa.PublicKey | +| `ECDSAWithP256AndSHA256` | ECDSA using P-256 and SHA-256 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `ECDSAWithP384AndSHA384` | ECDSA using P-384 and SHA-384 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `ECDSAWithP521AndSHA512` | ECDSA using P-521 and SHA-512 | *ecdsa.PrivateKey / *ecdsa.PublicKey | +| `EdDSA` | EdDSA using Ed25519 or Ed448 | ed25519.PrivateKey / ed25519.PublicKey | + +# Description + +This library provides low-level digital signature operations. It does minimal parameter validation for performance, uses strongly typed APIs, and has minimal dependencies. 
+ +# Contributions + +## Issues + +For bug reports and feature requests, please include failing tests when possible. + +## Pull Requests + +Please include tests that exercise your changes. + +# Related Libraries + +* [github.com/lestrrat-go/jwx](https://github.com/lestrrat-go/jwx) - JOSE (JWA/JWE/JWK/JWS/JWT) implementation \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/dsig/algorithms.go b/vendor/github.com/lestrrat-go/dsig/algorithms.go new file mode 100644 index 0000000000..0895c64764 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/algorithms.go @@ -0,0 +1,37 @@ +package dsig + +// This file defines verbose algorithm name constants that can be mapped to by +// different standards (RFC7518, FIDO, etc.) for interoperability. +// +// The algorithm names are intentionally verbose to avoid any ambiguity about +// the exact cryptographic operations being performed. + +const ( + // HMAC signature algorithms + // These use Hash-based Message Authentication Code with specified hash functions + HMACWithSHA256 = "HMAC_WITH_SHA256" + HMACWithSHA384 = "HMAC_WITH_SHA384" + HMACWithSHA512 = "HMAC_WITH_SHA512" + + // RSA signature algorithms with PKCS#1 v1.5 padding + // These use RSA signatures with PKCS#1 v1.5 padding and specified hash functions + RSAPKCS1v15WithSHA256 = "RSA_PKCS1v15_WITH_SHA256" + RSAPKCS1v15WithSHA384 = "RSA_PKCS1v15_WITH_SHA384" + RSAPKCS1v15WithSHA512 = "RSA_PKCS1v15_WITH_SHA512" + + // RSA signature algorithms with PSS padding + // These use RSA signatures with Probabilistic Signature Scheme (PSS) padding + RSAPSSWithSHA256 = "RSA_PSS_WITH_SHA256" + RSAPSSWithSHA384 = "RSA_PSS_WITH_SHA384" + RSAPSSWithSHA512 = "RSA_PSS_WITH_SHA512" + + // ECDSA signature algorithms + // These use Elliptic Curve Digital Signature Algorithm with specified curves and hash functions + ECDSAWithP256AndSHA256 = "ECDSA_WITH_P256_AND_SHA256" + ECDSAWithP384AndSHA384 = "ECDSA_WITH_P384_AND_SHA384" + ECDSAWithP521AndSHA512 = 
"ECDSA_WITH_P521_AND_SHA512" + + // EdDSA signature algorithms + // These use Edwards-curve Digital Signature Algorithm (supports Ed25519 and Ed448) + EdDSA = "EDDSA" +) \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/dsig/crypto_signer.go b/vendor/github.com/lestrrat-go/dsig/crypto_signer.go new file mode 100644 index 0000000000..f81666708b --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/crypto_signer.go @@ -0,0 +1,45 @@ +package dsig + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" +) + +// cryptosign is a low-level function that signs a payload using a crypto.Signer. +// If hash is crypto.Hash(0), the payload is signed directly without hashing. +// Otherwise, the payload is hashed using the specified hash function before signing. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. +func cryptosign(signer crypto.Signer, payload []byte, hash crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) { + if rr == nil { + rr = rand.Reader + } + + var digest []byte + if hash == crypto.Hash(0) { + digest = payload + } else { + h := hash.New() + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload to hash: %w`, err) + } + digest = h.Sum(nil) + } + return signer.Sign(rr, digest, opts) +} + +// SignCryptoSigner generates a signature using a crypto.Signer interface. +// This function can be used for hardware security modules, smart cards, +// and other implementations of the crypto.Signer interface. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. +// +// Returns the signature bytes or an error if signing fails. 
+func SignCryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) { + if signer == nil { + return nil, fmt.Errorf("dsig.SignCryptoSigner: signer is nil") + } + return cryptosign(signer, raw, h, opts, rr) +} diff --git a/vendor/github.com/lestrrat-go/dsig/dsig.go b/vendor/github.com/lestrrat-go/dsig/dsig.go new file mode 100644 index 0000000000..de6cbdec45 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/dsig.go @@ -0,0 +1,224 @@ +// Package dsig provides digital signature operations for Go. +// It contains low-level signature generation and verification tools that +// can be used by other signing libraries +// +// The package follows these design principles: +// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid. +// 2. All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to). +// 3. Does not rely on other high-level packages (standalone, except for internal packages). 
+package dsig + +import ( + "crypto" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + "sync" +) + +// Family represents the cryptographic algorithm family +type Family int + +const ( + InvalidFamily Family = iota + HMAC + RSA + ECDSA + EdDSAFamily + maxFamily +) + +// String returns the string representation of the Family +func (f Family) String() string { + switch f { + case HMAC: + return "HMAC" + case RSA: + return "RSA" + case ECDSA: + return "ECDSA" + case EdDSAFamily: + return "EdDSA" + default: + return "InvalidFamily" + } +} + +// AlgorithmInfo contains metadata about a digital signature algorithm +type AlgorithmInfo struct { + Family Family // The cryptographic family (HMAC, RSA, ECDSA, EdDSA) + Meta any // Family-specific metadata +} + +// HMACFamilyMeta contains metadata specific to HMAC algorithms +type HMACFamilyMeta struct { + HashFunc func() hash.Hash // Hash function constructor +} + +// RSAFamilyMeta contains metadata specific to RSA algorithms +type RSAFamilyMeta struct { + Hash crypto.Hash // Hash algorithm + PSS bool // Whether to use PSS padding (false = PKCS#1 v1.5) +} + +// ECDSAFamilyMeta contains metadata specific to ECDSA algorithms +type ECDSAFamilyMeta struct { + Hash crypto.Hash // Hash algorithm +} + +// EdDSAFamilyMeta contains metadata specific to EdDSA algorithms +// Currently EdDSA doesn't need specific metadata, but this provides extensibility +type EdDSAFamilyMeta struct { + // Reserved for future use +} + +var algorithms = make(map[string]AlgorithmInfo) +var muAlgorithms sync.RWMutex + +// RegisterAlgorithm registers a new digital signature algorithm with the specified family and metadata. +// +// info.Meta should contain extra metadata for some algorithms. Currently HMAC, RSA, +// and ECDSA family of algorithms need their respective metadata (HMACFamilyMeta, +// RSAFamilyMeta, and ECDSAFamilyMeta). Metadata for other families are ignored. 
+func RegisterAlgorithm(name string, info AlgorithmInfo) error { + muAlgorithms.Lock() + defer muAlgorithms.Unlock() + + // Validate the metadata matches the family + switch info.Family { + case HMAC: + if _, ok := info.Meta.(HMACFamilyMeta); !ok { + return fmt.Errorf("invalid HMAC metadata for algorithm %s", name) + } + case RSA: + if _, ok := info.Meta.(RSAFamilyMeta); !ok { + return fmt.Errorf("invalid RSA metadata for algorithm %s", name) + } + case ECDSA: + if _, ok := info.Meta.(ECDSAFamilyMeta); !ok { + return fmt.Errorf("invalid ECDSA metadata for algorithm %s", name) + } + case EdDSAFamily: + // EdDSA metadata is optional for now + default: + return fmt.Errorf("unsupported algorithm family %s for algorithm %s", info.Family, name) + } + + algorithms[name] = info + return nil +} + +// GetAlgorithmInfo retrieves the algorithm information for a given algorithm name. +// Returns the info and true if found, zero value and false if not found. +func GetAlgorithmInfo(name string) (AlgorithmInfo, bool) { + muAlgorithms.RLock() + defer muAlgorithms.RUnlock() + + info, ok := algorithms[name] + return info, ok +} + +func init() { + // Register all standard algorithms with their metadata + toRegister := map[string]AlgorithmInfo{ + // HMAC algorithms + HMACWithSHA256: { + Family: HMAC, + Meta: HMACFamilyMeta{ + HashFunc: sha256.New, + }, + }, + HMACWithSHA384: { + Family: HMAC, + Meta: HMACFamilyMeta{ + HashFunc: sha512.New384, + }, + }, + HMACWithSHA512: { + Family: HMAC, + Meta: HMACFamilyMeta{ + HashFunc: sha512.New, + }, + }, + + // RSA PKCS#1 v1.5 algorithms + RSAPKCS1v15WithSHA256: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA256, + PSS: false, + }, + }, + RSAPKCS1v15WithSHA384: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA384, + PSS: false, + }, + }, + RSAPKCS1v15WithSHA512: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA512, + PSS: false, + }, + }, + + // RSA PSS algorithms + RSAPSSWithSHA256: { + Family: RSA, + Meta: 
RSAFamilyMeta{ + Hash: crypto.SHA256, + PSS: true, + }, + }, + RSAPSSWithSHA384: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA384, + PSS: true, + }, + }, + RSAPSSWithSHA512: { + Family: RSA, + Meta: RSAFamilyMeta{ + Hash: crypto.SHA512, + PSS: true, + }, + }, + + // ECDSA algorithms + ECDSAWithP256AndSHA256: { + Family: ECDSA, + Meta: ECDSAFamilyMeta{ + Hash: crypto.SHA256, + }, + }, + ECDSAWithP384AndSHA384: { + Family: ECDSA, + Meta: ECDSAFamilyMeta{ + Hash: crypto.SHA384, + }, + }, + ECDSAWithP521AndSHA512: { + Family: ECDSA, + Meta: ECDSAFamilyMeta{ + Hash: crypto.SHA512, + }, + }, + + // EdDSA algorithm + EdDSA: { + Family: EdDSAFamily, + Meta: EdDSAFamilyMeta{}, + }, + } + + for name, info := range toRegister { + if err := RegisterAlgorithm(name, info); err != nil { + panic(fmt.Sprintf("failed to register algorithm %s: %v", name, err)) + } + } +} + diff --git a/vendor/github.com/lestrrat-go/dsig/ecdsa.go b/vendor/github.com/lestrrat-go/dsig/ecdsa.go new file mode 100644 index 0000000000..a04a266919 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/ecdsa.go @@ -0,0 +1,200 @@ +package dsig + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "encoding/asn1" + "fmt" + "io" + "math/big" + + "github.com/lestrrat-go/dsig/internal/ecutil" +) + + +func ecdsaGetSignerKey(key any) (*ecdsa.PrivateKey, crypto.Signer, bool, error) { + cs, isCryptoSigner := key.(crypto.Signer) + if isCryptoSigner { + if !isValidECDSAKey(key) { + return nil, nil, false, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key) + } + + switch key.(type) { + case ecdsa.PrivateKey, *ecdsa.PrivateKey: + // if it's ecdsa.PrivateKey, it's more efficient to + // go through the non-crypto.Signer route. Set isCryptoSigner to false + isCryptoSigner = false + } + } + + if isCryptoSigner { + return nil, cs, true, nil + } + + privkey, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, nil, false, fmt.Errorf(`invalid key type %T. 
*ecdsa.PrivateKey is required`, key) + } + return privkey, nil, false, nil +} + +// UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values. +// This is typically used when working with crypto.Signer interfaces that return ASN.1 encoded signatures. +func UnpackASN1ECDSASignature(signed []byte, r, s *big.Int) error { + // Okay, this is silly, but hear me out. When we use the + // crypto.Signer interface, the PrivateKey is hidden. + // But we need some information about the key (its bit size). + // + // So while silly, we're going to have to make another call + // here and fetch the Public key. + // (This probably means that this information should be cached somewhere) + var p struct { + R *big.Int // TODO: get this from a pool? + S *big.Int + } + if _, err := asn1.Unmarshal(signed, &p); err != nil { + return fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err) + } + + r.Set(p.R) + s.Set(p.S) + return nil +} + +// UnpackECDSASignature unpacks a JWS-format ECDSA signature into r and s values. +// The signature should be in the format specified by RFC 7515 (r||s as fixed-length byte arrays). +func UnpackECDSASignature(signature []byte, pubkey *ecdsa.PublicKey, r, s *big.Int) error { + keySize := ecutil.CalculateKeySize(pubkey.Curve) + if len(signature) != keySize*2 { + return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name) + } + + r.SetBytes(signature[:keySize]) + s.SetBytes(signature[keySize:]) + + return nil +} + +// PackECDSASignature packs the r and s values from an ECDSA signature into a JWS-format byte slice. +// The output format follows RFC 7515: r||s as fixed-length byte arrays. 
+func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error) { + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // Serialize r and s into fixed-length bytes + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := sbig.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + // Output as r||s + return append(rBytesPadded, sBytesPadded...), nil +} + +// SignECDSA generates an ECDSA signature for the given payload using the specified private key and hash. +// The raw parameter should be the pre-computed signing input (typically header.payload). +// +// rr is an io.Reader that provides randomness for signing. if rr is nil, it defaults to rand.Reader. +func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) { + if !isValidECDSAKey(key) { + return nil, fmt.Errorf(`invalid key type %T for ECDSA algorithm`, key) + } + hh := h.New() + if _, err := hh.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err) + } + digest := hh.Sum(nil) + + if rr == nil { + rr = rand.Reader + } + + // Sign and get r, s values + r, s, err := ecdsa.Sign(rr, key, digest) + if err != nil { + return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err) + } + + return PackECDSASignature(r, s, key.Curve.Params().BitSize) +} + +// SignECDSACryptoSigner generates an ECDSA signature using a crypto.Signer interface. +// This function works with hardware security modules and other crypto.Signer implementations. +// The signature is converted from ASN.1 format to JWS format (r||s). +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. 
+func SignECDSACryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, rr io.Reader) ([]byte, error) { + signed, err := SignCryptoSigner(signer, raw, h, h, rr) + if err != nil { + return nil, fmt.Errorf(`failed to sign payload using crypto.Signer: %w`, err) + } + + return signECDSACryptoSigner(signer, signed) +} + +func signECDSACryptoSigner(signer crypto.Signer, signed []byte) ([]byte, error) { + cpub := signer.Public() + pubkey, ok := cpub.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, pubkey) + } + curveBits := pubkey.Curve.Params().BitSize + + var r, s big.Int + if err := UnpackASN1ECDSASignature(signed, &r, &s); err != nil { + return nil, fmt.Errorf(`failed to unpack ASN1 encoded signature: %w`, err) + } + + return PackECDSASignature(&r, &s, curveBits) +} + +func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) error { + hasher := h.New() + hasher.Write(buf) + digest := hasher.Sum(nil) + if !ecdsa.Verify(key, digest, r, s) { + return NewVerificationError("invalid ECDSA signature") + } + return nil +} + +// VerifyECDSA verifies an ECDSA signature for the given payload. +// This function verifies the signature using the specified public key and hash algorithm. +// The payload parameter should be the pre-computed signing input (typically header.payload). +func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error { + var r, s big.Int + if err := UnpackECDSASignature(signature, key, &r, &s); err != nil { + return fmt.Errorf("dsig.VerifyECDSA: failed to unpack ECDSA signature: %w", err) + } + + return ecdsaVerify(key, payload, h, &r, &s) +} + +// VerifyECDSACryptoSigner verifies an ECDSA signature for crypto.Signer implementations. +// This function is useful for verifying signatures created by hardware security modules +// or other implementations of the crypto.Signer interface. 
+// The payload parameter should be the pre-computed signing input (typically header.payload). +func VerifyECDSACryptoSigner(signer crypto.Signer, payload, signature []byte, h crypto.Hash) error { + var pubkey *ecdsa.PublicKey + switch cpub := signer.Public(); cpub := cpub.(type) { + case ecdsa.PublicKey: + pubkey = &cpub + case *ecdsa.PublicKey: + pubkey = cpub + default: + return fmt.Errorf(`dsig.VerifyECDSACryptoSigner: expected *ecdsa.PublicKey, got %T`, cpub) + } + + var r, s big.Int + if err := UnpackECDSASignature(signature, pubkey, &r, &s); err != nil { + return fmt.Errorf("dsig.VerifyECDSACryptoSigner: failed to unpack ASN.1 encoded ECDSA signature: %w", err) + } + + return ecdsaVerify(pubkey, payload, h, &r, &s) +} diff --git a/vendor/github.com/lestrrat-go/dsig/eddsa.go b/vendor/github.com/lestrrat-go/dsig/eddsa.go new file mode 100644 index 0000000000..6562da37b8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/eddsa.go @@ -0,0 +1,44 @@ +package dsig + +import ( + "crypto" + "crypto/ed25519" + "fmt" +) + +func eddsaGetSigner(key any) (crypto.Signer, error) { + // The ed25519.PrivateKey object implements crypto.Signer, so we should + // simply accept a crypto.Signer here. + signer, ok := key.(crypto.Signer) + if ok { + if !isValidEDDSAKey(key) { + return nil, fmt.Errorf(`invalid key type %T for EdDSA algorithm`, key) + } + return signer, nil + } + + // This fallback exists for cases when users give us a pointer instead of non-pointer, etc. + privkey, ok := key.(ed25519.PrivateKey) + if !ok { + return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T`, key) + } + return privkey, nil +} + +// SignEdDSA generates an EdDSA (Ed25519) signature for the given payload. +// The raw parameter should be the pre-computed signing input (typically header.payload). +// EdDSA is deterministic and doesn't require additional hashing of the input. 
+func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) { + return ed25519.Sign(key, payload), nil +} + +// VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload. +// This function verifies the signature using Ed25519 verification algorithm. +// The payload parameter should be the pre-computed signing input (typically header.payload). +// EdDSA is deterministic and provides strong security guarantees without requiring hash function selection. +func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error { + if !ed25519.Verify(key, payload, signature) { + return fmt.Errorf("invalid EdDSA signature") + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/dsig/hmac.go b/vendor/github.com/lestrrat-go/dsig/hmac.go new file mode 100644 index 0000000000..8b2612279d --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/hmac.go @@ -0,0 +1,45 @@ +package dsig + +import ( + "crypto/hmac" + "fmt" + "hash" +) + +func toHMACKey(dst *[]byte, key any) error { + keyBytes, ok := key.([]byte) + if !ok { + return fmt.Errorf(`dsig.toHMACKey: invalid key type %T. []byte is required`, key) + } + + if len(keyBytes) == 0 { + return fmt.Errorf(`dsig.toHMACKey: missing key while signing payload`) + } + + *dst = keyBytes + return nil +} + +// SignHMAC generates an HMAC signature for the given payload using the specified hash function and key. +// The raw parameter should be the pre-computed signing input (typically header.payload). +func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) { + h := hmac.New(hfunc, key) + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err) + } + return h.Sum(nil), nil +} + +// VerifyHMAC verifies an HMAC signature for the given payload. +// This function verifies the signature using the specified key and hash function. +// The payload parameter should be the pre-computed signing input (typically header.payload). 
+func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error { + expected, err := SignHMAC(key, payload, hfunc) + if err != nil { + return fmt.Errorf("failed to sign payload for verification: %w", err) + } + if !hmac.Equal(signature, expected) { + return NewVerificationError("invalid HMAC signature") + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go b/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go new file mode 100644 index 0000000000..cf0bd4ac48 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/internal/ecutil/ecutil.go @@ -0,0 +1,76 @@ +// Package ecutil defines tools that help with elliptic curve related +// computation +package ecutil + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +const ( + // size of buffer that needs to be allocated for EC521 curve + ec521BufferSize = 66 // (521 / 8) + 1 +) + +var ecpointBufferPool = sync.Pool{ + New: func() any { + // In most cases the curve bit size will be less than this length + // so allocate the maximum, and keep reusing + buf := make([]byte, 0, ec521BufferSize) + return &buf + }, +} + +func getCrvFixedBuffer(size int) []byte { + //nolint:forcetypeassert + buf := *(ecpointBufferPool.Get().(*[]byte)) + if size > ec521BufferSize && cap(buf) < size { + buf = append(buf, make([]byte, size-cap(buf))...) + } + return buf[:size] +} + +// ReleaseECPointBuffer releases the []byte buffer allocated. +func ReleaseECPointBuffer(buf []byte) { + buf = buf[:cap(buf)] + buf[0] = 0x0 + for i := 1; i < len(buf); i *= 2 { + copy(buf[i:], buf[:i]) + } + buf = buf[:0] + ecpointBufferPool.Put(&buf) +} + +func CalculateKeySize(crv elliptic.Curve) int { + // We need to create a buffer that fits the entire curve. + // If the curve size is 66, that fits in 9 bytes. If the curve + // size is 64, it fits in 8 bytes. + bits := crv.Params().BitSize + + // For most common cases we know before hand what the byte length + // is going to be. 
optimize + var inBytes int + switch bits { + case 224, 256, 384: // TODO: use constant? + inBytes = bits / 8 + case 521: + inBytes = ec521BufferSize + default: + inBytes = bits / 8 + if (bits % 8) != 0 { + inBytes++ + } + } + + return inBytes +} + +// AllocECPointBuffer allocates a buffer for the given point in the given +// curve. This buffer should be released using the ReleaseECPointBuffer +// function. +func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte { + buf := getCrvFixedBuffer(CalculateKeySize(crv)) + v.FillBytes(buf) + return buf +} diff --git a/vendor/github.com/lestrrat-go/dsig/rsa.go b/vendor/github.com/lestrrat-go/dsig/rsa.go new file mode 100644 index 0000000000..a339fe5b78 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/rsa.go @@ -0,0 +1,63 @@ +package dsig + +import ( + "crypto" + "crypto/rsa" + "fmt" + "io" +) + +func rsaGetSignerCryptoSignerKey(key any) (crypto.Signer, bool, error) { + if !isValidRSAKey(key) { + return nil, false, fmt.Errorf(`invalid key type %T for RSA algorithm`, key) + } + cs, isCryptoSigner := key.(crypto.Signer) + if isCryptoSigner { + return cs, true, nil + } + return nil, false, nil +} + +// rsaPSSOptions returns the PSS options for RSA-PSS signatures with the specified hash. +// The salt length is set to equal the hash length as per RFC 7518. +func rsaPSSOptions(h crypto.Hash) rsa.PSSOptions { + return rsa.PSSOptions{ + Hash: h, + SaltLength: rsa.PSSSaltLengthEqualsHash, + } +} + +// SignRSA generates an RSA signature for the given payload using the specified private key and options. +// The raw parameter should be the pre-computed signing input (typically header.payload). +// If pss is true, RSA-PSS is used; otherwise, PKCS#1 v1.5 is used. +// +// The rr parameter is an optional io.Reader that can be used to provide randomness for signing. +// If rr is nil, it defaults to rand.Reader. 
+func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) { + if !isValidRSAKey(key) { + return nil, fmt.Errorf(`invalid key type %T for RSA algorithm`, key) + } + var opts crypto.SignerOpts = h + if pss { + rsaopts := rsaPSSOptions(h) + opts = &rsaopts + } + return cryptosign(key, payload, h, opts, rr) +} + +// VerifyRSA verifies an RSA signature for the given payload and header. +// This function constructs the signing input by encoding the header and payload according to JWS specification, +// then verifies the signature using the specified public key and hash algorithm. +// If pss is true, RSA-PSS verification is used; otherwise, PKCS#1 v1.5 verification is used. +func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error { + if !isValidRSAKey(key) { + return fmt.Errorf(`invalid key type %T for RSA algorithm`, key) + } + hasher := h.New() + hasher.Write(payload) + digest := hasher.Sum(nil) + if pss { + return rsa.VerifyPSS(key, h, digest, signature, &rsa.PSSOptions{Hash: h, SaltLength: rsa.PSSSaltLengthEqualsHash}) + } + return rsa.VerifyPKCS1v15(key, h, digest, signature) +} diff --git a/vendor/github.com/lestrrat-go/dsig/sign.go b/vendor/github.com/lestrrat-go/dsig/sign.go new file mode 100644 index 0000000000..e2a6bde290 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/sign.go @@ -0,0 +1,100 @@ +package dsig + +import ( + "crypto" + "crypto/rsa" + "fmt" + "io" +) + +// Sign generates a digital signature using the specified key and algorithm. +// +// This function loads the signer registered in the dsig package _ONLY_. +// It does not support custom signers that the user might have registered. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. +// Not all algorithms require this parameter, but it is included for consistency. +// 99% of the time, you can pass nil for rr, and it will work fine. 
+func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { + info, ok := GetAlgorithmInfo(alg) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: unsupported signature algorithm %q`, alg) + } + + switch info.Family { + case HMAC: + return dispatchHMACSign(key, info, payload) + case RSA: + return dispatchRSASign(key, info, payload, rr) + case ECDSA: + return dispatchECDSASign(key, info, payload, rr) + case EdDSAFamily: + return dispatchEdDSASign(key, info, payload, rr) + default: + return nil, fmt.Errorf(`dsig.Sign: unsupported signature family %q`, info.Family) + } +} + +func dispatchHMACSign(key any, info AlgorithmInfo, payload []byte) ([]byte, error) { + meta, ok := info.Meta.(HMACFamilyMeta) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: invalid HMAC metadata`) + } + + var hmackey []byte + if err := toHMACKey(&hmackey, key); err != nil { + return nil, fmt.Errorf(`dsig.Sign: %w`, err) + } + return SignHMAC(hmackey, payload, meta.HashFunc) +} + +func dispatchRSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) { + meta, ok := info.Meta.(RSAFamilyMeta) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: invalid RSA metadata`) + } + + cs, isCryptoSigner, err := rsaGetSignerCryptoSignerKey(key) + if err != nil { + return nil, fmt.Errorf(`dsig.Sign: %w`, err) + } + if isCryptoSigner { + var options crypto.SignerOpts = meta.Hash + if meta.PSS { + rsaopts := rsaPSSOptions(meta.Hash) + options = &rsaopts + } + return SignCryptoSigner(cs, payload, meta.Hash, options, rr) + } + + privkey, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: invalid key type %T. 
*rsa.PrivateKey is required`, key) + } + return SignRSA(privkey, payload, meta.Hash, meta.PSS, rr) +} + +func dispatchEdDSASign(key any, _ AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) { + signer, err := eddsaGetSigner(key) + if err != nil { + return nil, fmt.Errorf(`dsig.Sign: %w`, err) + } + + return SignCryptoSigner(signer, payload, crypto.Hash(0), crypto.Hash(0), rr) +} + +func dispatchECDSASign(key any, info AlgorithmInfo, payload []byte, rr io.Reader) ([]byte, error) { + meta, ok := info.Meta.(ECDSAFamilyMeta) + if !ok { + return nil, fmt.Errorf(`dsig.Sign: invalid ECDSA metadata`) + } + + privkey, cs, isCryptoSigner, err := ecdsaGetSignerKey(key) + if err != nil { + return nil, fmt.Errorf(`dsig.Sign: %w`, err) + } + if isCryptoSigner { + return SignECDSACryptoSigner(cs, payload, meta.Hash, rr) + } + return SignECDSA(privkey, payload, meta.Hash, rr) +} diff --git a/vendor/github.com/lestrrat-go/dsig/validation.go b/vendor/github.com/lestrrat-go/dsig/validation.go new file mode 100644 index 0000000000..17682d8538 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/validation.go @@ -0,0 +1,66 @@ +package dsig + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" +) + +// isValidRSAKey validates that the provided key type is appropriate for RSA algorithms. +// It returns false if the key is clearly incompatible (e.g., ECDSA or EdDSA keys). +func isValidRSAKey(key any) bool { + switch key.(type) { + case + ecdsa.PrivateKey, *ecdsa.PrivateKey, + ed25519.PrivateKey: + // these are NOT ok for RSA algorithms + return false + } + return true +} + +// isValidECDSAKey validates that the provided key type is appropriate for ECDSA algorithms. +// It returns false if the key is clearly incompatible (e.g., RSA or EdDSA keys). 
+func isValidECDSAKey(key any) bool { + switch key.(type) { + case + ed25519.PrivateKey, + rsa.PrivateKey, *rsa.PrivateKey: + // these are NOT ok for ECDSA algorithms + return false + } + return true +} + +// isValidEDDSAKey validates that the provided key type is appropriate for EdDSA algorithms. +// It returns false if the key is clearly incompatible (e.g., RSA or ECDSA keys). +func isValidEDDSAKey(key any) bool { + switch key.(type) { + case + ecdsa.PrivateKey, *ecdsa.PrivateKey, + rsa.PrivateKey, *rsa.PrivateKey: + // these are NOT ok for EdDSA algorithms + return false + } + return true +} + +// VerificationError represents an error that occurred during signature verification. +type VerificationError struct { + message string +} + +func (e *VerificationError) Error() string { + return e.message +} + +// NewVerificationError creates a new verification error with the given message. +func NewVerificationError(message string) error { + return &VerificationError{message: message} +} + +// IsVerificationError checks if the given error is a verification error. +func IsVerificationError(err error) bool { + _, ok := err.(*VerificationError) + return ok +} diff --git a/vendor/github.com/lestrrat-go/dsig/verify.go b/vendor/github.com/lestrrat-go/dsig/verify.go new file mode 100644 index 0000000000..86085b0a37 --- /dev/null +++ b/vendor/github.com/lestrrat-go/dsig/verify.go @@ -0,0 +1,134 @@ +package dsig + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "fmt" +) + +// Verify verifies a digital signature using the specified key and algorithm. +// +// This function loads the verifier registered in the dsig package _ONLY_. +// It does not support custom verifiers that the user might have registered. 
+func Verify(key any, alg string, payload, signature []byte) error { + info, ok := GetAlgorithmInfo(alg) + if !ok { + return fmt.Errorf(`dsig.Verify: unsupported signature algorithm %q`, alg) + } + + switch info.Family { + case HMAC: + return dispatchHMACVerify(key, info, payload, signature) + case RSA: + return dispatchRSAVerify(key, info, payload, signature) + case ECDSA: + return dispatchECDSAVerify(key, info, payload, signature) + case EdDSAFamily: + return dispatchEdDSAVerify(key, info, payload, signature) + default: + return fmt.Errorf(`dsig.Verify: unsupported signature family %q`, info.Family) + } +} + +func dispatchHMACVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(HMACFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid HMAC metadata`) + } + + var hmackey []byte + if err := toHMACKey(&hmackey, key); err != nil { + return fmt.Errorf(`dsig.Verify: %w`, err) + } + return VerifyHMAC(hmackey, payload, signature, meta.HashFunc) +} + +func dispatchRSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(RSAFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid RSA metadata`) + } + + var pubkey *rsa.PublicKey + + if cs, ok := key.(crypto.Signer); ok { + cpub := cs.Public() + switch cpub := cpub.(type) { + case rsa.PublicKey: + pubkey = &cpub + case *rsa.PublicKey: + pubkey = cpub + default: + return fmt.Errorf(`dsig.Verify: failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key) + } + } else { + var ok bool + pubkey, ok = key.(*rsa.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: failed to retrieve *rsa.PublicKey out of %T`, key) + } + } + + return VerifyRSA(pubkey, payload, signature, meta.Hash, meta.PSS) +} + +func dispatchECDSAVerify(key any, info AlgorithmInfo, payload, signature []byte) error { + meta, ok := info.Meta.(ECDSAFamilyMeta) + if !ok { + return fmt.Errorf(`dsig.Verify: invalid ECDSA metadata`) + } + + pubkey, cs, 
isCryptoSigner, err := ecdsaGetVerifierKey(key) + if err != nil { + return fmt.Errorf(`dsig.Verify: %w`, err) + } + if isCryptoSigner { + return VerifyECDSACryptoSigner(cs, payload, signature, meta.Hash) + } + return VerifyECDSA(pubkey, payload, signature, meta.Hash) +} + +func dispatchEdDSAVerify(key any, _ AlgorithmInfo, payload, signature []byte) error { + var pubkey ed25519.PublicKey + signer, ok := key.(crypto.Signer) + if ok { + v := signer.Public() + pubkey, ok = v.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v) + } + } else { + var ok bool + pubkey, ok = key.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`dsig.Verify: failed to retrieve ed25519.PublicKey out of %T`, key) + } + } + + return VerifyEdDSA(pubkey, payload, signature) +} + +func ecdsaGetVerifierKey(key any) (*ecdsa.PublicKey, crypto.Signer, bool, error) { + cs, isCryptoSigner := key.(crypto.Signer) + if isCryptoSigner { + switch key.(type) { + case ecdsa.PublicKey, *ecdsa.PublicKey: + // if it's ecdsa.PublicKey, it's more efficient to + // go through the non-crypto.Signer route. Set isCryptoSigner to false + isCryptoSigner = false + } + } + + if isCryptoSigner { + return nil, cs, true, nil + } + + pubkey, ok := key.(*ecdsa.PublicKey) + if !ok { + return nil, nil, false, fmt.Errorf(`invalid key type %T. 
*ecdsa.PublicKey is required`, key) + } + + return pubkey, nil, false, nil +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/.gitignore b/vendor/github.com/lestrrat-go/httprc/v3/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml b/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml new file mode 100644 index 0000000000..b3af8cfe12 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/.golangci.yml @@ -0,0 +1,95 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - dupl + - errorlint + - exhaustive + - forbidigo + - funcorder + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godot + - godox + - gosec + - gosmopolitan + - govet + - inamedparam + - ireturn + - lll + - maintidx + - makezero + - mnd + - nakedret + - nestif + - nlreturn + - noinlineerr + - nonamedreturns + - paralleltest + - tagliatelle + - testpackage + - thelper + - varnamelen + - wrapcheck + - wsl + - wsl_v5 + settings: + govet: + disable: + - shadow + - fieldalignment + enable-all: true + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - staticcheck + path: /*.go + text: 'ST1003: should not use underscores in package names' + - linters: + - revive + path: /*.go + text: don't use an underscore in package name + - linters: + - contextcheck + - exhaustruct + path: /*.go + - linters: + - errcheck + path: /main.go + - linters: + - errcheck + - errchkjson + - forcetypeassert + path: /*_test.go + 
- linters: + - forbidigo + path: /*_example_test.go + paths: + - third_party$ + - builtin$ + - examples$ +issues: + max-issues-per-linter: 0 + max-same-issues: 0 +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/lestrrat-go/httprc/v3/Changes b/vendor/github.com/lestrrat-go/httprc/v3/Changes new file mode 100644 index 0000000000..6a5eb8064a --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/Changes @@ -0,0 +1,34 @@ +Changes +======= + +v3.0.2 05 Dec 2025 +* Code changes mainly due to upgraded linter. +* github.com/lestrrat-go/option upgraded to v2 + +v3.0.1 18 Aug 2025 +* Refresh() no longer requires the resource to be ready. + +v3.0.0 5 Jun 2025 +[Breaking Changes] + * The entire API has been re-imagined for Go versions that allow typed parameters + +v2.0.0 19 Feb 2024 +[Breaking Changes] + * `Fetcher` type is no longer available. You probably want to provide + a custom HTTP client instead via httprc.WithHTTPClient()). 
+ * + +v1.0.4 19 Jul 2022 + * Fix sloppy API breakage + +v1.0.3 19 Jul 2022 + * Fix queue insertion in the middle of the queue (#7) + +v1.0.2 13 Jun 2022 + * Properly release a lock when the fetch fails (#5) + +v1.0.1 29 Mar 2022 + * Bump dependency for github.com/lestrrat-go/httpcc to v1.0.1 + +v1.0.0 29 Mar 2022 + * Initial release, refactored out of github.com/lestrrat-go/jwx diff --git a/vendor/github.com/lestrrat-go/httprc/v3/LICENSE b/vendor/github.com/lestrrat-go/httprc/v3/LICENSE new file mode 100644 index 0000000000..3e196892ca --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/lestrrat-go/httprc/v3/README.md b/vendor/github.com/lestrrat-go/httprc/v3/README.md new file mode 100644 index 0000000000..68239669a2 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/README.md @@ -0,0 +1,172 @@ +# github.com/lestrrat-go/httprc/v3 ![](https://github.com/lestrrat-go/httprc/v3/workflows/CI/badge.svg) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/httprc/v3.svg)](https://pkg.go.dev/github.com/lestrrat-go/httprc/v3) + +`httprc` is a HTTP "Refresh" Cache. Its aim is to cache a remote resource that +can be fetched via HTTP, but keep the cached content up-to-date based on periodic +refreshing. + +# Client + +A `httprc.Client` object is comprised of 3 parts: The user-facing controller API, +the main controller loop, and a set of workers that perform the actual fetching. + +The user-facing controller API is the object returned when you call `(httprc.Client).Start`. + +```go +ctrl, _ := client.Start(ctx) +``` + +# Controller API + +The controller API gives you access to the controller backend that runs asynchronously. +All methods take a `context.Context` object because they potentially block. You should +be careful to use `context.WithTimeout` to properly set a timeout if you cannot tolerate +a blocking operation. + +# Main Controller Loop + +The main controller loop is run asynchronously to the controller API. It is single threaded, +and it has two responsibilities. + +The first is to receive commands from the controller API, +and appropriately modify the state of the goroutine, i.e. modify the list of resources +it is watching, performing forced refreshes, etc. + +The other is to periodically wake up and go through the list of resources and re-fetch +ones that are past their TTL (in reality, each resource carries a "next-check" time, not +a TTL). The main controller loop itself does nothing more: it just kicks these checks periodically. 
+ +The interval between fetches is changed dynamically based on either the metadata carried +with the HTTP responses, such as `Cache-Control` and `Expires` headers, or a constant +interval set by the user for a given resource. Between these values, the main controller loop +will pick the shortest interval (but no less than 1 second) and checks if resources +need updating based on that value. + +For example, if a resource A has an expiry of 10 minutes and if resource B has an expiry of 5 +minutes, the main controller loop will attempt to wake up roughly every 5 minutes to check +on the resources. + +When the controller loop detects that a resource needs to be checked for freshness, +it will send the resource to the worker pool to be synced. + +# Interval calculation + +After the resource is synced, the next fetch is scheduled. The interval to the next +fetch is calculated either by using constant intervals, or by heuristics using values +from the `http.Response` object. + +If the constant interval is specified, no extra calculation is performed. If you specify +a constant interval of 15 minutes, the resource will be checked every 15 minutes. This is +predictable and reliable, but not necessarily efficient. + +If you do not specify a constant interval, the HTTP response is analyzed for +values in `Cache-Control` and `Expires` headers. These values will be compared against +maximum and minimum interval values, which default to 30 days and 15 minutes, respectively. +If the values obtained from the headers fall within that range, the value from the header is +used. If the value is larger than the maximum, the maximum is used. If the value is lower +than the minimum, the minimum is used. 
+ +# SYNOPSIS + + +```go +package httprc_test + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "time" + + "github.com/lestrrat-go/httprc/v3" +) + +func ExampleClient() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + type HelloWorld struct { + Hello string `json:"hello"` + } + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + json.NewEncoder(w).Encode(map[string]string{"hello": "world"}) + })) + + options := []httprc.NewClientOption{ + // By default the client will allow all URLs (which is what the option + // below is explicitly specifying). If you want to restrict what URLs + // are allowed, you can specify another whitelist. + // + // httprc.WithWhitelist(httprc.NewInsecureWhitelist()), + } + // If you would like to handle errors from asynchronous workers, you can specify a error sink. + // This is disabled in this example because the trace logs are dynamic + // and thus would interfere with the runnable example test. + // options = append(options, httprc.WithErrorSink(errsink.NewSlog(slog.New(slog.NewJSONHandler(os.Stdout, nil))))) + + // If you would like to see the trace logs, you can specify a trace sink. + // This is disabled in this example because the trace logs are dynamic + // and thus would interfere with the runnable example test. + // options = append(options, httprc.WithTraceSink(tracesink.NewSlog(slog.New(slog.NewJSONHandler(os.Stdout, nil))))) + + // Create a new client + cl := httprc.NewClient(options...) + + // Start the client, and obtain a Controller object + ctrl, err := cl.Start(ctx) + if err != nil { + fmt.Println(err.Error()) + return + } + // The following is required if you want to make sure that there are no + // dangling goroutines hanging around when you exit. For example, if you + // are running tests to check for goroutine leaks, you should call this + // function before the end of your test. 
+ defer ctrl.Shutdown(time.Second) + + // Create a new resource that is synchronized every so often + // + // By default the client will attempt to fetch the resource once + // as soon as it can, and then if no other metadata is provided, + // it will fetch the resource every 15 minutes. + // + // If the resource responds with a Cache-Control/Expires header, + // the client will attempt to respect that, and will try to fetch + // the resource again based on the values obtained from the headers. + r, err := httprc.NewResource[HelloWorld](srv.URL, httprc.JSONTransformer[HelloWorld]()) + if err != nil { + fmt.Println(err.Error()) + return + } + + // Add the resource to the controller, so that it starts fetching. + // By default, a call to `Add()` will block until the first fetch + // succeeds, via an implicit call to `r.Ready()` + // You can change this behavior if you specify the `WithWaitReady(false)` + // option. + ctrl.Add(ctx, r) + + // if you specified `httprc.WithWaitReady(false)` option, the fetch will happen + // "soon", but you're not guaranteed that it will happen before the next + // call to `Lookup()`. 
If you want to make sure that the resource is ready, + // you can call `Ready()` like so: + /* + { + tctx, tcancel := context.WithTimeout(ctx, time.Second) + defer tcancel() + if err := r.Ready(tctx); err != nil { + fmt.Println(err.Error()) + return + } + } + */ + m := r.Resource() + fmt.Println(m.Hello) + // OUTPUT: + // world +} +``` +source: [client_example_test.go](https://github.com/lestrrat-go/httprc/blob/refs/heads/v3/client_example_test.go) + diff --git a/vendor/github.com/lestrrat-go/httprc/v3/backend.go b/vendor/github.com/lestrrat-go/httprc/v3/backend.go new file mode 100644 index 0000000000..31f9fc07d3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/backend.go @@ -0,0 +1,235 @@ +package httprc + +import ( + "context" + "fmt" + "sync" + "time" +) + +func (c *ctrlBackend) adjustInterval(ctx context.Context, req adjustIntervalRequest) { + interval := roundupToSeconds(time.Until(req.resource.Next())) + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got adjust request (current tick interval=%s, next for %q=%s)", c.tickInterval, req.resource.URL(), interval)) + + if interval < time.Second { + interval = time.Second + } + + if c.tickInterval < interval { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: no adjusting required (time to next check %s > current tick interval %s)", interval, c.tickInterval)) + } else { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: adjusting tick interval to %s", interval)) + c.tickInterval = interval + c.check.Reset(interval) + } +} + +func (c *ctrlBackend) addResource(ctx context.Context, req addRequest) { + r := req.resource + if _, ok := c.items[r.URL()]; ok { + // Already exists + sendReply(ctx, req.reply, struct{}{}, errResourceAlreadyExists) + return + } + c.items[r.URL()] = r + + if r.MaxInterval() == 0 { + r.SetMaxInterval(c.defaultMaxInterval) + } + + if r.MinInterval() == 0 { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: set minimum interval to %s", c.defaultMinInterval)) + 
r.SetMinInterval(c.defaultMinInterval) + } + + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: added resource %q", r.URL())) + sendReply(ctx, req.reply, struct{}{}, nil) + c.SetTickInterval(time.Nanosecond) +} + +func (c *ctrlBackend) rmResource(ctx context.Context, req rmRequest) { + u := req.u + if _, ok := c.items[u]; !ok { + sendReply(ctx, req.reply, struct{}{}, errResourceNotFound) + return + } + + delete(c.items, u) + + minInterval := oneDay + for _, item := range c.items { + if d := item.MinInterval(); d < minInterval { + minInterval = d + } + } + + close(req.reply) + c.check.Reset(minInterval) +} + +func (c *ctrlBackend) refreshResource(ctx context.Context, req refreshRequest) { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] START %q", req.u)) + defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] END %q", req.u)) + u := req.u + + r, ok := c.items[u] + if !ok { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] %s is not registered", req.u)) + sendReply(ctx, req.reply, struct{}{}, errResourceNotFound) + return + } + + // Note: We don't wait for r.Ready() here because refresh should work + // regardless of whether the resource has been fetched before. This allows + // refresh to work with resources registered using WithWaitReady(false). 
+ + r.SetNext(time.Unix(0, 0)) + sendWorkerSynchronous(ctx, c.syncoutgoing, synchronousRequest{ + resource: r, + reply: req.reply, + }) + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: [refresh] sync request for %s sent to worker pool", req.u)) +} + +func (c *ctrlBackend) lookupResource(ctx context.Context, req lookupRequest) { + u := req.u + r, ok := c.items[u] + if !ok { + sendReply(ctx, req.reply, nil, errResourceNotFound) + return + } + sendReply(ctx, req.reply, r, nil) +} + +func (c *ctrlBackend) handleRequest(ctx context.Context, req any) { + switch req := req.(type) { + case adjustIntervalRequest: + c.adjustInterval(ctx, req) + case addRequest: + c.addResource(ctx, req) + case rmRequest: + c.rmResource(ctx, req) + case refreshRequest: + c.refreshResource(ctx, req) + case lookupRequest: + c.lookupResource(ctx, req) + default: + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: unknown request type %T", req)) + } +} + +func sendWorker(ctx context.Context, ch chan Resource, r Resource) { + r.SetBusy(true) + select { + case <-ctx.Done(): + case ch <- r: + } +} + +func sendWorkerSynchronous(ctx context.Context, ch chan synchronousRequest, r synchronousRequest) { + r.resource.SetBusy(true) + select { + case <-ctx.Done(): + case ch <- r: + } +} + +func sendReply[T any](ctx context.Context, ch chan backendResponse[T], v T, err error) { + defer close(ch) + select { + case <-ctx.Done(): + case ch <- backendResponse[T]{payload: v, err: err}: + } +} + +type ctrlBackend struct { + items map[string]Resource + outgoing chan Resource + syncoutgoing chan synchronousRequest + incoming chan any // incoming requests to the controller + traceSink TraceSink + tickInterval time.Duration + check *time.Ticker + defaultMaxInterval time.Duration + defaultMinInterval time.Duration +} + +func (c *ctrlBackend) loop(ctx context.Context, readywg, donewg *sync.WaitGroup) { + c.traceSink.Put(ctx, "httprc controller: starting main controller loop") + readywg.Done() + defer 
c.traceSink.Put(ctx, "httprc controller: stopping main controller loop") + defer donewg.Done() + for { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for request or tick (tick interval=%s)", c.tickInterval)) + select { + case req := <-c.incoming: + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: got request %T", req)) + c.handleRequest(ctx, req) + case t := <-c.check.C: + c.periodicCheck(ctx, t) + case <-ctx.Done(): + return + } + } +} + +func (c *ctrlBackend) periodicCheck(ctx context.Context, t time.Time) { + c.traceSink.Put(ctx, "httprc controller: START periodic check") + defer c.traceSink.Put(ctx, "httprc controller: END periodic check") + var minNext time.Time + var dispatched int + minInterval := -1 * time.Second + for _, item := range c.items { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: checking resource %q", item.URL())) + + next := item.Next() + if minNext.IsZero() || next.Before(minNext) { + minNext = next + } + + if interval := item.MinInterval(); minInterval < 0 || interval < minInterval { + minInterval = interval + } + + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q isBusy=%t, next(%s).After(%s)=%t", item.URL(), item.IsBusy(), next, t, next.After(t))) + if item.IsBusy() || next.After(t) { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is busy or not ready yet, skipping", item.URL())) + continue + } + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resource %q is ready, dispatching to worker pool", item.URL())) + + dispatched++ + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatching resource %q to worker pool", item.URL())) + sendWorker(ctx, c.outgoing, item) + } + + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: dispatched %d resources", dispatched)) + + // Next check is always at the earliest next check + 1 second. 
+ // The extra second makes sure that we are _past_ the actual next check time + // so we can send the resource to the worker pool + if interval := time.Until(minNext); interval > 0 { + c.SetTickInterval(roundupToSeconds(interval) + time.Second) + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resetting check intervanl to %s", c.tickInterval)) + } else { + // if we got here, either we have no resources, or all resources are busy. + // In this state, it's possible that the interval is less than 1 second, + // because we previously set it to a small value for an immediate refresh. + // in this case, we want to reset it to a sane value + if c.tickInterval < time.Second { + c.SetTickInterval(minInterval) + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: resetting check intervanl to %s after forced refresh", c.tickInterval)) + } + } + + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: next check in %s", c.tickInterval)) +} + +func (c *ctrlBackend) SetTickInterval(d time.Duration) { + // TODO synchronize + if d <= 0 { + d = time.Second // ensure positive interval + } + c.tickInterval = d + c.check.Reset(d) +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/client.go b/vendor/github.com/lestrrat-go/httprc/v3/client.go new file mode 100644 index 0000000000..05dfbb43f3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/client.go @@ -0,0 +1,185 @@ +package httprc + +import ( + "context" + "net/http" + "sync" + "time" + + "github.com/lestrrat-go/httprc/v3/errsink" + "github.com/lestrrat-go/httprc/v3/proxysink" + "github.com/lestrrat-go/httprc/v3/tracesink" +) + +// setupSink creates and starts a proxy for the given sink if it's not a Nop sink +// Returns the sink to use and a cancel function that should be chained with the original cancel +func setupSink[T any, S proxysink.Backend[T], NopType any](ctx context.Context, sink S, wg *sync.WaitGroup) (S, context.CancelFunc) { + if _, ok := any(sink).(NopType); ok { + return sink, func() {} + } + + 
proxy := proxysink.New[T](sink) + wg.Add(1) + go func(ctx context.Context, wg *sync.WaitGroup, proxy *proxysink.Proxy[T]) { + defer wg.Done() + proxy.Run(ctx) + }(ctx, wg, proxy) + + // proxy can be converted to one of the sink subtypes + s, ok := any(proxy).(S) + if !ok { + panic("type assertion failed: proxy cannot be converted to type S") + } + return s, proxy.Close +} + +// Client is the main entry point for the httprc package. +type Client struct { + mu sync.Mutex + httpcl HTTPClient + numWorkers int + running bool + errSink ErrorSink + traceSink TraceSink + wl Whitelist + defaultMaxInterval time.Duration + defaultMinInterval time.Duration +} + +// NewClient creates a new `httprc.Client` object. +// +// By default ALL urls are allowed. This may not be suitable for you if +// are using this in a production environment. You are encouraged to specify +// a whitelist using the `WithWhitelist` option. +// +// NOTE: In future versions, this function signature should be changed to +// return an error to properly handle option parsing failures. 
+func NewClient(options ...NewClientOption) *Client { + //nolint:staticcheck + var errSink ErrorSink = errsink.NewNop() + //nolint:staticcheck + var traceSink TraceSink = tracesink.NewNop() + var wl Whitelist = InsecureWhitelist{} + var httpcl HTTPClient = http.DefaultClient + + defaultMinInterval := DefaultMinInterval + defaultMaxInterval := DefaultMaxInterval + + numWorkers := DefaultWorkers + for _, option := range options { + switch option.Ident() { + case identHTTPClient{}: + _ = option.Value(&httpcl) + case identWorkers{}: + _ = option.Value(&numWorkers) + case identErrorSink{}: + _ = option.Value(&errSink) + case identTraceSink{}: + _ = option.Value(&traceSink) + case identWhitelist{}: + _ = option.Value(&wl) + } + } + + if numWorkers <= 0 { + numWorkers = 1 + } + return &Client{ + httpcl: httpcl, + numWorkers: numWorkers, + errSink: errSink, + traceSink: traceSink, + wl: wl, + + defaultMinInterval: defaultMinInterval, + defaultMaxInterval: defaultMaxInterval, + } +} + +// Start sets the client into motion. It will start a number of worker goroutines, +// and return a Controller object that you can use to control the execution of +// the client. +// +// If you attempt to call Start more than once, it will return an error. +func (c *Client) Start(octx context.Context) (Controller, error) { + c.mu.Lock() + if c.running { + c.mu.Unlock() + return nil, errAlreadyRunning + } + c.running = true + c.mu.Unlock() + + // DON'T CANCEL THIS IN THIS METHOD! It's the responsibility of the + // controller to cancel this context. 
+ ctx, cancel := context.WithCancel(octx) + + var donewg sync.WaitGroup + + // start proxy goroutines that will accept sink requests + // and forward them to the appropriate sink + errSink, errCancel := setupSink[error, ErrorSink, errsink.Nop](ctx, c.errSink, &donewg) + traceSink, traceCancel := setupSink[string, TraceSink, tracesink.Nop](ctx, c.traceSink, &donewg) + + // Chain the cancel functions + ocancel := cancel + cancel = func() { + ocancel() + errCancel() + traceCancel() + } + + chbuf := c.numWorkers + 1 + incoming := make(chan any, chbuf) + outgoing := make(chan Resource, chbuf) + syncoutgoing := make(chan synchronousRequest, chbuf) + + var readywg sync.WaitGroup + readywg.Add(c.numWorkers) + donewg.Add(c.numWorkers) + for range c.numWorkers { + wrk := worker{ + incoming: incoming, + next: outgoing, + nextsync: syncoutgoing, + errSink: errSink, + traceSink: traceSink, + httpcl: c.httpcl, + } + go wrk.Run(ctx, &readywg, &donewg) + } + + tickInterval := oneDay + ctrl := &controller{ + cancel: cancel, + incoming: incoming, + shutdown: make(chan struct{}), + traceSink: traceSink, + wl: c.wl, + } + + backend := &ctrlBackend{ + items: make(map[string]Resource), + outgoing: outgoing, + syncoutgoing: syncoutgoing, + incoming: incoming, + traceSink: traceSink, + tickInterval: tickInterval, + check: time.NewTicker(tickInterval), + + defaultMinInterval: c.defaultMinInterval, + defaultMaxInterval: c.defaultMaxInterval, + } + donewg.Add(1) + readywg.Add(1) + go backend.loop(ctx, &readywg, &donewg) + + go func(wg *sync.WaitGroup, ch chan struct{}) { + wg.Wait() + close(ch) + }(&donewg, ctrl.shutdown) + + readywg.Wait() + + return ctrl, nil +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/controller.go b/vendor/github.com/lestrrat-go/httprc/v3/controller.go new file mode 100644 index 0000000000..1ad9d7b6c6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/controller.go @@ -0,0 +1,187 @@ +package httprc + +import ( + "context" + "fmt" + "time" +) + +type 
Controller interface { + // Add adds a new `http.Resource` to the controller. If the resource already exists, + // it will return an error. + Add(context.Context, Resource, ...AddOption) error + + // Lookup a `httprc.Resource` by its URL. If the resource does not exist, it + // will return an error. + Lookup(context.Context, string) (Resource, error) + + // Remove a `httprc.Resource` from the controller by its URL. If the resource does + // not exist, it will return an error. + Remove(context.Context, string) error + + // Refresh forces a resource to be refreshed immediately. If the resource does + // not exist, or if the refresh fails, it will return an error. + Refresh(context.Context, string) error + + ShutdownContext(context.Context) error + Shutdown(time.Duration) error +} + +type controller struct { + cancel context.CancelFunc + incoming chan any // incoming requests to the controller + shutdown chan struct{} + traceSink TraceSink + wl Whitelist +} + +// Shutdown is a convenience function that calls ShutdownContext with a +// context that has a timeout of `timeout`. +func (c *controller) Shutdown(timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return c.ShutdownContext(ctx) +} + +// ShutdownContext stops the client and all associated goroutines, and waits for them +// to finish. If the context is canceled, the function will return immediately: +// there fore you should not use the context you used to start the client (because +// presumably it's already canceled). +// +// Waiting for the client shutdown will also ensure that all sinks are properly +// flushed. 
+func (c *controller) ShutdownContext(ctx context.Context) error { + c.cancel() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.shutdown: + return nil + } +} + +type ctrlRequest[T any] struct { + reply chan T + resource Resource + u string +} +type addRequest ctrlRequest[backendResponse[struct{}]] +type rmRequest ctrlRequest[backendResponse[struct{}]] +type refreshRequest ctrlRequest[backendResponse[struct{}]] +type lookupRequest ctrlRequest[backendResponse[Resource]] +type synchronousRequest ctrlRequest[backendResponse[struct{}]] +type adjustIntervalRequest struct { + resource Resource +} + +type backendResponse[T any] struct { + payload T + err error +} + +func sendBackend[TReq any, TB any](ctx context.Context, backendCh chan any, v TReq, replyCh chan backendResponse[TB]) (TB, error) { + select { + case <-ctx.Done(): + case backendCh <- v: + } + + select { + case <-ctx.Done(): + var zero TB + return zero, ctx.Err() + case res := <-replyCh: + return res.payload, res.err + } +} + +// Lookup returns a resource by its URL. If the resource does not exist, it +// will return an error. +// +// Unfortunately, due to the way typed parameters are handled in Go, we can only +// return a Resource object (and not a ResourceBase[T] object). This means that +// you will either need to use the `Resource.Get()` method or use a type +// assertion to obtain a `ResourceBase[T]` to get to the actual object you are +// looking for +func (c *controller) Lookup(ctx context.Context, u string) (Resource, error) { + reply := make(chan backendResponse[Resource], 1) + req := lookupRequest{ + reply: reply, + u: u, + } + return sendBackend[lookupRequest, Resource](ctx, c.incoming, req, reply) +} + +// Add adds a new resource to the controller. If the resource already +// exists, it will return an error. +// +// By default this function will automatically wait for the resource to be +// fetched once (by calling `r.Ready()`). 
Note that the `r.Ready()` call will NOT +// timeout unless you configure your context object with `context.WithTimeout`. +// To disable waiting, you can specify the `WithWaitReady(false)` option. +func (c *controller) Add(ctx context.Context, r Resource, options ...AddOption) error { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: START Add(%q)", r.URL())) + defer c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: END Add(%q)", r.URL())) + waitReady := true + for _, option := range options { + switch option.Ident() { + case identWaitReady{}: + if err := option.Value(&waitReady); err != nil { + return fmt.Errorf(`httprc.Controller.Add: failed to parse WaitReady option: %w`, err) + } + } + } + + if !c.wl.IsAllowed(r.URL()) { + return fmt.Errorf(`httprc.Controller.AddResource: cannot add %q: %w`, r.URL(), errBlockedByWhitelist) + } + + reply := make(chan backendResponse[struct{}], 1) + req := addRequest{ + reply: reply, + resource: r, + } + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: sending add request for %q to backend", r.URL())) + if _, err := sendBackend[addRequest, struct{}](ctx, c.incoming, req, reply); err != nil { + return err + } + + if waitReady { + c.traceSink.Put(ctx, fmt.Sprintf("httprc controller: waiting for resource %q to be ready", r.URL())) + if err := r.Ready(ctx); err != nil { + return err + } + } + return nil +} + +// Remove removes a resource from the controller. If the resource does +// not exist, it will return an error. +func (c *controller) Remove(ctx context.Context, u string) error { + reply := make(chan backendResponse[struct{}], 1) + req := rmRequest{ + reply: reply, + u: u, + } + if _, err := sendBackend[rmRequest, struct{}](ctx, c.incoming, req, reply); err != nil { + return err + } + return nil +} + +// Refresh forces a resource to be refreshed immediately. If the resource does +// not exist, or if the refresh fails, it will return an error. 
+// +// This function is synchronous, and will block until the resource has been refreshed. +func (c *controller) Refresh(ctx context.Context, u string) error { + reply := make(chan backendResponse[struct{}], 1) + req := refreshRequest{ + reply: reply, + u: u, + } + + if _, err := sendBackend[refreshRequest, struct{}](ctx, c.incoming, req, reply); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/errors.go b/vendor/github.com/lestrrat-go/httprc/v3/errors.go new file mode 100644 index 0000000000..1152ba947f --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/errors.go @@ -0,0 +1,57 @@ +package httprc + +import "errors" + +var errResourceAlreadyExists = errors.New(`resource already exists`) + +func ErrResourceAlreadyExists() error { + return errResourceAlreadyExists +} + +var errAlreadyRunning = errors.New(`client is already running`) + +func ErrAlreadyRunning() error { + return errAlreadyRunning +} + +var errResourceNotFound = errors.New(`resource not found`) + +func ErrResourceNotFound() error { + return errResourceNotFound +} + +var errTransformerRequired = errors.New(`transformer is required`) + +func ErrTransformerRequired() error { + return errTransformerRequired +} + +var errURLCannotBeEmpty = errors.New(`URL cannot be empty`) + +func ErrURLCannotBeEmpty() error { + return errURLCannotBeEmpty +} + +var errUnexpectedStatusCode = errors.New(`unexpected status code`) + +func ErrUnexpectedStatusCode() error { + return errUnexpectedStatusCode +} + +var errTransformerFailed = errors.New(`failed to transform response body`) + +func ErrTransformerFailed() error { + return errTransformerFailed +} + +var errRecoveredFromPanic = errors.New(`recovered from panic`) + +func ErrRecoveredFromPanic() error { + return errRecoveredFromPanic +} + +var errBlockedByWhitelist = errors.New(`blocked by whitelist`) + +func ErrBlockedByWhitelist() error { + return errBlockedByWhitelist +} diff --git 
a/vendor/github.com/lestrrat-go/httprc/v3/errsink/errsink.go b/vendor/github.com/lestrrat-go/httprc/v3/errsink/errsink.go new file mode 100644 index 0000000000..03d128ffcd --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/errsink/errsink.go @@ -0,0 +1,59 @@ +package errsink + +import ( + "context" + "log/slog" +) + +type Interface interface { + Put(context.Context, error) +} + +// Nop is an ErrorSink that does nothing. It does not require +// any initialization, so the zero value can be used. +type Nop struct{} + +// NewNop returns a new NopErrorSink object. The constructor +// is provided for consistency. +func NewNop() Interface { + return Nop{} +} + +// Put for NopErrorSink does nothing. +func (Nop) Put(context.Context, error) {} + +type SlogLogger interface { + Log(context.Context, slog.Level, string, ...any) +} + +type slogSink struct { + logger SlogLogger +} + +// NewSlog returns a new ErrorSink that logs errors using the provided slog.Logger +func NewSlog(l SlogLogger) Interface { + return &slogSink{ + logger: l, + } +} + +func (s *slogSink) Put(ctx context.Context, v error) { + s.logger.Log(ctx, slog.LevelError, v.Error()) +} + +// FuncSink is an ErrorSink that calls a function with the error. +type FuncSink struct { + fn func(context.Context, error) +} + +// NewFunc returns a new FuncSink that calls the provided function with errors. +func NewFunc(fn func(context.Context, error)) Interface { + return &FuncSink{fn: fn} +} + +// Put calls the function with the error. 
+func (f *FuncSink) Put(ctx context.Context, err error) { + if f.fn != nil { + f.fn(ctx, err) + } +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/httprc.go b/vendor/github.com/lestrrat-go/httprc/v3/httprc.go new file mode 100644 index 0000000000..fc324b771f --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/httprc.go @@ -0,0 +1,90 @@ +package httprc + +import ( + "context" + "net/http" + "time" + + "github.com/lestrrat-go/httprc/v3/errsink" + "github.com/lestrrat-go/httprc/v3/tracesink" +) + +// Buffer size constants +const ( + // ReadBufferSize is the default buffer size for reading HTTP responses (10MB) + ReadBufferSize = 1024 * 1024 * 10 + // MaxBufferSize is the maximum allowed buffer size (1GB) + MaxBufferSize = 1024 * 1024 * 1000 +) + +// Client worker constants +const ( + // DefaultWorkers is the default number of worker goroutines + DefaultWorkers = 5 +) + +// Interval constants +const ( + // DefaultMaxInterval is the default maximum interval between fetches (30 days) + DefaultMaxInterval = 24 * time.Hour * 30 + // DefaultMinInterval is the default minimum interval between fetches (15 minutes) + DefaultMinInterval = 15 * time.Minute + // oneDay is used internally for time calculations + oneDay = 24 * time.Hour +) + +// utility to round up intervals to the nearest second +func roundupToSeconds(d time.Duration) time.Duration { + if diff := d % time.Second; diff > 0 { + return d + time.Second - diff + } + return d +} + +// ErrorSink is an interface that abstracts a sink for errors. +type ErrorSink = errsink.Interface + +type TraceSink = tracesink.Interface + +// HTTPClient is an interface that abstracts a "net/http".Client, so that +// users can provide their own implementation of the HTTP client, if need be. +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Transformer is used to convert the body of an HTTP response into an appropriate +// object of type T. 
+type Transformer[T any] interface { + Transform(context.Context, *http.Response) (T, error) +} + +// TransformFunc is a function type that implements the Transformer interface. +type TransformFunc[T any] func(context.Context, *http.Response) (T, error) + +func (f TransformFunc[T]) Transform(ctx context.Context, res *http.Response) (T, error) { + return f(ctx, res) +} + +// Resource is a single resource that can be retrieved via HTTP, and (possibly) transformed +// into an arbitrary object type. +// +// Realistically, there is no need for third-parties to implement this interface. This exists +// to provide a way to aggregate `httprc.ResourceBase` objects with different specialized types +// into a single collection. +// +// See ResourceBase for details +type Resource interface { //nolint:interfacebloat + Get(any) error + Next() time.Time + SetNext(time.Time) + URL() string + Sync(context.Context) error + ConstantInterval() time.Duration + MaxInterval() time.Duration + SetMaxInterval(time.Duration) + MinInterval() time.Duration + SetMinInterval(time.Duration) + IsBusy() bool + SetBusy(bool) + Ready(context.Context) error +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/options.go b/vendor/github.com/lestrrat-go/httprc/v3/options.go new file mode 100644 index 0000000000..40cf891b15 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/options.go @@ -0,0 +1,144 @@ +package httprc + +import ( + "time" + + "github.com/lestrrat-go/option/v2" +) + +type NewClientOption interface { + option.Interface + newClientOption() +} + +type newClientOption struct { + option.Interface +} + +func (newClientOption) newClientOption() {} + +type identWorkers struct{} + +// WithWorkers specifies the number of concurrent workers to use for the client. +// If n is less than or equal to 0, the client will use a single worker. 
+func WithWorkers(n int) NewClientOption { + return newClientOption{option.New(identWorkers{}, n)} +} + +type identErrorSink struct{} + +// WithErrorSink specifies the error sink to use for the client. +// If not specified, the client will use a NopErrorSink. +func WithErrorSink(sink ErrorSink) NewClientOption { + return newClientOption{option.New(identErrorSink{}, sink)} +} + +type identTraceSink struct{} + +// WithTraceSink specifies the trace sink to use for the client. +// If not specified, the client will use a NopTraceSink. +func WithTraceSink(sink TraceSink) NewClientOption { + return newClientOption{option.New(identTraceSink{}, sink)} +} + +type identWhitelist struct{} + +// WithWhitelist specifies the whitelist to use for the client. +// If not specified, the client will use a BlockAllWhitelist. +func WithWhitelist(wl Whitelist) NewClientOption { + return newClientOption{option.New(identWhitelist{}, wl)} +} + +type NewResourceOption interface { + option.Interface + newResourceOption() +} + +type newResourceOption struct { + option.Interface +} + +func (newResourceOption) newResourceOption() {} + +type NewClientResourceOption interface { + option.Interface + newResourceOption() + newClientOption() +} + +type newClientResourceOption struct { + option.Interface +} + +func (newClientResourceOption) newResourceOption() {} +func (newClientResourceOption) newClientOption() {} + +type identHTTPClient struct{} + +// WithHTTPClient specifies the HTTP client to use for the client. +// If not specified, the client will use http.DefaultClient. +// +// This option can be passed to NewClient or NewResource. +func WithHTTPClient(cl HTTPClient) NewClientResourceOption { + return newClientResourceOption{option.New(identHTTPClient{}, cl)} +} + +type identMinimumInterval struct{} + +// WithMinInterval specifies the minimum interval between fetches. +// +// This option affects the dynamic calculation of the interval between fetches. 
+// If the value calculated from the http.Response is less than the this value, +// the client will use this value instead. +func WithMinInterval(d time.Duration) NewResourceOption { + return newResourceOption{option.New(identMinimumInterval{}, d)} +} + +type identMaximumInterval struct{} + +// WithMaxInterval specifies the maximum interval between fetches. +// +// This option affects the dynamic calculation of the interval between fetches. +// If the value calculated from the http.Response is greater than the this value, +// the client will use this value instead. +func WithMaxInterval(d time.Duration) NewResourceOption { + return newResourceOption{option.New(identMaximumInterval{}, d)} +} + +type identConstantInterval struct{} + +// WithConstantInterval specifies the interval between fetches. When you +// specify this option, the client will fetch the resource at the specified +// intervals, regardless of the response's Cache-Control or Expires headers. +// +// By default this option is disabled. +func WithConstantInterval(d time.Duration) NewResourceOption { + return newResourceOption{option.New(identConstantInterval{}, d)} +} + +type AddOption interface { + option.Interface + newAddOption() +} + +type addOption struct { + option.Interface +} + +func (addOption) newAddOption() {} + +type identWaitReady struct{} + +// WithWaitReady specifies whether the client should wait for the resource to be +// ready before returning from the Add method. +// +// By default, the client will wait for the resource to be ready before returning. +// If you specify this option with a value of false, the client will not wait for +// the resource to be fully registered, which is usually not what you want. +// This option exists to accommodate for cases where you for some reason want to +// add a resource to the controller, but want to do something else before +// you wait for it. 
Make sure to call `r.Ready()` later on to ensure that +// the resource is ready before you try to access it. +func WithWaitReady(b bool) AddOption { + return addOption{option.New(identWaitReady{}, b)} +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go b/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go new file mode 100644 index 0000000000..f290422d6c --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/proxysink/proxysink.go @@ -0,0 +1,135 @@ +package proxysink + +import ( + "context" + "sync" +) + +type Backend[T any] interface { + Put(context.Context, T) +} + +// Proxy is used to send values through a channel. This is used to +// serialize calls to underlying sinks. +type Proxy[T any] struct { + mu *sync.Mutex + cancel context.CancelFunc + ch chan T + cond *sync.Cond + pending []T + backend Backend[T] + closed bool +} + +func New[T any](b Backend[T]) *Proxy[T] { + mu := &sync.Mutex{} + return &Proxy[T]{ + ch: make(chan T, 1), + mu: mu, + cond: sync.NewCond(mu), + backend: b, + cancel: func() {}, + } +} + +func (p *Proxy[T]) Run(ctx context.Context) { + defer p.cond.Broadcast() + + p.mu.Lock() + ctx, cancel := context.WithCancel(ctx) + p.cancel = cancel + p.mu.Unlock() + + go p.controlloop(ctx) + go p.flushloop(ctx) + + <-ctx.Done() +} + +func (p *Proxy[T]) controlloop(ctx context.Context) { + defer p.cond.Broadcast() + for { + select { + case <-ctx.Done(): + return + case r := <-p.ch: + p.mu.Lock() + p.pending = append(p.pending, r) + p.mu.Unlock() + } + p.cond.Broadcast() + } +} + +func (p *Proxy[T]) flushloop(ctx context.Context) { + const defaultPendingSize = 10 + pending := make([]T, defaultPendingSize) + for { + select { + case <-ctx.Done(): + p.mu.Lock() + if len(p.pending) <= 0 { + p.mu.Unlock() + return + } + default: + } + + p.mu.Lock() + for len(p.pending) <= 0 { + select { + case <-ctx.Done(): + p.mu.Unlock() + return + default: + p.cond.Wait() + } + } + + // extract all pending values, and clear the 
shared slice + if cap(pending) < len(p.pending) { + pending = make([]T, len(p.pending)) + } else { + pending = pending[:len(p.pending)] + } + copy(pending, p.pending) + if cap(p.pending) > defaultPendingSize { + p.pending = make([]T, 0, defaultPendingSize) + } else { + p.pending = p.pending[:0] + } + p.mu.Unlock() + + for _, v := range pending { + // send to sink serially + p.backend.Put(ctx, v) + } + } +} + +func (p *Proxy[T]) Put(ctx context.Context, v T) { + p.mu.Lock() + if p.closed { + p.mu.Unlock() + return + } + p.mu.Unlock() + + select { + case <-ctx.Done(): + return + case p.ch <- v: + return + } +} + +func (p *Proxy[T]) Close() { + p.mu.Lock() + defer p.mu.Unlock() + + if !p.closed { + p.closed = true + } + p.cancel() + p.cond.Broadcast() +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/resource.go b/vendor/github.com/lestrrat-go/httprc/v3/resource.go new file mode 100644 index 0000000000..0f0d140d27 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/resource.go @@ -0,0 +1,366 @@ +package httprc + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/httpcc" + "github.com/lestrrat-go/httprc/v3/tracesink" +) + +// ResourceBase is a generic Resource type +type ResourceBase[T any] struct { + u string + ready chan struct{} // closed when the resource is ready (i.e. after first successful fetch) + once sync.Once + httpcl HTTPClient + t Transformer[T] + r atomic.Value + next atomic.Value + interval time.Duration + minInterval atomic.Int64 + maxInterval atomic.Int64 + busy atomic.Bool +} + +// NewResource creates a new Resource object which after fetching the +// resource from the URL, will transform the response body using the +// provided Transformer to an object of type T. +// +// This function will return an error if the URL is not a valid URL +// (i.e. it cannot be parsed by url.Parse), or if the transformer is nil. 
+func NewResource[T any](s string, transformer Transformer[T], options ...NewResourceOption) (*ResourceBase[T], error) { + var httpcl HTTPClient + var interval time.Duration + minInterval := DefaultMinInterval + maxInterval := DefaultMaxInterval + for _, option := range options { + switch option.Ident() { + case identHTTPClient{}: + if err := option.Value(&httpcl); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: failed to parse HTTPClient option: %w`, err) + } + case identMinimumInterval{}: + if err := option.Value(&minInterval); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: failed to parse MinimumInterval option: %w`, err) + } + case identMaximumInterval{}: + if err := option.Value(&maxInterval); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: failed to parse MaximumInterval option: %w`, err) + } + case identConstantInterval{}: + if err := option.Value(&interval); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: failed to parse ConstantInterval option: %w`, err) + } + } + } + if transformer == nil { + return nil, fmt.Errorf(`httprc.NewResource: %w`, errTransformerRequired) + } + + if s == "" { + return nil, fmt.Errorf(`httprc.NewResource: %w`, errURLCannotBeEmpty) + } + + if _, err := url.Parse(s); err != nil { + return nil, fmt.Errorf(`httprc.NewResource: %w`, err) + } + r := &ResourceBase[T]{ + u: s, + httpcl: httpcl, + t: transformer, + interval: interval, + ready: make(chan struct{}), + } + if httpcl != nil { + r.httpcl = httpcl + } + r.minInterval.Store(int64(minInterval)) + r.maxInterval.Store(int64(maxInterval)) + r.SetNext(time.Unix(0, 0)) // initially, it should be fetched immediately + return r, nil +} + +// URL returns the URL of the resource. +func (r *ResourceBase[T]) URL() string { + return r.u +} + +// Ready returns an empty error when the resource is ready. If the context +// is canceled before the resource is ready, it will return the error from +// the context. 
+func (r *ResourceBase[T]) Ready(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-r.ready: + return nil + } +} + +// Get assigns the value of the resource to the provided pointer. +// If using the `httprc.ResourceBase[T]` type directly, you can use the `Resource()` +// method to get the resource directly. +// +// This method exists because parametric types cannot be assigned to a single object type +// that return different return values of the specialized type. i.e. for resources +// `ResourceBase[A]` and `ResourceBase[B]`, we cannot have a single interface that can +// be assigned to the same interface type `X` that expects a `Resource()` method that +// returns `A` or `B` depending on the type of the resource. When accessing the +// resource through the `httprc.Resource` interface, use this method to obtain the +// stored value. +func (r *ResourceBase[T]) Get(dst any) error { + return blackmagic.AssignIfCompatible(dst, r.Resource()) +} + +// Resource returns the last fetched resource. If the resource has not been +// fetched yet, this will return the zero value of type T. +// +// If you would rather wait until the resource is fetched, you can use the +// `Ready()` method to wait until the resource is ready (i.e. fetched at least once). 
+func (r *ResourceBase[T]) Resource() T { + v := r.r.Load() + switch v := v.(type) { + case T: + return v + default: + var zero T + return zero + } +} + +func (r *ResourceBase[T]) Next() time.Time { + //nolint:forcetypeassert + return r.next.Load().(time.Time) +} + +func (r *ResourceBase[T]) SetNext(v time.Time) { + r.next.Store(v) +} + +func (r *ResourceBase[T]) ConstantInterval() time.Duration { + return r.interval +} + +func (r *ResourceBase[T]) MaxInterval() time.Duration { + return time.Duration(r.maxInterval.Load()) +} + +func (r *ResourceBase[T]) MinInterval() time.Duration { + return time.Duration(r.minInterval.Load()) +} + +func (r *ResourceBase[T]) SetMaxInterval(v time.Duration) { + r.maxInterval.Store(int64(v)) +} + +func (r *ResourceBase[T]) SetMinInterval(v time.Duration) { + r.minInterval.Store(int64(v)) +} + +func (r *ResourceBase[T]) SetBusy(v bool) { + r.busy.Store(v) +} + +func (r *ResourceBase[T]) IsBusy() bool { + return r.busy.Load() +} + +// limitedBody is a wrapper around an io.Reader that will only read up to +// MaxBufferSize bytes. 
This is provided to prevent the user from accidentally +// reading a huge response body into memory +type limitedBody struct { + rdr io.Reader + close func() error +} + +func (l *limitedBody) Read(p []byte) (n int, err error) { + return l.rdr.Read(p) +} + +func (l *limitedBody) Close() error { + return l.close() +} + +type traceSinkKey struct{} + +func withTraceSink(ctx context.Context, sink TraceSink) context.Context { + return context.WithValue(ctx, traceSinkKey{}, sink) +} + +func traceSinkFromContext(ctx context.Context) TraceSink { + if v := ctx.Value(traceSinkKey{}); v != nil { + //nolint:forcetypeassert + return v.(TraceSink) + } + return tracesink.Nop{} +} + +type httpClientKey struct{} + +func withHTTPClient(ctx context.Context, cl HTTPClient) context.Context { + return context.WithValue(ctx, httpClientKey{}, cl) +} + +func httpClientFromContext(ctx context.Context) HTTPClient { + if v := ctx.Value(httpClientKey{}); v != nil { + //nolint:forcetypeassert + return v.(HTTPClient) + } + return http.DefaultClient +} + +func (r *ResourceBase[T]) Sync(ctx context.Context) error { + traceSink := traceSinkFromContext(ctx) + httpcl := r.httpcl + if httpcl == nil { + httpcl = httpClientFromContext(ctx) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, r.u, nil) + if err != nil { + return fmt.Errorf(`httprc.Resource.Sync: failed to create request: %w`, err) + } + + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: fetching %q", r.u)) + res, err := httpcl.Do(req) + if err != nil { + return fmt.Errorf(`httprc.Resource.Sync: failed to execute HTTP request: %w`, err) + } + defer res.Body.Close() + + next := r.calculateNextRefreshTime(ctx, res) + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: next refresh time for %q is %v", r.u, next)) + r.SetNext(next) + + if res.StatusCode != http.StatusOK { + return fmt.Errorf(`httprc.Resource.Sync: %w (status code=%d, url=%q)`, errUnexpectedStatusCode, res.StatusCode, r.u) + } + + // replace the body of 
the response with a limited reader that + // will only read up to MaxBufferSize bytes + res.Body = &limitedBody{ + rdr: &io.LimitedReader{R: res.Body, N: MaxBufferSize}, + close: res.Body.Close, + } + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: transforming %q", r.u)) + v, err := r.transform(ctx, res) + if err != nil { + return fmt.Errorf(`httprc.Resource.Sync: %w: %w`, errTransformerFailed, err) + } + + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: storing new value for %q", r.u)) + r.r.Store(v) + r.once.Do(func() { close(r.ready) }) + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: stored value for %q", r.u)) + return nil +} + +func (r *ResourceBase[T]) transform(ctx context.Context, res *http.Response) (ret T, gerr error) { + // Protect the call to Transform with a defer/recover block, so that even + // if the Transform method panics, we can recover from it and return an error + defer func() { + if recovered := recover(); recovered != nil { + gerr = fmt.Errorf(`httprc.Resource.transform: %w: %v`, errRecoveredFromPanic, recovered) + } + }() + return r.t.Transform(ctx, res) +} + +func (r *ResourceBase[T]) determineNextFetchInterval(ctx context.Context, name string, fromHeader, minValue, maxValue time.Duration) time.Duration { + traceSink := traceSinkFromContext(ctx) + + if fromHeader > maxValue { + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s %s > maximum interval, using maximum interval %s", r.URL(), name, maxValue)) + return maxValue + } + + if fromHeader < minValue { + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s %s < minimum interval, using minimum interval %s", r.URL(), name, minValue)) + return minValue + } + + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Using %s (%s)", r.URL(), name, fromHeader)) + return fromHeader +} + +func (r *ResourceBase[T]) calculateNextRefreshTime(ctx context.Context, res *http.Response) time.Time { + traceSink := traceSinkFromContext(ctx) + now := time.Now() + + // If 
constant interval is set, use that regardless of what the + // response headers say. + if interval := r.ConstantInterval(); interval > 0 { + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Explicit interval set, using value %s", r.URL(), interval)) + return now.Add(interval) + } + + if interval := r.extractCacheControlMaxAge(ctx, res); interval > 0 { + return now.Add(interval) + } + + if interval := r.extractExpiresInterval(ctx, res); interval > 0 { + return now.Add(interval) + } + + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s No cache-control/expires headers found, using minimum interval", r.URL())) + return now.Add(r.MinInterval()) +} + +func (r *ResourceBase[T]) extractCacheControlMaxAge(ctx context.Context, res *http.Response) time.Duration { + traceSink := traceSinkFromContext(ctx) + + v := res.Header.Get(`Cache-Control`) + if v == "" { + return 0 + } + + dir, err := httpcc.ParseResponse(v) + if err != nil { + return 0 + } + + maxAge, ok := dir.MaxAge() + if !ok { + return 0 + } + + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Cache-Control=max-age directive set (%d)", r.URL(), maxAge)) + return r.determineNextFetchInterval( + ctx, + "max-age", + time.Duration(maxAge)*time.Second, + r.MinInterval(), + r.MaxInterval(), + ) +} + +func (r *ResourceBase[T]) extractExpiresInterval(ctx context.Context, res *http.Response) time.Duration { + traceSink := traceSinkFromContext(ctx) + + v := res.Header.Get(`Expires`) + if v == "" { + return 0 + } + + expires, err := http.ParseTime(v) + if err != nil { + return 0 + } + + traceSink.Put(ctx, fmt.Sprintf("httprc.Resource.Sync: %s Expires header set (%s)", r.URL(), expires)) + return r.determineNextFetchInterval( + ctx, + "expires", + time.Until(expires), + r.MinInterval(), + r.MaxInterval(), + ) +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/tracesink/tracesink.go b/vendor/github.com/lestrrat-go/httprc/v3/tracesink/tracesink.go new file mode 100644 index 0000000000..b8400a94ae --- 
/dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/tracesink/tracesink.go @@ -0,0 +1,52 @@ +package tracesink + +import ( + "context" + "log/slog" +) + +type Interface interface { + Put(context.Context, string) +} + +// Nop is an ErrorSink that does nothing. It does not require +// any initialization, so the zero value can be used. +type Nop struct{} + +// NewNop returns a new NopTraceSink object. The constructor +// is provided for consistency. +func NewNop() Interface { + return Nop{} +} + +// Put for NopTraceSink does nothing. +func (Nop) Put(context.Context, string) {} + +type slogSink struct { + level slog.Level + logger SlogLogger +} + +type SlogLogger interface { + Log(context.Context, slog.Level, string, ...any) +} + +// NewSlog returns a new ErrorSink that logs errors using the provided slog.Logger +func NewSlog(l SlogLogger) Interface { + return &slogSink{ + level: slog.LevelInfo, + logger: l, + } +} + +func (s *slogSink) Put(ctx context.Context, v string) { + s.logger.Log(ctx, s.level, v) +} + +// Func is a TraceSink that calls a function with the trace message. +type Func func(context.Context, string) + +// Put calls the function with the trace message. +func (f Func) Put(ctx context.Context, msg string) { + f(ctx, msg) +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/transformer.go b/vendor/github.com/lestrrat-go/httprc/v3/transformer.go new file mode 100644 index 0000000000..2bd0635a2c --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/transformer.go @@ -0,0 +1,37 @@ +package httprc + +import ( + "context" + "encoding/json" + "io" + "net/http" +) + +type bytesTransformer struct{} + +// BytesTransformer returns a Transformer that reads the entire response body +// as a byte slice. 
This is the default Transformer used by httprc.Client +func BytesTransformer() Transformer[[]byte] { + return bytesTransformer{} +} + +func (bytesTransformer) Transform(_ context.Context, res *http.Response) ([]byte, error) { + return io.ReadAll(res.Body) +} + +type jsonTransformer[T any] struct{} + +// JSONTransformer returns a Transformer that decodes the response body as JSON +// into the provided type T. +func JSONTransformer[T any]() Transformer[T] { + return jsonTransformer[T]{} +} + +func (jsonTransformer[T]) Transform(_ context.Context, res *http.Response) (T, error) { + var v T + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + var zero T + return zero, err + } + return v, nil +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/whitelist.go b/vendor/github.com/lestrrat-go/httprc/v3/whitelist.go new file mode 100644 index 0000000000..74ef2a1be6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/whitelist.go @@ -0,0 +1,113 @@ +package httprc + +import ( + "regexp" + "sync" +) + +// Whitelist is an interface that allows you to determine if a given URL is allowed +// or not. Implementations of this interface can be used to restrict the URLs that +// the client can access. +// +// By default all URLs are allowed, but this may not be ideal in production environments +// for security reasons. +// +// This exists because you might use this module to store resources provided by +// user of your application, in which case you cannot necessarily trust that the +// URLs are safe. +// +// You will HAVE to provide some sort of whitelist. +type Whitelist interface { + IsAllowed(string) bool +} + +// WhitelistFunc is a function type that implements the Whitelist interface. +type WhitelistFunc func(string) bool + +func (f WhitelistFunc) IsAllowed(u string) bool { return f(u) } + +// BlockAllWhitelist is a Whitelist implementation that blocks all URLs. 
+type BlockAllWhitelist struct{} + +// NewBlockAllWhitelist creates a new BlockAllWhitelist instance. It is safe to +// use the zero value of this type; this constructor is provided for consistency. +func NewBlockAllWhitelist() BlockAllWhitelist { return BlockAllWhitelist{} } + +func (BlockAllWhitelist) IsAllowed(_ string) bool { return false } + +// InsecureWhitelist is a Whitelist implementation that allows all URLs. Be careful +// when using this in your production code: make sure you do not blindly register +// URLs from untrusted sources. +type InsecureWhitelist struct{} + +// NewInsecureWhitelist creates a new InsecureWhitelist instance. It is safe to +// use the zero value of this type; this constructor is provided for consistency. +func NewInsecureWhitelist() InsecureWhitelist { return InsecureWhitelist{} } + +func (InsecureWhitelist) IsAllowed(_ string) bool { return true } + +// RegexpWhitelist is a jwk.Whitelist object comprised of a list of *regexp.Regexp +// objects. All entries in the list are tried until one matches. If none of the +// *regexp.Regexp objects match, then the URL is deemed unallowed. +type RegexpWhitelist struct { + mu sync.RWMutex + patterns []*regexp.Regexp +} + +// NewRegexpWhitelist creates a new RegexpWhitelist instance. It is safe to use the +// zero value of this type; this constructor is provided for consistency. +func NewRegexpWhitelist() *RegexpWhitelist { + return &RegexpWhitelist{} +} + +// Add adds a new regular expression to the list of expressions to match against. +func (w *RegexpWhitelist) Add(pat *regexp.Regexp) *RegexpWhitelist { + w.mu.Lock() + defer w.mu.Unlock() + w.patterns = append(w.patterns, pat) + return w +} + +// IsAllowed returns true if any of the patterns in the whitelist +// returns true. 
+func (w *RegexpWhitelist) IsAllowed(u string) bool { + w.mu.RLock() + patterns := w.patterns + w.mu.RUnlock() + for _, pat := range patterns { + if pat.MatchString(u) { + return true + } + } + return false +} + +// MapWhitelist is a jwk.Whitelist object comprised of a map of strings. +// If the URL exists in the map, then the URL is allowed to be fetched. +type MapWhitelist interface { + Whitelist + Add(string) MapWhitelist +} + +type mapWhitelist struct { + mu sync.RWMutex + store map[string]struct{} +} + +func NewMapWhitelist() MapWhitelist { + return &mapWhitelist{store: make(map[string]struct{})} +} + +func (w *mapWhitelist) Add(pat string) MapWhitelist { + w.mu.Lock() + defer w.mu.Unlock() + w.store[pat] = struct{}{} + return w +} + +func (w *mapWhitelist) IsAllowed(u string) bool { + w.mu.RLock() + _, b := w.store[u] + w.mu.RUnlock() + return b +} diff --git a/vendor/github.com/lestrrat-go/httprc/v3/worker.go b/vendor/github.com/lestrrat-go/httprc/v3/worker.go new file mode 100644 index 0000000000..d11477dadc --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/v3/worker.go @@ -0,0 +1,62 @@ +package httprc + +import ( + "context" + "fmt" + "sync" +) + +type worker struct { + httpcl HTTPClient + incoming chan any + next <-chan Resource + nextsync <-chan synchronousRequest + errSink ErrorSink + traceSink TraceSink +} + +func (w worker) Run(ctx context.Context, readywg *sync.WaitGroup, donewg *sync.WaitGroup) { + w.traceSink.Put(ctx, "httprc worker: START worker loop") + defer w.traceSink.Put(ctx, "httprc worker: END worker loop") + defer donewg.Done() + ctx = withTraceSink(ctx, w.traceSink) + ctx = withHTTPClient(ctx, w.httpcl) + + readywg.Done() + for { + select { + case <-ctx.Done(): + w.traceSink.Put(ctx, "httprc worker: stopping worker loop") + return + case r := <-w.next: + w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: syncing %q (async)", r.URL())) + if err := r.Sync(ctx); err != nil { + w.errSink.Put(ctx, err) + } + r.SetBusy(false) + + 
w.sendAdjustIntervalRequest(ctx, r) + case sr := <-w.nextsync: + w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: syncing %q (synchronous)", sr.resource.URL())) + if err := sr.resource.Sync(ctx); err != nil { + w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: FAILED to sync %q (synchronous): %s", sr.resource.URL(), err)) + sendReply(ctx, sr.reply, struct{}{}, err) + sr.resource.SetBusy(false) + return + } + w.traceSink.Put(ctx, fmt.Sprintf("httprc worker: SUCCESS syncing %q (synchronous)", sr.resource.URL())) + sr.resource.SetBusy(false) + sendReply(ctx, sr.reply, struct{}{}, nil) + w.sendAdjustIntervalRequest(ctx, sr.resource) + } + } +} + +func (w worker) sendAdjustIntervalRequest(ctx context.Context, r Resource) { + w.traceSink.Put(ctx, "httprc worker: Sending interval adjustment request for "+r.URL()) + select { + case <-ctx.Done(): + case w.incoming <- adjustIntervalRequest{resource: r}: + } + w.traceSink.Put(ctx, "httprc worker: Sent interval adjustment request for "+r.URL()) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.bazelignore b/vendor/github.com/lestrrat-go/jwx/v3/.bazelignore new file mode 100644 index 0000000000..50347e8777 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/.bazelignore @@ -0,0 +1,4 @@ +cmd +bench +examples +tools diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.bazelrc b/vendor/github.com/lestrrat-go/jwx/v3/.bazelrc new file mode 100644 index 0000000000..b47648db7b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/.bazelrc @@ -0,0 +1 @@ +import %workspace%/.aspect/bazelrc/bazel7.bazelrc diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.bazelversion b/vendor/github.com/lestrrat-go/jwx/v3/.bazelversion new file mode 100644 index 0000000000..56b6be4ebb --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/.bazelversion @@ -0,0 +1 @@ +8.3.1 diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.gitignore b/vendor/github.com/lestrrat-go/jwx/v3/.gitignore new file mode 100644 index 0000000000..c4c0ebff32 --- 
/dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/.gitignore @@ -0,0 +1,39 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# IDE +.idea +.vscode +.DS_Store +*~ + +coverage.out + +# I redirect my test output to files named "out" way too often +out + +cmd/jwx/jwx + +bazel-* diff --git a/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml b/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml new file mode 100644 index 0000000000..30dc4c519b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/.golangci.yml @@ -0,0 +1,128 @@ +version: "2" +linters: + default: all + disable: + - cyclop + - depguard + - dupl + - err113 + - errorlint + - exhaustive + - funcorder + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godot + - godox + - gosec + - gosmopolitan + - govet + - inamedparam + - ireturn + - lll + - maintidx + - makezero + - mnd + - nakedret + - nestif + - nlreturn + - noinlineerr + - nonamedreturns + - paralleltest + - perfsprint + - staticcheck + - recvcheck + - tagliatelle + - testifylint + - testpackage + - thelper + - varnamelen + - wrapcheck + - wsl + - wsl_v5 + settings: + govet: + disable: + - shadow + - fieldalignment + enable-all: true + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - staticcheck + path: /*.go + text: 'ST1003: should not use underscores in package names' + - linters: + - revive + path: /*.go + text: don't use an underscore in package name + - linters: + - staticcheck + text: SA1019 + - linters: + - contextcheck + - exhaustruct + path: /*.go + - linters: + - errcheck + path: /main.go + - linters: + - errcheck + path: internal/codegen/codegen.go + - linters: + - 
errcheck + - errchkjson + - forcetypeassert + path: internal/jwxtest/jwxtest.go + - linters: + - errcheck + - errchkjson + - forcetypeassert + path: /*_test.go + - linters: + - forbidigo + path: /*_example_test.go + - linters: + - forbidigo + path: cmd/jwx/jwx.go + - linters: + - revive + path: /*_test.go + text: 'var-naming: ' + - linters: + - revive + path: internal/tokens/jwe_tokens.go + text: "don't use ALL_CAPS in Go names" + - linters: + - revive + path: jwt/internal/types/ + text: "var-naming: avoid meaningless package names" + - linters: + - godoclint + path: (^|/)internal/ + paths: + - third_party$ + - builtin$ + - examples$ +issues: + max-issues-per-linter: 0 + max-same-issues: 0 +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/lestrrat-go/jwx/v3/BUILD b/vendor/github.com/lestrrat-go/jwx/v3/BUILD new file mode 100644 index 0000000000..2759408882 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/BUILD @@ -0,0 +1,47 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") +load("@gazelle//:def.bzl", "gazelle") + +# gazelle:prefix github.com/lestrrat-go/jwx/v3 +# gazelle:go_naming_convention import_alias + +gazelle(name = "gazelle") + +go_library( + name = "jwx", + srcs = [ + "format.go", + "formatkind_string_gen.go", + "jwx.go", + "options.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3", + visibility = ["//visibility:public"], + deps = [ + "//internal/json", + "//internal/tokens", + "@com_github_lestrrat_go_option_v2//:option", + ], +) + +go_test( + name = "jwx_test", + srcs = ["jwx_test.go"], + deps = [ + ":jwx", + "//internal/jose", + "//internal/json", + "//internal/jwxtest", + "//jwa", + "//jwe", + "//jwk", + "//jwk/ecdsa", + "//jws", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":jwx", + visibility = ["//visibility:public"], +) diff --git 
a/vendor/github.com/lestrrat-go/jwx/v3/Changes b/vendor/github.com/lestrrat-go/jwx/v3/Changes new file mode 100644 index 0000000000..4df33b756c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/Changes @@ -0,0 +1,272 @@ +Changes +======= + +v3 has many incompatibilities with v2. To see the full list of differences between +v2 and v3, please read the Changes-v3.md file (https://github.com/lestrrat-go/jwx/blob/develop/v3/Changes-v3.md) + +v3.0.13 12 Jan 2026 + * [jwt] The `jwt.WithContext()` option is now properly being passed to `jws.Verify()` from + `jwt.Parse()`. + * [jwx] github.com/lestrrat-go/httprc/v3 has been upgraded to remove dependency on + github.com/lestrrat-go/option (v1) + * [jwk] `jwk.Clone()` has been fixed to properly work with private fields. + +v3.0.12 20 Oct 2025 + * [jwe] As part of the next change, now per-recipient headers that are empty + are no longer serialized in flattened JSON serialization. + + * [jwe] Introduce `jwe.WithLegacyHeaderMerging(bool)` option to control header + merging behavior in during JWE encryption. This only applies to flattened + JSON serialization. + + Previously, when using flattened JSON serialization (i.e. you specified + JSON serialization via `jwe.WithJSON()` and only supplied one key), per-recipient + headers were merged into the protected headers during encryption, and then + were left to be included in the final serialization as-is. This caused duplicate + headers to be present in both the protected headers and the per-recipient headers. + + Since there may be users who rely on this behavior already, instead of changing the + default behavior to fix this duplication, a new option to `jwe.Encrypt()` was added + to allow clearing the per-recipient headers after merging to leave the `"headers"` + field empty. This in effect makes the flattened JSON serialization more similar to + the compact serialization, where there are no per-recipient headers present, and + leaves the headers disjoint. 
+ + Note that in compact mode, there are no per-recipient headers and thus the + headers need to be merged regardless. In full JSON serialization, we never + merge the headers, so it is left up to the user to keep the headers disjoint. + + * [jws] Calling the deprecated `jws.NewSigner()` function for the first time will cause + legacy signers to be loaded automatically. Previously, you had to explicitly + call `jws.Settings(jws.WithLegacySigners(true))` to enable legacy signers. + + We incorrectly assumed that users would not be using `jws.NewSigner()`, and thus + disabled legacy signers by default. However, it turned out that some users + were using `jws.NewSigner()` in their code, which lead to breakages in + existing code. In hindsight we should have known that any API made public before will + be used by _somebody_. + + As a side effect, jws.Settings(jws.WithLegacySigners(...)) is now a no-op. + + However, please do note that jws.Signer (and similar) objects were always intended to be + used for _registering_ new signing/verifying algorithms, and not for end users to actually + use them directly. If you are using them for other purposes, please consider changing + your code, as it is more than likely that we will somehow deprecate/remove/discouraged + their use in the future. + +v3.0.11 14 Sep 2025 + * [jwk] Add `(jwk.Cache).Shutdown()` method that delegates to the httprc controller + object, to shutdown the cache. + * [jwk] Change timing of `res.Body.Close()` call + * [jwe] Previously, ecdh.PrivateKey/ecdh.PublicKey were not properly handled + when used for encryption, which has been fixed. + * [jws/jwsbb] (EXPERIMENTAL/BREAKS COMPATIBILITY) Convert most functions into + thin wrappers around functions from github.com/lestrrat-go/dsig package. + As a related change, HAMCHashFuncFor/RSAHashFuncFor/ECDSAHashFuncFor/RSAPSSOptions + have been removed or unexported. 
+ Users of this module should be using jwsbb.Sign() and jwsbb.Verify() instead of + algorithm specific jwsbb.SignRSA()/jwsbb.VerifyRSA() and such. If you feel the + need to use these functions, you should use github.com/lestrrat-go/dsig directly. + +v3.0.10 04 Aug 2025 + * [jws/jwsbb] Add `jwsbb.ErrHeaderNotFound()` to return the same error type as when + a non-existent header is requested. via `HeaderGetXXX()` functions. Previously, this + function was called `jwsbb.ErrFieldNotFound()`, but it was a misnomer. + * [jws/jwsbb] Fix a bug where error return values from `HeaderGetXXX()` functions + could not be matched against `jwsbb.ErrHeaderNotFound()` using `errors.Is()`. + +v3.0.9 31 Jul 2025 + * [jws/jwsbb] `HeaderGetXXX()` functions now return errors when + the requested header is not found, or if the value cannot be + converted to the requested type. + + * [jwt] `(jwt.Token).Get` methods now return specific types of errors depending + on if a) the specified claim was not present, or b) the specified claim could + not be assigned to the destination variable. + + You can distinguish these by using `errors.Is` against `jwt.ClaimNotFoundError()` + or `jwt.ClaimAssignmentFailedError()` + +v3.0.8 27 Jun 2025 + * [jwe/jwebb] (EXPERIMENTAL) Add low-level functions for JWE operations. + * [jws/jwsbb] (EXPERIMENTAL/BREAKS COMPATIBILITY) Add io.Reader parameter + so your choice of source of randomness can be passed. Defaults to crypto/rand.Reader. + Function signatures around jwsbb.Sign() now accept an addition `rr io.Reader`, + which can be nil for 99% of use cases. + * [jws/jwsbb] Add HeaderParse([]byte), where it is expected that the header + is already in its base64 decoded format. + * misc: replace `interface{}` with `any` + +v3.0.7 16 Jun 2025 + * [jws/jwsbb] (EXPERIMENTAL) Add low-level fast access to JWS headers in compact + serialization form. + * [jws] Fix error reporting when no key matched for a signature. + * [jws] Refactor jws signer setup. 
+ * Known algorithms are now implemented completely in the jws/jwsbb package. + * VerifierFor and SignerFor now always succeed, and will also return a Signer2 + or Verifier2 that wraps the legacy Signer or Verifier if one is registered. + +v3.0.6 13 Jun 2025 + * This release contains various performance improvements all over the code. + No, this time for real. In particular, the most common case for signing + a JWT with a key is approx 70% more efficient based on the number of allocations. + + Please read the entry for the (retracted) v3.0.4 for what else I have to + say about performance improvements + + * [jwt] Added fast-path for token signing and verification. The fast path + is triggered if you only pass `jwt.Sign()` and `jwt.Parse()` one options each + (`jwt.WithKey()`), with no suboptions. + + * [jws] Major refactoring around basic operations: + + * How to work with Signer/Verifier have completely changed. Please take + a look at examples/jws_custom_signer_verifier_example_test.go for how + to do it the new way. The old way still works, but it WILL be removed + when v4 arrives. + * Related to the above, old code has been moved to `jws/legacy`. + + * A new package `jws/jwsbb` has been added. `bb` stands for building blocks. + This package separates out the low-level JWS operations into its own + package. So if you are looking for just the signing of a payload with + a key, this is it. + + `jws/jwsbb` is currently considered to be EXPERIMENTAL. + +v3.0.5 11 Jun 2025 + * Retract v3.0.4 + * Code for v3.0.3 is the same as v3.0.3 + +v3.0.4 09 Jun 2025 + * This release contains various performance improvements all over the code. + + Because of the direction that this library is taking, we have always been + more focused on correctness and usability/flexibility over performance. + + It just so happens that I had a moment of inspiration and decided to see + just how good our AI-based coding agents are in this sort of analysis-heavy tasks. 
+ + Long story short, the AI was fairly good at identifying suspicious code with + an okay accuracy, but completely failed to make any meaningful changes to the + code in a way that both did not break the code _and_ improved performance. + I am sure that they will get better in the near future, but for now, + I had to do the changes myself. I should clarify to their defence that + the AI was very helpful in writing cumbersome benchmark code for me. + + The end result is that we have anywhere from 10 to 30% performance improvements + in various parts of the code that we touched, based on number of allocations. + We believe that this would be a significant improvement for many users. + + For further improvements, we can see that there would be a clear benefit to + writing optimized code path that is designed to serve the most common cases. + For example, for the case of signing JWTs with a single key, we could provide + a path that skips a lot of extra processing (we kind of did that in this change, + but we _could_ go ever harder in this direction). However, it is a trade-off between + maintainability and performance, and as I am currently the sole maintainer of + this library for the time being, I only plan to pursue such a route where it + requires minimal effort on my part. + + If you are interested in helping out in this area, I hereby thank you in advance. + However, please be perfectly clear that unlike other types of changes, for performance + related changes, the balance between the performance gains and maintainability is + top priority. If you have good ideas and code, they will always be welcome, but + please be prepared to justify your changes. + + Finally, thank you for using this library! 
+ +v3.0.3 06 Jun 2025 + * Update some dependencies + * [jwe] Change some error messages to contain more context information + +v3.0.2 03 Jun 2025 + * [transform] (EXPERIMENTAL) Add utility function `transform.AsMap` to convert a + Mappable object to a map[string]interface{}. This is useful for converting + objects such as `jws.Header`, `jwk.Key`, `jwt.Token`, etc. to a map that can + be used with other libraries that expect a map. + * [jwt] (EXPERIMENTAL) Added token filtering functionality through the TokenFilter interface. + * [jwt/openid] (EXPERIMENTAL) Added StandardClaimsFilter() for filtering standard OpenID claims. + * [jws] (EXPERIMENTAL) Added header filtering functionality through the HeaderFilter interface. + * [jwe] (EXPERIMENTAL) Added header filtering functionality through the HeaderFilter interface. + * [jwk] (EXPERIMENTAL) Added key filtering functionality through the KeyFilter interface. + * [jwk] `jwk.Export` previously did not recognize third-party objects that implemented `jwk.Key`, + as it was detecting what to do by checking if the object was one of our own unexported + types. This caused some problems for consumers of this library that wanted to extend the + features of the keys. + + Now `jwk.Export` checks types against interface types such as `jwk.RSAPrivateKey`, `jwk.ECDSAPrivateKey`, etc. + It also uses some reflect blackmagic to detect if the given object implements the `jwk.Key` interface + via embedding, so you should be able to embed a `jwk.Key` to another object to act as if it + is a legitimate `jwk.Key`, as far as `jwk.Export` is concerned. + +v3.0.1 29 Apr 2025 + * [jwe] Fixed a long standing bug that could lead to degraded encryption or failure to + decrypt JWE messages when a very specific combination of inputs were used for + JWE operations. + + This problem only manifested itself when the following conditions in content encryption or decryption + were met: + - Content encryption was specified to use DIRECT mode. 
+ - Contentn encryption algorithm is specified as A256CBC_HS512 + - The key was erronously constructed with a 32-byte content encryption key (CEK) + + In this case, the user would be passing a mis-constructed key of 32-bytes instead + of the intended 64-bytes. In all other cases, this construction would cause + an error because `crypto/aes.NewCipher` would return an error when a key with length + not matching 16, 24, and 32 bytes is used. However, due to use using a the provided + 32-bytes as half CEK and half the hash, the `crypto/aes.NewCipher` was passed + a 16-byte key, which is fine for AES-128. So internally `crypto/aes.NewCipher` would + choose to use AES-128 instead of AES-256, and happily continue. Note that no other + key lengths such as 48 and 128 would have worked. It had to be exactly 32. + + This does indeed result in a downgraded encryption, but we believe it is unlikely that this would cause a problem in the real world, + as you would have to very specifically choose to use DIRECT mode, choose + the specific content encryption algorithm, AND also use the wrong key size of + exactly 32 bytes. + + However, in abandunce of caution, we recommend that you upgrade to v3.0.1 or later, + or v2.1.6 or later if you are still on v2 series. + + * [jws] Improve performance of jws.SplitCompact and jws.SplitCompactString + * [jwe] Improve performance of jwe.Parse + +v3.0.0 1 Apr 2025 + * Release initial v3.0.0 series. Code is identical to v3.0.0-beta2, except + for minor documentation changes. + + Please note that v1 will no longer be maintained. + + Going forward v2 will receive security updates but will no longer receive + feature updates. Users are encouraged to migrate to v3. There is no hard-set + guarantee as to how long v2 will be supported, but if/when v4 comes out, + v2 support will be terminated then. + +v3.0.0-beta2 30 Mar 2025 + * [jwk] Fix a bug where `jwk.Set`'s `Keys()` method did not return the proper + non-standard fields. 
(#1322) + * [jws][jwt] Implement `WithBase64Encoder()` options to pass base64 encoders + to use during signing/verifying signatures. This useful when the token + provider generates JWTs that don't follow the specification and uses base64 + encoding other than raw url encoding (no padding), such as, apparently, + AWS ALB. (#1324, #1328) + +v3.0.0-beta1 15 Mar 2025 + * [jwt] Token validation no longer truncates time based fields by default. + To restore old behavior, you can either change the global settings by + calling `jwt.Settings(jwt.WithTruncation(time.Second))`, or you can + change it by each invocation by using `jwt.Validate(..., jwt.WithTruncation(time.Second))` + +v3.0.0-alpha3 13 Mar 2025 + * [jwk] Importing/Exporting from jwk.Key with P256/P386/P521 curves to + ecdh.PrivateKey/ecdh.PublicKey should now work. Previously these keys were not properly + recognized by the exporter/importer. Note that keys that use X25519 and P256/P384/P521 + behave differently: X25519 keys can only be exported to/imported from OKP keys, + while P256/P384/P521 can be exported to either ecdsa or ecdh keys. + +v3.0.0-alpha2 25 Feb 2025 + * Update to work with go1.24 + * Update tests to work with latest latchset/jose + * Fix build pipeline to work with latest golangci-lint + * Require go1.23 + +v3.0.0-alpha1 01 Nov 2024 + * Initial release of v3 line. diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Changes-v2.md b/vendor/github.com/lestrrat-go/jwx/v3/Changes-v2.md new file mode 100644 index 0000000000..af146ed33a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/Changes-v2.md @@ -0,0 +1,390 @@ +# Incompatible Changes from v1 to v2 + +These are changes that are incompatible with the v1.x.x version. + +* [tl;dr](#tldr) - If you don't feel like reading the details -- but you will read the details, right? 
+* [Detailed List of Changes](#detailed-list-of-changes) - A comprehensive list of changes from v1 to v2 + +# tl;dr + +## JWT + +```go +// most basic +jwt.Parse(serialized, jwt.WithKey(alg, key)) // NOTE: verification and validation are ENABLED by default! +jwt.Sign(token, jwt.WithKey(alg,key)) + +// with a jwk.Set +jwt.Parse(serialized, jwt.WithKeySet(set)) + +// UseDefault/InferAlgorithm with JWKS +jwt.Parse(serialized, jwt.WithKeySet(set, + jws.WithUseDefault(true), jws.WithInferAlgorithm(true)) + +// Use `jku` +jwt.Parse(serialized, jwt.WithVerifyAuto(...)) + +// Any other custom key provisioning (using functions in this +// example, but can be anything that fulfills jws.KeyProvider) +jwt.Parse(serialized, jwt.WithKeyProvider(jws.KeyProviderFunc(...))) +``` + +## JWK + +```go +// jwk.New() was confusing. Renamed to fit the actual implementation +key, err := jwk.FromRaw(rawKey) + +// Algorithm() now returns jwa.KeyAlgorithm type. `jws.Sign()` +// and other function that receive JWK algorithm names accept +// this new type, so you can use the same key and do the following +// (previously you needed to type assert) +jws.Sign(payload, jws.WithKey(key.Algorithm(), key)) + +// If you need the specific type, type assert +key.Algorithm().(jwa.SignatureAlgorithm) + +// jwk.AutoRefresh is no more. Use jwk.Cache +cache := jwk.NewCache(ctx, options...) 
+ +// Certificate chains are no longer jwk.CertificateChain type, but +// *(github.com/lestrrat-go/jwx/cert).Chain +cc := key.X509CertChain() // this is *cert.Chain now +``` + +## JWS + +```go +// basic +jws.Sign(payload, jws.WithKey(alg, key)) +jws.Sign(payload, jws.WithKey(alg, key), jws.WithKey(alg, key), jws.WithJSON(true)) +jws.Verify(signed, jws.WithKey(alg, key)) + +// other ways to pass the key +jws.Sign(payload, jws.WithKeySet(jwks)) +jws.Sign(payload, jws.WithKeyProvider(kp)) + +// retrieve the key that succeeded in verifying +var keyUsed interface{} +jws.Verify(signed, jws.WithKeySet(jwks), jws.WithKeyUsed(&keyUsed)) +``` + +## JWE + +```go +// basic +jwe.Encrypt(payload, jwe.WithKey(alg, key)) // other defaults are inferred +jwe.Encrypt(payload, jwe.WithKey(alg, key), jwe.WithKey(alg, key), jwe.WithJSON(true)) +jwe.Decrypt(encrypted, jwe.WithKey(alg, key)) + +// other ways to pass the key +jwe.Encrypt(payload, jwe.WithKeySet(jwks)) +jwe.Encrypt(payload, jwe.WithKeyProvider(kp)) + +// retrieve the key that succeeded in decrypting +var keyUsed interface{} +jwe.Verify(signed, jwe.WithKeySet(jwks), jwe.WithKeyUsed(&keyUsed)) +``` + +# Detailed List of Changes + +## Module + +* Module now requires go 1.16 + +* Use of github.com/pkg/errors is no more. If you were relying on behavior + that depends on the errors being an instance of github.com/pkg/errors + then you need to change your code + +* File-generation tools have been moved out of internal/ directories. + These files pre-dates Go modules, and they were in internal/ in order + to avoid being listed in the `go doc` -- however, now that we can + make them separate modules this is no longer necessary. 
+ +* New package `cert` has been added to handle `x5c` certificate + chains, and to work with certificates + * cert.Chain to store base64 encoded ASN.1 DER format certificates + * cert.EncodeBase64 to encode ASN.1 DER format certificate using base64 + * cert.Create to create a base64 encoded ASN.1 DER format certificates + * cert.Parse to parse base64 encoded ASN.1 DER format certificates + +## JWE + +* `jwe.Compact()`'s signature has changed to + `jwe.Compact(*jwe.Message, ...jwe.CompactOption)` + +* `jwe.JSON()` has been removed. You can generate JSON serialization + using `jwe.Encrypt(jwe.WitJSON())` or `json.Marshal(jwe.Message)` + +* `(jwe.Message).Decrypt()` has been removed. Since formatting of the + original serialized message matters (including whitespace), using a parsed + object was inherently confusing. + +* `jwe.Encrypt()` can now generate JWE messages in either compact or JSON + forms. By default, the compact form is used. JSON format can be + enabled by using the `jwe.WithJSON` option. + +* `jwe.Encrypt()` can now accept multiple keys by passing multiple + `jwe.WithKey()` options. This can be used with `jwe.WithJSON` to + create JWE messages with multiple recipients. + +* `jwe.DecryptEncryptOption()` has been renamed to `jwe.EncryptDecryptOption()`. + This is so that it is more uniform with `jws` equivalent of `jws.SignVerifyOption()` + where the producer (`Sign`) comes before the consumer (`Verify`) in the naming + +* `jwe.WithCompact` and `jwe.WithJSON` options have been added + to control the serialization format. + +* jwe.Decrypt()'s method signature has been changed to `jwt.Decrypt([]byte, ...jwe.DecryptOption) ([]byte, error)`. + These options can be stacked. Therefore, you could configure the + verification process to attempt a static key pair, a JWKS, and only + try other forms if the first two fails, for example. 
+ + - For static key pair, use `jwe.WithKey()` + - For static JWKS, use `jwe.WithKeySet()` (NOTE: InferAlgorithmFromKey like in `jws` package is NOT supported) + - For custom, possibly dynamic key provisioning, use `jwe.WithKeyProvider()` + +* jwe.Decrypter has been unexported. Users did not need this. + +* jwe.WithKeyProvider() has been added to specify arbitrary + code to specify which keys to try. + +* jwe.KeyProvider interface has been added + +* jwe.KeyProviderFunc has been added + +* `WithPostParser()` has been removed. You can achieve the same effect + by using `jwe.WithKeyProvider()`. Because this was the only consumer for + `jwe.DecryptCtx`, this type has been removed as well. + +* `x5c` field type has been changed to `*cert.Chain` instead of `[]string` + +* Method signature for `jwe.Parse()` has been changed to include options, + but options are currently not used + +* `jwe.ReadFile` now supports the option `jwe.WithFS` which allows you to + read data from arbitrary `fs.FS` objects + +* jwe.WithKeyUsed has been added to allow users to retrieve + the key used for decryption. This is useful in cases you provided + multiple keys and you want to know which one was successful + +## JWK + +* `jwk.New()` has been renamed to `jwk.FromRaw()`, which hopefully will + make it easier for the users what the input should be. + +* `jwk.Set` has many interface changes: + * Changed methods to match jwk.Key and its semantics: + * Field is now Get() (returns values for arbitrary fields other than keys). Fetching a key is done via Key() + * Remove() now removes arbitrary fields, not keys. to remove keys, use RemoveKey() + * Iterate has been added to iterate through all non-key fields. + * Add is now AddKey(Key) string, and returns an error when the same key is added + * Get is now Key(int) (Key, bool) + * Remove is now RemoveKey(Key) error + * Iterate is now Keys(context.Context) KeyIterator + * Clear is now Clear() error + +* `jwk.CachedSet` has been added. 
You can create a `jwk.Set` that is backed by + `jwk.Cache` so you can do this: + +```go +cache := jwk.NewCache(ctx) +cachedSet := jwk.NewCachedSet(cache, jwksURI) + +// cachedSet is always the refreshed, cached version from jwk.Cache +jws.Verify(signed, jws.WithKeySet(cachedSet)) +``` + +* `jwk.NewRSAPrivateKey()`, `jwk.NewECDSAPrivateKey()`, etc have been removed. + There is no longer any way to create concrete types of `jwk.Key` + +* `jwk.Key` type no longer supports direct unmarshaling via `json.Unmarshal()`, + because you can no longer instantiate concrete `jwk.Key` types. You will need to + use `jwk.ParseKey()`. See the documentation for ways to parse JWKs. + +* `(jwk.Key).Algorithm()` is now of `jwk.KeyAlgorithm` type. This field used + to be `string` and therefore could not be passed directly to `jwt.Sign()` + `jws.Sign()`, `jwe.Encrypt()`, et al. This is no longer the case, and + now you can pass it directly. See + https://github.com/lestrrat-go/jwx/blob/v2/docs/99-faq.md#why-is-jwkkeyalgorithm-and-jwakeyalgorithm-so-confusing + for more details + +* `jwk.Fetcher` and `jwk.FetchFunc` have been added. + They represent something that can fetch a `jwk.Set` + +* `jwk.CertificateChain` has been removed, use `*cert.Chain` +* `x5c` field type has been changed to `*cert.Chain` instead of `[]*x509.Certificate` + +* `jwk.ReadFile` now supports the option `jwk.WithFS` which allows you to + read data from arbitrary `fs.FS` objects + +* Added `jwk.PostFetcher`, `jwk.PostFetchFunc`, and `jwk.WithPostFetch` to + allow users to get at the `jwk.Set` that was fetched in `jwk.Cache`. + This will make it possible for users to supply extra information and edit + `jwk.Set` after it has been fetched and parsed, but before it is cached. + You could, for example, modify the `alg` field so that it's easier to + work with when you use it in `jws.Verify` later. + +* Reworked `jwk.AutoRefresh` in terms of `github.com/lestrrat-go/httprc` + and renamed it `jwk.Cache`.
+ + Major difference between `jwk.AutoRefresh` and `jwk.Cache` is that while + former used one `time.Timer` per resource, the latter uses a static timer + (based on `jwk.WithRefreshWindow()` value, default 15 minutes) that periodically + refreshes all resources that were due to be refreshed within that time frame. + + This method may cause your updates to happen slightly later, but uses significantly + less resources and is less prone to clogging. + +* Reimplemented `jwk.Fetch` in terms of `github.com/lestrrat-go/httprc`. + +* Previously `jwk.Fetch` and `jwk.AutoRefresh` respected backoff options, + but this has been removed. This is to avoid unwanted clogging of the fetch workers + which is the default processing mode in `github.com/lestrrat-go/httprc`. + + If you are using backoffs, you need to control your inputs more carefully so as + not to clog your fetch queue, and therefore you should be writing custom code that + suits your needs + +## JWS + +* `jws.Sign()` can now generate JWS messages in either compact or JSON + forms. By default, the compact form is used. JSON format can be + enabled by using the `jws.WithJSON` option. + +* `jws.Sign()` can now accept multiple keys by passing multiple + `jws.WithKey()` options. This can be used with `jws.WithJSON` to + create JWS messages with multiple signatures. + +* `jws.WithCompact` and `jws.WithJSON` options have been added + to control the serialization format. + +* jws.Verify()'s method signature has been changed to `jwt.Verify([]byte, ...jws.VerifyOption) ([]byte, error)`. + These options can be stacked. Therefore, you could configure the + verification process to attempt a static key pair, a JWKS, and only + try other forms if the first two fails, for example. 
+ + - For static key pair, use `jws.WithKey()` + - For static JWKS, use `jws.WithKeySet()` + - For enabling verification using `jku`, use `jws.WithVerifyAuto()` + - For custom, possibly dynamic key provisioning, use `jws.WithKeyProvider()` + +* jws.WithVerify() has been removed. + +* jws.WithKey() has been added to specify an algorithm + key to + verify the payload with. + +* jws.WithKeySet() has been added to specify a JWKS to be used for + verification. By default `kid` AND `alg` must match between the signature + and the key. + + The option can take further suboptions: + +```go +jws.Parse(serialized, + jws.WithKeySet(set, + // by default `kid` is required. set false to disable. + jws.WithRequireKid(false), + // optionally skip matching kid if there's exactly one key in set + jws.WithUseDefault(true), + // infer algorithm name from key type + jws.WithInferAlgorithm(true), + ), +) +``` + +* `jws.VerifuAuto` has been removed in favor of using + `jws.WithVerifyAuto` option with `jws.Verify()` + +* `jws.WithVerifyAuto` has been added to enable verification + using `jku`. + + The first argument must be a jwk.Fetcher object, but can be + set to `nil` to use the default implementation which is `jwk.Fetch` + + The rest of the arguments are treated as options passed to the + `(jwk.Fetcher).Fetch()` function. + +* Remove `jws.WithPayloadSigner()`. This should be completely replaceable + using `jws.WithKey()` + +* jws.WithKeyProvider() has been added to specify arbitrary + code to specify which keys to try. + +* jws.KeyProvider interface has been added + +* jws.KeyProviderFunc has been added + +* jws.WithKeyUsed has been added to allow users to retrieve + the key used for verification. 
This is useful in cases you provided + multiple keys and you want to know which one was successful + +* `x5c` field type has been changed to `*cert.Chain` instead of `[]string` + +* `jws.ReadFile` now supports the option `jws.WithFS` which allows you to + read data from arbitrary `fs.FS` objects + +## JWT + +* `jwt.Parse` now verifies the signature and validates the token + by default. You must disable it explicitly using `jwt.WithValidate(false)` + and/or `jwt.WithVerify(false)` if you only want to parse the JWT message. + + If you don't want either, a convenience function `jwt.ParseInsecure` + has been added. + +* `jwt.Parse` can only parse raw JWT (JSON) or JWS (JSON or Compact). + It no longer accepts JWE messages. + +* `jwt.WithDecrypt` has been removed + +* `jwt.WithJweHeaders` has been removed + +* `jwt.WithVerify()` has been renamed to `jwt.WithKey()`. The option can + be used for signing, encryption, and parsing. + +* `jwt.Validator` has been changed to return `jwt.ValidationError`. + If you provide a custom validator, you should wrap the error with + `jwt.NewValidationError()` + +* `jwt.UseDefault()` has been removed. You should use `jws.WithUseDefault()` + as a suboption in the `jwt.WithKeySet()` option. + +```go +jwt.Parse(serialized, jwt.WithKeySet(set, jws.WithUseDefault(true))) +``` + +* `jwt.InferAlgorithmFromKey()` has been removed. You should use + `jws.WithInferAlgorithmFromKey()` as a suboption in the `jwt.WithKeySet()` option. + +```go +jwt.Parse(serialized, jwt.WithKeySet(set, jws.WithInferAlgorithmFromKey(true))) +``` + +* jwt.WithKeySetProvider has been removed. Use `jwt.WithKeyProvider()` + instead. 
If jwt.WithKeyProvider seems a bit complicated, use a combination of + JWS parse, no-verify/validate JWT parse, and an extra JWS verify: + +```go +msg, _ := jws.Parse(signed) +token, _ := jwt.Parse(msg.Payload(), jwt.WithVerify(false), jwt.WithValidate(false)) +// Get information out of token, for example, `iss` +switch token.Issuer() { +case ...: + jws.Verify(signed, jwt.WithKey(...)) +} +``` + +* `jwt.WithHeaders` and `jwt.WithJwsHeaders` have been removed. + You should be able to use the new `jwt.WithKey` option to pass headers + +* `jwt.WithSignOption` and `jwt.WithEncryptOption` have been added as + escape hatches for options that are declared in `jws` and `jwe` packages + but not in `jwt` + +* `jwt.ReadFile` now supports the option `jwt.WithFS` which allows you to + read data from arbitrary `fs.FS` objects + +* `jwt.Sign()` has been changed so that it works more like the new `jws.Sign()` + diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Changes-v3.md b/vendor/github.com/lestrrat-go/jwx/v3/Changes-v3.md new file mode 100644 index 0000000000..c2fa8747b9 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/Changes-v3.md @@ -0,0 +1,140 @@ +# Incompatible Changes from v2 to v3 + +These are changes that are incompatible with the v2.x.x version. + +# Detailed list of changes + +## Module + +* This module now requires Go 1.23 + +* All `xxx.Get()` methods have been changed from `Get(string) (interface{}, error)` to + `Get(string, interface{}) error`, where the second argument should be a pointer + to the storage destination of the field. + +* All convenience accessors (e.g. `(jwt.Token).Subject`) now return `(T, bool)` instead of + `T`. If you want an accessor that returns a single value, consider using `Get()` + +* Most major errors can now be differentiated using `errors.Is` + +## JWA + +* All string constants have been renamed to equivalent functions that return a struct. + You should rewrite `jwa.RS256` as `jwa.RS256()` and so forth. 
+ +* By default, only known algorithm names are accepted. For example, in our JWK tests, + there are tests that deal with "ECMR" algorithm, but this will now fail by default. + If you want this algorithm to succeed parsing, you need to call `jwa.RegisterXXXX` + functions before using them. + +* Previously, unmarshaling unquoted strings used to work (e.g. `var s = "RS256"`), + but now they must conform to the JSON standard and be quoted (e.g. `var s = strconv.Quote("RS256")`) + +## JWT + +* All convenience accessors (e.g. `Subject`) now return `(T, bool)` instead of + just `T`. If you want a single return value accessor, use `Get(dst) error` instead. + +* Validation used to work for `iat`, `nbf`, `exp` fields where these fields were + set to the explicit time.Time{} zero value, but now the _presence_ of these fields matters. + +* Validation of fields related to time used to be truncated to one second accuracy, + but no longer does so. To restore old behavior, you can either change the global settings by + calling `jwt.Settings(jwt.WithTruncation(time.Second))`, or you can + change it by each invocation by using `jwt.Validate(..., jwt.WithTruncation(time.Second))` + +* Error names have been renamed. For example `jwt.ErrInvalidJWT` has been renamed to + `jwt.UnknownPayloadTypeError` to better reflect what the error means. For other errors, + `func ErrXXXX()` have generally been renamed to `func XXXError()` + +* Validation errors are now wrapped. While `Validate()` returns a `ValidateError()` type, + it can also be matched against more specific error types such as `TokenExpiredError()` + using `errors.Is` + +* `jwt.ErrMissingRequiredClaim` has been removed + +## JWS + +* Iterators have been completely removed. +* As a side effect of removing iterators, some methods such as `Copy()` lost the + `context.Context` argument + +* All convenience accessors (e.g. `Algorithm`) now return `(T, bool)` instead of + just `T`.
If you want a single return value accessor, use `Get(dst) error` instead. + +* Errors from `jws.Sign` and `jws.Verify`, as well as `jws.Parse` (and friends) + can now be differentiated by using `errors.Is`. All `jws.IsXXXXError` functions + have been removed. + +## JWE + +* Iterators have been completely removed. +* As a side effect of removing iterators, some methods such as `Copy()` lost the + `context.Context` argument + +* All convenience accessors (e.g. `Algorithm`) now return `(T, bool)` instead of + just `T`. If you want a single return value accessor, use `Get(dst) error` instead. + +* Errors from `jwe.Decrypt` and `jwe.Encrypt`, as well as `jwe.Parse` (and friends) + can now be differentiated by using `errors.Is`. All `jwe.IsXXXXrror` functions + have been removed. + +## JWK + +* All convenience accessors (e.g. `Algorithm`, `Crv`) now return `(T, bool)` instead + of just `T`, except `KeyType`, which _always_ returns a valid value. If you want a + single return value accessor, use `Get(dst) error` instead. + +* `jwk.KeyUsageType` can now be configured so that it's possible to assign values + other than "sig" and "enc" via `jwk.RegisterKeyUsage()`. Furthermore, strict + checks can be turned on/off against these registered values + +* `jwk.Cache` has been completely re-worked based on github.com/lestrrat-go/httprc/v3. + In particular, the default whitelist mode has changed from "block everything" to + "allow everything". + +* Experimental secp256k1 encoding/decoding for PEM encoded ASN.1 DER Format + has been removed. Instead, `jwk.PEMDecoder` and `jwk.PEMEncoder` have been + added to support those who want to perform non-standard PEM encoding/decoding + +* Iterators have been completely removed. + +* `jwk/x25519` has been removed. To use X25519 keys, use `(crypto/ecdh).PrivateKey` and + `(crypto/ecdh).PublicKey`. Similarly, internals have been reworked to use `crypto/ecdh` + +* Parsing has completely been reworked. 
It is now possible to add your own `jwk.KeyParser` + to generate a custom `jwk.Key` that this library may not natively support. Also see + `jwk.RegisterKeyParser()` + +* `jwk.KeyProbe` has been added to aid probing the JSON message. This is used to + guess the type of key described in the JSON message before deciding which concrete + type to instantiate, and aids implementing your own `jwk.KeyParser`. Also see + `jwk.RegisterKeyProbe()` + +* Conversion between raw keys and `jwk.Key` can be customized using `jwk.KeyImporter` and `jwk.KeyExporter`. + Also see `jwk.RegisterKeyImporter()` and `jwk.RegisterKeyExporter()` + +* Added `jwk/ecdsa` to keep track of which curves are available for ECDSA keys. + +* `(jwk.Key).Raw()` has been deprecated. Use `jwk.Export()` instead to convert `jwk.Key` + objects into their "raw" versions (e.g. `*rsa.PrivateKey`, `*ecdsa.PrivateKey`, etc). + This is to allow third parties to register custom key types that this library does not + natively support: Whereas a method must be bound to an object, and thus does not necessarily + have a way to hook into a global settings (i.e. custom exporter/importer) for arbitrary + key types, if the entrypoint is a function it's much easier and cleaner for third-parties + to take advantage and hook into the mechanisms. + +* `jwk.FromRaw()` has been deprecated. Use `jwk.Import()` instead to convert "raw" + keys (e.g. `*rsa.PrivateKey`, `*ecdsa.PrivateKey`, etc) into `jwk.Key`s. + +* `(jwk.Key).FromRaw()` has been deprecated. The method `(jwk.Key).Import()` still exists for + built-in types, but it is no longer part of any public API (`interface{}`). + +* `jwk.Fetch` is marked as a simple wrapper around `net/http` and `jwk.Parse`. + +* `jwk.SetGlobalFetcher` has been deprecated.
+ +* `jwk.Fetcher` has been clearly marked as something that has limited + usage for `jws.WithVerifyAuto` + +* `jwk.Key` with P256/P384/P521 curves can be exported to `ecdh.PrivateKey`/`ecdh.PublicKey` \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/LICENSE b/vendor/github.com/lestrrat-go/jwx/v3/LICENSE new file mode 100644 index 0000000000..205e33a7f1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
+ diff --git a/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel new file mode 100644 index 0000000000..c9bdc9b730 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel @@ -0,0 +1,34 @@ +module( + name = "com_github_lestrrat_go_jwx_v3", + version = "3.0.0", + repo_name = "com_github_lestrrat_go_jwx_v2", +) + +bazel_dep(name = "bazel_skylib", version = "1.7.1") +bazel_dep(name = "rules_go", version = "0.55.1") +bazel_dep(name = "gazelle", version = "0.44.0") +bazel_dep(name = "aspect_bazel_lib", version = "2.11.0") + +# Go SDK setup from go.mod +go_sdk = use_extension("@rules_go//go:extensions.bzl", "go_sdk") +go_sdk.from_file(go_mod = "//:go.mod") + +# Go dependencies from go.mod +go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps") +go_deps.from_file(go_mod = "//:go.mod") + +# Use repositories for external Go dependencies +use_repo( + go_deps, + "com_github_decred_dcrd_dcrec_secp256k1_v4", + "com_github_goccy_go_json", + "com_github_lestrrat_go_blackmagic", + "com_github_lestrrat_go_dsig", + "com_github_lestrrat_go_dsig_secp256k1", + "com_github_lestrrat_go_httprc_v3", + "com_github_lestrrat_go_option_v2", + "com_github_segmentio_asm", + "com_github_stretchr_testify", + "com_github_valyala_fastjson", + "org_golang_x_crypto", +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel.lock b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel.lock new file mode 100644 index 0000000000..2848e8716d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/MODULE.bazel.lock @@ -0,0 +1,230 @@ +{ + "lockFileVersion": 18, + "registryFileHashes": { + "https://bcr.bazel.build/bazel_registry.json": "8a28e4aff06ee60aed2a8c281907fb8bcbf3b753c91fb5a5c57da3215d5b3497", + "https://bcr.bazel.build/modules/abseil-cpp/20210324.2/MODULE.bazel": "7cd0312e064fde87c8d1cd79ba06c876bd23630c83466e9500321be55c96ace2", + "https://bcr.bazel.build/modules/abseil-cpp/20211102.0/MODULE.bazel": 
"70390338f7a5106231d20620712f7cccb659cd0e9d073d1991c038eb9fc57589", + "https://bcr.bazel.build/modules/abseil-cpp/20230125.1/MODULE.bazel": "89047429cb0207707b2dface14ba7f8df85273d484c2572755be4bab7ce9c3a0", + "https://bcr.bazel.build/modules/abseil-cpp/20230802.0.bcr.1/MODULE.bazel": "1c8cec495288dccd14fdae6e3f95f772c1c91857047a098fad772034264cc8cb", + "https://bcr.bazel.build/modules/abseil-cpp/20230802.0/MODULE.bazel": "d253ae36a8bd9ee3c5955384096ccb6baf16a1b1e93e858370da0a3b94f77c16", + "https://bcr.bazel.build/modules/abseil-cpp/20230802.1/MODULE.bazel": "fa92e2eb41a04df73cdabeec37107316f7e5272650f81d6cc096418fe647b915", + "https://bcr.bazel.build/modules/abseil-cpp/20240116.1/MODULE.bazel": "37bcdb4440fbb61df6a1c296ae01b327f19e9bb521f9b8e26ec854b6f97309ed", + "https://bcr.bazel.build/modules/abseil-cpp/20240116.1/source.json": "9be551b8d4e3ef76875c0d744b5d6a504a27e3ae67bc6b28f46415fd2d2957da", + "https://bcr.bazel.build/modules/aspect_bazel_lib/2.11.0/MODULE.bazel": "cb1ba9f9999ed0bc08600c221f532c1ddd8d217686b32ba7d45b0713b5131452", + "https://bcr.bazel.build/modules/aspect_bazel_lib/2.11.0/source.json": "92494d5aa43b96665397dd13ee16023097470fa85e276b93674d62a244de47ee", + "https://bcr.bazel.build/modules/bazel_features/1.1.0/MODULE.bazel": "cfd42ff3b815a5f39554d97182657f8c4b9719568eb7fded2b9135f084bf760b", + "https://bcr.bazel.build/modules/bazel_features/1.1.1/MODULE.bazel": "27b8c79ef57efe08efccbd9dd6ef70d61b4798320b8d3c134fd571f78963dbcd", + "https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8", + "https://bcr.bazel.build/modules/bazel_features/1.15.0/MODULE.bazel": "d38ff6e517149dc509406aca0db3ad1efdd890a85e049585b7234d04238e2a4d", + "https://bcr.bazel.build/modules/bazel_features/1.17.0/MODULE.bazel": "039de32d21b816b47bd42c778e0454217e9c9caac4a3cf8e15c7231ee3ddee4d", + "https://bcr.bazel.build/modules/bazel_features/1.18.0/MODULE.bazel": 
"1be0ae2557ab3a72a57aeb31b29be347bcdc5d2b1eb1e70f39e3851a7e97041a", + "https://bcr.bazel.build/modules/bazel_features/1.19.0/MODULE.bazel": "59adcdf28230d220f0067b1f435b8537dd033bfff8db21335ef9217919c7fb58", + "https://bcr.bazel.build/modules/bazel_features/1.30.0/MODULE.bazel": "a14b62d05969a293b80257e72e597c2da7f717e1e69fa8b339703ed6731bec87", + "https://bcr.bazel.build/modules/bazel_features/1.30.0/source.json": "b07e17f067fe4f69f90b03b36ef1e08fe0d1f3cac254c1241a1818773e3423bc", + "https://bcr.bazel.build/modules/bazel_features/1.4.1/MODULE.bazel": "e45b6bb2350aff3e442ae1111c555e27eac1d915e77775f6fdc4b351b758b5d7", + "https://bcr.bazel.build/modules/bazel_features/1.9.0/MODULE.bazel": "885151d58d90d8d9c811eb75e3288c11f850e1d6b481a8c9f766adee4712358b", + "https://bcr.bazel.build/modules/bazel_features/1.9.1/MODULE.bazel": "8f679097876a9b609ad1f60249c49d68bfab783dd9be012faf9d82547b14815a", + "https://bcr.bazel.build/modules/bazel_skylib/1.0.3/MODULE.bazel": "bcb0fd896384802d1ad283b4e4eb4d718eebd8cb820b0a2c3a347fb971afd9d8", + "https://bcr.bazel.build/modules/bazel_skylib/1.1.1/MODULE.bazel": "1add3e7d93ff2e6998f9e118022c84d163917d912f5afafb3058e3d2f1545b5e", + "https://bcr.bazel.build/modules/bazel_skylib/1.2.0/MODULE.bazel": "44fe84260e454ed94ad326352a698422dbe372b21a1ac9f3eab76eb531223686", + "https://bcr.bazel.build/modules/bazel_skylib/1.2.1/MODULE.bazel": "f35baf9da0efe45fa3da1696ae906eea3d615ad41e2e3def4aeb4e8bc0ef9a7a", + "https://bcr.bazel.build/modules/bazel_skylib/1.3.0/MODULE.bazel": "20228b92868bf5cfc41bda7afc8a8ba2a543201851de39d990ec957b513579c5", + "https://bcr.bazel.build/modules/bazel_skylib/1.4.1/MODULE.bazel": "a0dcb779424be33100dcae821e9e27e4f2901d9dfd5333efe5ac6a8d7ab75e1d", + "https://bcr.bazel.build/modules/bazel_skylib/1.4.2/MODULE.bazel": "3bd40978e7a1fac911d5989e6b09d8f64921865a45822d8b09e815eaa726a651", + "https://bcr.bazel.build/modules/bazel_skylib/1.5.0/MODULE.bazel": "32880f5e2945ce6a03d1fbd588e9198c0a959bb42297b2cfaf1685b7bc32e138", 
+ "https://bcr.bazel.build/modules/bazel_skylib/1.6.1/MODULE.bazel": "8fdee2dbaace6c252131c00e1de4b165dc65af02ea278476187765e1a617b917", + "https://bcr.bazel.build/modules/bazel_skylib/1.7.0/MODULE.bazel": "0db596f4563de7938de764cc8deeabec291f55e8ec15299718b93c4423e9796d", + "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b", + "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/source.json": "f121b43eeefc7c29efbd51b83d08631e2347297c95aac9764a701f2a6a2bb953", + "https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84", + "https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8", + "https://bcr.bazel.build/modules/gazelle/0.32.0/MODULE.bazel": "b499f58a5d0d3537f3cf5b76d8ada18242f64ec474d8391247438bf04f58c7b8", + "https://bcr.bazel.build/modules/gazelle/0.33.0/MODULE.bazel": "a13a0f279b462b784fb8dd52a4074526c4a2afe70e114c7d09066097a46b3350", + "https://bcr.bazel.build/modules/gazelle/0.34.0/MODULE.bazel": "abdd8ce4d70978933209db92e436deb3a8b737859e9354fb5fd11fb5c2004c8a", + "https://bcr.bazel.build/modules/gazelle/0.36.0/MODULE.bazel": "e375d5d6e9a6ca59b0cb38b0540bc9a05b6aa926d322f2de268ad267a2ee74c0", + "https://bcr.bazel.build/modules/gazelle/0.44.0/MODULE.bazel": "fd3177ca0938da57a1e416cad3f39b9c4334defbc717e89aba9d9ddbbb0341da", + "https://bcr.bazel.build/modules/gazelle/0.44.0/source.json": "7fb65ef9c1ce470d099ca27fd478673d9d64c844af28d0d472b0874c7d590cb6", + "https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb", + "https://bcr.bazel.build/modules/googletest/1.11.0/MODULE.bazel": "3a83f095183f66345ca86aa13c58b59f9f94a2f81999c093d4eeaa2d262d12f4", + "https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/MODULE.bazel": 
"22c31a561553727960057361aa33bf20fb2e98584bc4fec007906e27053f80c6", + "https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/source.json": "41e9e129f80d8c8bf103a7acc337b76e54fad1214ac0a7084bf24f4cd924b8b4", + "https://bcr.bazel.build/modules/googletest/1.14.0/MODULE.bazel": "cfbcbf3e6eac06ef9d85900f64424708cc08687d1b527f0ef65aa7517af8118f", + "https://bcr.bazel.build/modules/jsoncpp/1.9.5/MODULE.bazel": "31271aedc59e815656f5736f282bb7509a97c7ecb43e927ac1a37966e0578075", + "https://bcr.bazel.build/modules/jsoncpp/1.9.5/source.json": "4108ee5085dd2885a341c7fab149429db457b3169b86eb081fa245eadf69169d", + "https://bcr.bazel.build/modules/libpfm/4.11.0/MODULE.bazel": "45061ff025b301940f1e30d2c16bea596c25b176c8b6b3087e92615adbd52902", + "https://bcr.bazel.build/modules/package_metadata/0.0.2/MODULE.bazel": "fb8d25550742674d63d7b250063d4580ca530499f045d70748b1b142081ebb92", + "https://bcr.bazel.build/modules/package_metadata/0.0.2/source.json": "e53a759a72488d2c0576f57491ef2da0cf4aab05ac0997314012495935531b73", + "https://bcr.bazel.build/modules/platforms/0.0.10/MODULE.bazel": "8cb8efaf200bdeb2150d93e162c40f388529a25852b332cec879373771e48ed5", + "https://bcr.bazel.build/modules/platforms/0.0.11/MODULE.bazel": "0daefc49732e227caa8bfa834d65dc52e8cc18a2faf80df25e8caea151a9413f", + "https://bcr.bazel.build/modules/platforms/0.0.11/source.json": "f7e188b79ebedebfe75e9e1d098b8845226c7992b307e28e1496f23112e8fc29", + "https://bcr.bazel.build/modules/platforms/0.0.4/MODULE.bazel": "9b328e31ee156f53f3c416a64f8491f7eb731742655a47c9eec4703a71644aee", + "https://bcr.bazel.build/modules/platforms/0.0.5/MODULE.bazel": "5733b54ea419d5eaf7997054bb55f6a1d0b5ff8aedf0176fef9eea44f3acda37", + "https://bcr.bazel.build/modules/platforms/0.0.6/MODULE.bazel": "ad6eeef431dc52aefd2d77ed20a4b353f8ebf0f4ecdd26a807d2da5aa8cd0615", + "https://bcr.bazel.build/modules/platforms/0.0.7/MODULE.bazel": "72fd4a0ede9ee5c021f6a8dd92b503e089f46c227ba2813ff183b71616034814", + 
"https://bcr.bazel.build/modules/platforms/0.0.8/MODULE.bazel": "9f142c03e348f6d263719f5074b21ef3adf0b139ee4c5133e2aa35664da9eb2d", + "https://bcr.bazel.build/modules/protobuf/21.7/MODULE.bazel": "a5a29bb89544f9b97edce05642fac225a808b5b7be74038ea3640fae2f8e66a7", + "https://bcr.bazel.build/modules/protobuf/27.0/MODULE.bazel": "7873b60be88844a0a1d8f80b9d5d20cfbd8495a689b8763e76c6372998d3f64c", + "https://bcr.bazel.build/modules/protobuf/27.1/MODULE.bazel": "703a7b614728bb06647f965264967a8ef1c39e09e8f167b3ca0bb1fd80449c0d", + "https://bcr.bazel.build/modules/protobuf/29.0-rc2.bcr.1/MODULE.bazel": "52f4126f63a2f0bbf36b99c2a87648f08467a4eaf92ba726bc7d6a500bbf770c", + "https://bcr.bazel.build/modules/protobuf/29.0-rc2/MODULE.bazel": "6241d35983510143049943fc0d57937937122baf1b287862f9dc8590fc4c37df", + "https://bcr.bazel.build/modules/protobuf/29.0/MODULE.bazel": "319dc8bf4c679ff87e71b1ccfb5a6e90a6dbc4693501d471f48662ac46d04e4e", + "https://bcr.bazel.build/modules/protobuf/29.0/source.json": "b857f93c796750eef95f0d61ee378f3420d00ee1dd38627b27193aa482f4f981", + "https://bcr.bazel.build/modules/protobuf/3.19.0/MODULE.bazel": "6b5fbb433f760a99a22b18b6850ed5784ef0e9928a72668b66e4d7ccd47db9b0", + "https://bcr.bazel.build/modules/protobuf/3.19.2/MODULE.bazel": "532ffe5f2186b69fdde039efe6df13ba726ff338c6bc82275ad433013fa10573", + "https://bcr.bazel.build/modules/protobuf/3.19.6/MODULE.bazel": "9233edc5e1f2ee276a60de3eaa47ac4132302ef9643238f23128fea53ea12858", + "https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/MODULE.bazel": "88af1c246226d87e65be78ed49ecd1e6f5e98648558c14ce99176da041dc378e", + "https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/source.json": "be4789e951dd5301282729fe3d4938995dc4c1a81c2ff150afc9f1b0504c6022", + "https://bcr.bazel.build/modules/re2/2023-09-01/MODULE.bazel": "cb3d511531b16cfc78a225a9e2136007a48cf8a677e4264baeab57fe78a80206", + "https://bcr.bazel.build/modules/re2/2023-09-01/source.json": 
"e044ce89c2883cd957a2969a43e79f7752f9656f6b20050b62f90ede21ec6eb4", + "https://bcr.bazel.build/modules/rules_android/0.1.1/MODULE.bazel": "48809ab0091b07ad0182defb787c4c5328bd3a278938415c00a7b69b50c4d3a8", + "https://bcr.bazel.build/modules/rules_android/0.1.1/source.json": "e6986b41626ee10bdc864937ffb6d6bf275bb5b9c65120e6137d56e6331f089e", + "https://bcr.bazel.build/modules/rules_cc/0.0.1/MODULE.bazel": "cb2aa0747f84c6c3a78dad4e2049c154f08ab9d166b1273835a8174940365647", + "https://bcr.bazel.build/modules/rules_cc/0.0.10/MODULE.bazel": "ec1705118f7eaedd6e118508d3d26deba2a4e76476ada7e0e3965211be012002", + "https://bcr.bazel.build/modules/rules_cc/0.0.13/MODULE.bazel": "0e8529ed7b323dad0775ff924d2ae5af7640b23553dfcd4d34344c7e7a867191", + "https://bcr.bazel.build/modules/rules_cc/0.0.14/MODULE.bazel": "5e343a3aac88b8d7af3b1b6d2093b55c347b8eefc2e7d1442f7a02dc8fea48ac", + "https://bcr.bazel.build/modules/rules_cc/0.0.15/MODULE.bazel": "6704c35f7b4a72502ee81f61bf88706b54f06b3cbe5558ac17e2e14666cd5dcc", + "https://bcr.bazel.build/modules/rules_cc/0.0.16/MODULE.bazel": "7661303b8fc1b4d7f532e54e9d6565771fea666fbdf839e0a86affcd02defe87", + "https://bcr.bazel.build/modules/rules_cc/0.0.2/MODULE.bazel": "6915987c90970493ab97393024c156ea8fb9f3bea953b2f3ec05c34f19b5695c", + "https://bcr.bazel.build/modules/rules_cc/0.0.6/MODULE.bazel": "abf360251023dfe3efcef65ab9d56beefa8394d4176dd29529750e1c57eaa33f", + "https://bcr.bazel.build/modules/rules_cc/0.0.8/MODULE.bazel": "964c85c82cfeb6f3855e6a07054fdb159aced38e99a5eecf7bce9d53990afa3e", + "https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel": "836e76439f354b89afe6a911a7adf59a6b2518fafb174483ad78a2a2fde7b1c5", + "https://bcr.bazel.build/modules/rules_cc/0.1.1/MODULE.bazel": "2f0222a6f229f0bf44cd711dc13c858dad98c62d52bd51d8fc3a764a83125513", + "https://bcr.bazel.build/modules/rules_cc/0.1.1/source.json": "d61627377bd7dd1da4652063e368d9366fc9a73920bfa396798ad92172cf645c", + 
"https://bcr.bazel.build/modules/rules_foreign_cc/0.9.0/MODULE.bazel": "c9e8c682bf75b0e7c704166d79b599f93b72cfca5ad7477df596947891feeef6", + "https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/MODULE.bazel": "40c97d1144356f52905566c55811f13b299453a14ac7769dfba2ac38192337a8", + "https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/source.json": "c8b1e2c717646f1702290959a3302a178fb639d987ab61d548105019f11e527e", + "https://bcr.bazel.build/modules/rules_go/0.41.0/MODULE.bazel": "55861d8e8bb0e62cbd2896f60ff303f62ffcb0eddb74ecb0e5c0cbe36fc292c8", + "https://bcr.bazel.build/modules/rules_go/0.42.0/MODULE.bazel": "8cfa875b9aa8c6fce2b2e5925e73c1388173ea3c32a0db4d2b4804b453c14270", + "https://bcr.bazel.build/modules/rules_go/0.46.0/MODULE.bazel": "3477df8bdcc49e698b9d25f734c4f3a9f5931ff34ee48a2c662be168f5f2d3fd", + "https://bcr.bazel.build/modules/rules_go/0.51.0/MODULE.bazel": "b6920f505935bfd69381651c942496d99b16e2a12f3dd5263b90ded16f3b4d0f", + "https://bcr.bazel.build/modules/rules_go/0.55.1/MODULE.bazel": "a57a6fc59a74326c0b440d07cca209edf13c7d1a641e48cfbeab56e79f873609", + "https://bcr.bazel.build/modules/rules_go/0.55.1/source.json": "827a740c8959c9d20616889e7746cde4dcc6ee80d25146943627ccea0736328f", + "https://bcr.bazel.build/modules/rules_java/4.0.0/MODULE.bazel": "5a78a7ae82cd1a33cef56dc578c7d2a46ed0dca12643ee45edbb8417899e6f74", + "https://bcr.bazel.build/modules/rules_java/5.3.5/MODULE.bazel": "a4ec4f2db570171e3e5eb753276ee4b389bae16b96207e9d3230895c99644b86", + "https://bcr.bazel.build/modules/rules_java/6.0.0/MODULE.bazel": "8a43b7df601a7ec1af61d79345c17b31ea1fedc6711fd4abfd013ea612978e39", + "https://bcr.bazel.build/modules/rules_java/6.3.0/MODULE.bazel": "a97c7678c19f236a956ad260d59c86e10a463badb7eb2eda787490f4c969b963", + "https://bcr.bazel.build/modules/rules_java/6.4.0/MODULE.bazel": "e986a9fe25aeaa84ac17ca093ef13a4637f6107375f64667a15999f77db6c8f6", + "https://bcr.bazel.build/modules/rules_java/6.5.2/MODULE.bazel": 
"1d440d262d0e08453fa0c4d8f699ba81609ed0e9a9a0f02cd10b3e7942e61e31", + "https://bcr.bazel.build/modules/rules_java/7.10.0/MODULE.bazel": "530c3beb3067e870561739f1144329a21c851ff771cd752a49e06e3dc9c2e71a", + "https://bcr.bazel.build/modules/rules_java/7.12.2/MODULE.bazel": "579c505165ee757a4280ef83cda0150eea193eed3bef50b1004ba88b99da6de6", + "https://bcr.bazel.build/modules/rules_java/7.2.0/MODULE.bazel": "06c0334c9be61e6cef2c8c84a7800cef502063269a5af25ceb100b192453d4ab", + "https://bcr.bazel.build/modules/rules_java/7.3.2/MODULE.bazel": "50dece891cfdf1741ea230d001aa9c14398062f2b7c066470accace78e412bc2", + "https://bcr.bazel.build/modules/rules_java/7.6.1/MODULE.bazel": "2f14b7e8a1aa2f67ae92bc69d1ec0fa8d9f827c4e17ff5e5f02e91caa3b2d0fe", + "https://bcr.bazel.build/modules/rules_java/8.12.0/MODULE.bazel": "8e6590b961f2defdfc2811c089c75716cb2f06c8a4edeb9a8d85eaa64ee2a761", + "https://bcr.bazel.build/modules/rules_java/8.12.0/source.json": "cbd5d55d9d38d4008a7d00bee5b5a5a4b6031fcd4a56515c9accbcd42c7be2ba", + "https://bcr.bazel.build/modules/rules_jvm_external/4.4.2/MODULE.bazel": "a56b85e418c83eb1839819f0b515c431010160383306d13ec21959ac412d2fe7", + "https://bcr.bazel.build/modules/rules_jvm_external/5.1/MODULE.bazel": "33f6f999e03183f7d088c9be518a63467dfd0be94a11d0055fe2d210f89aa909", + "https://bcr.bazel.build/modules/rules_jvm_external/5.2/MODULE.bazel": "d9351ba35217ad0de03816ef3ed63f89d411349353077348a45348b096615036", + "https://bcr.bazel.build/modules/rules_jvm_external/5.3/MODULE.bazel": "bf93870767689637164657731849fb887ad086739bd5d360d90007a581d5527d", + "https://bcr.bazel.build/modules/rules_jvm_external/6.1/MODULE.bazel": "75b5fec090dbd46cf9b7d8ea08cf84a0472d92ba3585b476f44c326eda8059c4", + "https://bcr.bazel.build/modules/rules_jvm_external/6.3/MODULE.bazel": "c998e060b85f71e00de5ec552019347c8bca255062c990ac02d051bb80a38df0", + "https://bcr.bazel.build/modules/rules_jvm_external/6.3/source.json": 
"6f5f5a5a4419ae4e37c35a5bb0a6ae657ed40b7abc5a5189111b47fcebe43197", + "https://bcr.bazel.build/modules/rules_kotlin/1.9.0/MODULE.bazel": "ef85697305025e5a61f395d4eaede272a5393cee479ace6686dba707de804d59", + "https://bcr.bazel.build/modules/rules_kotlin/1.9.6/MODULE.bazel": "d269a01a18ee74d0335450b10f62c9ed81f2321d7958a2934e44272fe82dcef3", + "https://bcr.bazel.build/modules/rules_kotlin/1.9.6/source.json": "2faa4794364282db7c06600b7e5e34867a564ae91bda7cae7c29c64e9466b7d5", + "https://bcr.bazel.build/modules/rules_license/0.0.3/MODULE.bazel": "627e9ab0247f7d1e05736b59dbb1b6871373de5ad31c3011880b4133cafd4bd0", + "https://bcr.bazel.build/modules/rules_license/0.0.7/MODULE.bazel": "088fbeb0b6a419005b89cf93fe62d9517c0a2b8bb56af3244af65ecfe37e7d5d", + "https://bcr.bazel.build/modules/rules_license/1.0.0/MODULE.bazel": "a7fda60eefdf3d8c827262ba499957e4df06f659330bbe6cdbdb975b768bb65c", + "https://bcr.bazel.build/modules/rules_license/1.0.0/source.json": "a52c89e54cc311196e478f8382df91c15f7a2bfdf4c6cd0e2675cc2ff0b56efb", + "https://bcr.bazel.build/modules/rules_pkg/0.7.0/MODULE.bazel": "df99f03fc7934a4737122518bb87e667e62d780b610910f0447665a7e2be62dc", + "https://bcr.bazel.build/modules/rules_pkg/1.0.1/MODULE.bazel": "5b1df97dbc29623bccdf2b0dcd0f5cb08e2f2c9050aab1092fd39a41e82686ff", + "https://bcr.bazel.build/modules/rules_pkg/1.0.1/source.json": "bd82e5d7b9ce2d31e380dd9f50c111d678c3bdaca190cb76b0e1c71b05e1ba8a", + "https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06", + "https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7", + "https://bcr.bazel.build/modules/rules_proto/6.0.0/MODULE.bazel": "b531d7f09f58dce456cd61b4579ce8c86b38544da75184eadaf0a7cb7966453f", + "https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73", + 
"https://bcr.bazel.build/modules/rules_proto/7.0.2/MODULE.bazel": "bf81793bd6d2ad89a37a40693e56c61b0ee30f7a7fdbaf3eabbf5f39de47dea2", + "https://bcr.bazel.build/modules/rules_proto/7.0.2/source.json": "1e5e7260ae32ef4f2b52fd1d0de8d03b606a44c91b694d2f1afb1d3b28a48ce1", + "https://bcr.bazel.build/modules/rules_python/0.10.2/MODULE.bazel": "cc82bc96f2997baa545ab3ce73f196d040ffb8756fd2d66125a530031cd90e5f", + "https://bcr.bazel.build/modules/rules_python/0.23.1/MODULE.bazel": "49ffccf0511cb8414de28321f5fcf2a31312b47c40cc21577144b7447f2bf300", + "https://bcr.bazel.build/modules/rules_python/0.25.0/MODULE.bazel": "72f1506841c920a1afec76975b35312410eea3aa7b63267436bfb1dd91d2d382", + "https://bcr.bazel.build/modules/rules_python/0.28.0/MODULE.bazel": "cba2573d870babc976664a912539b320cbaa7114cd3e8f053c720171cde331ed", + "https://bcr.bazel.build/modules/rules_python/0.31.0/MODULE.bazel": "93a43dc47ee570e6ec9f5779b2e64c1476a6ce921c48cc9a1678a91dd5f8fd58", + "https://bcr.bazel.build/modules/rules_python/0.4.0/MODULE.bazel": "9208ee05fd48bf09ac60ed269791cf17fb343db56c8226a720fbb1cdf467166c", + "https://bcr.bazel.build/modules/rules_python/0.40.0/MODULE.bazel": "9d1a3cd88ed7d8e39583d9ffe56ae8a244f67783ae89b60caafc9f5cf318ada7", + "https://bcr.bazel.build/modules/rules_python/0.40.0/source.json": "939d4bd2e3110f27bfb360292986bb79fd8dcefb874358ccd6cdaa7bda029320", + "https://bcr.bazel.build/modules/rules_shell/0.2.0/MODULE.bazel": "fda8a652ab3c7d8fee214de05e7a9916d8b28082234e8d2c0094505c5268ed3c", + "https://bcr.bazel.build/modules/rules_shell/0.3.0/MODULE.bazel": "de4402cd12f4cc8fda2354fce179fdb068c0b9ca1ec2d2b17b3e21b24c1a937b", + "https://bcr.bazel.build/modules/rules_shell/0.3.0/source.json": "c55ed591aa5009401ddf80ded9762ac32c358d2517ee7820be981e2de9756cf3", + "https://bcr.bazel.build/modules/stardoc/0.5.1/MODULE.bazel": "1a05d92974d0c122f5ccf09291442580317cdd859f07a8655f1db9a60374f9f8", + "https://bcr.bazel.build/modules/stardoc/0.5.3/MODULE.bazel": 
"c7f6948dae6999bf0db32c1858ae345f112cacf98f174c7a8bb707e41b974f1c", + "https://bcr.bazel.build/modules/stardoc/0.5.6/MODULE.bazel": "c43dabc564990eeab55e25ed61c07a1aadafe9ece96a4efabb3f8bf9063b71ef", + "https://bcr.bazel.build/modules/stardoc/0.6.2/MODULE.bazel": "7060193196395f5dd668eda046ccbeacebfd98efc77fed418dbe2b82ffaa39fd", + "https://bcr.bazel.build/modules/stardoc/0.7.0/MODULE.bazel": "05e3d6d30c099b6770e97da986c53bd31844d7f13d41412480ea265ac9e8079c", + "https://bcr.bazel.build/modules/stardoc/0.7.1/MODULE.bazel": "3548faea4ee5dda5580f9af150e79d0f6aea934fc60c1cc50f4efdd9420759e7", + "https://bcr.bazel.build/modules/stardoc/0.7.1/source.json": "b6500ffcd7b48cd72c29bb67bcac781e12701cc0d6d55d266a652583cfcdab01", + "https://bcr.bazel.build/modules/upb/0.0.0-20220923-a547704/MODULE.bazel": "7298990c00040a0e2f121f6c32544bab27d4452f80d9ce51349b1a28f3005c43", + "https://bcr.bazel.build/modules/zlib/1.2.11/MODULE.bazel": "07b389abc85fdbca459b69e2ec656ae5622873af3f845e1c9d80fe179f3effa0", + "https://bcr.bazel.build/modules/zlib/1.2.12/MODULE.bazel": "3b1a8834ada2a883674be8cbd36ede1b6ec481477ada359cd2d3ddc562340b27", + "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.5/MODULE.bazel": "eec517b5bbe5492629466e11dae908d043364302283de25581e3eb944326c4ca", + "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.5/source.json": "22bc55c47af97246cfc093d0acf683a7869377de362b5d1c552c2c2e16b7a806", + "https://bcr.bazel.build/modules/zlib/1.3.1/MODULE.bazel": "751c9940dcfe869f5f7274e1295422a34623555916eb98c174c1e945594bf198" + }, + "selectedYankedVersions": {}, + "moduleExtensions": { + "@@rules_kotlin+//src/main/starlark/core/repositories:bzlmod_setup.bzl%rules_kotlin_extensions": { + "general": { + "bzlTransitiveDigest": "hUTp2w+RUVdL7ma5esCXZJAFnX7vLbVfLd7FwnQI6bU=", + "usagesDigest": "QI2z8ZUR+mqtbwsf2fLqYdJAkPOHdOV+tF2yVAUgRzw=", + "recordedFileInputs": {}, + "recordedDirentsInputs": {}, + "envVariables": {}, + "generatedRepoSpecs": { + "com_github_jetbrains_kotlin_git": { + 
"repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_compiler_git_repository", + "attributes": { + "urls": [ + "https://github.com/JetBrains/kotlin/releases/download/v1.9.23/kotlin-compiler-1.9.23.zip" + ], + "sha256": "93137d3aab9afa9b27cb06a824c2324195c6b6f6179d8a8653f440f5bd58be88" + } + }, + "com_github_jetbrains_kotlin": { + "repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_capabilities_repository", + "attributes": { + "git_repository_name": "com_github_jetbrains_kotlin_git", + "compiler_version": "1.9.23" + } + }, + "com_github_google_ksp": { + "repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:ksp.bzl%ksp_compiler_plugin_repository", + "attributes": { + "urls": [ + "https://github.com/google/ksp/releases/download/1.9.23-1.0.20/artifacts.zip" + ], + "sha256": "ee0618755913ef7fd6511288a232e8fad24838b9af6ea73972a76e81053c8c2d", + "strip_version": "1.9.23-1.0.20" + } + }, + "com_github_pinterest_ktlint": { + "repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_file", + "attributes": { + "sha256": "01b2e0ef893383a50dbeb13970fe7fa3be36ca3e83259e01649945b09d736985", + "urls": [ + "https://github.com/pinterest/ktlint/releases/download/1.3.0/ktlint" + ], + "executable": true + } + }, + "rules_android": { + "repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive", + "attributes": { + "sha256": "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806", + "strip_prefix": "rules_android-0.1.1", + "urls": [ + "https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip" + ] + } + } + }, + "recordedRepoMappingEntries": [ + [ + "rules_kotlin+", + "bazel_tools", + "bazel_tools" + ] + ] + } + } + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/Makefile b/vendor/github.com/lestrrat-go/jwx/v3/Makefile new file mode 100644 index 0000000000..672c007b29 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/Makefile @@ -0,0 +1,98 @@ +.PHONY: 
generate realclean cover viewcover test lint check_diffs imports tidy jwx +generate: + @go generate + @$(MAKE) generate-jwa generate-jwe generate-jwk generate-jws generate-jwt + @./tools/cmd/gofmt.sh + +generate-%: + @go generate $(shell pwd -P)/$(patsubst generate-%,%,$@) + +realclean: + rm coverage.out + +test-cmd: + env TESTOPTS="$(TESTOPTS)" ./tools/test.sh + +test: + $(MAKE) test-stdlib TESTOPTS= + +test-stdlib: + $(MAKE) test-cmd TESTOPTS= + +test-goccy: + $(MAKE) test-cmd TESTOPTS="-tags jwx_goccy" + +test-es256k: + $(MAKE) test-cmd TESTOPTS="-tags jwx_es256k" + +test-secp256k1-pem: + $(MAKE) test-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1_pem" + +test-asmbase64: + $(MAKE) test-cmd TESTOPTS="-tags jwx_asmbase64" + +test-alltags: + $(MAKE) test-cmd TESTOPTS="-tags jwx_asmbase64,jwx_goccy,jwx_es256k,jwx_secp256k1_pem" + +cover-cmd: + env MODE=cover ./tools/test.sh + +cover: + $(MAKE) cover-stdlib + +cover-stdlib: + $(MAKE) cover-cmd TESTOPTS= + +cover-goccy: + $(MAKE) cover-cmd TESTOPTS="-tags jwx_goccy" + +cover-es256k: + $(MAKE) cover-cmd TESTOPTS="-tags jwx_es256k" + +cover-secp256k1-pem: + $(MAKE) cover-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1" + +cover-asmbase64: + $(MAKE) cover-cmd TESTOPTS="-tags jwx_asmbase64" + +cover-alltags: + $(MAKE) cover-cmd TESTOPTS="-tags jwx_asmbase64,jwx_goccy,jwx_es256k,jwx_secp256k1_pem" + +smoke-cmd: + env MODE=short ./tools/test.sh + +smoke: + $(MAKE) smoke-stdlib + +smoke-stdlib: + $(MAKE) smoke-cmd TESTOPTS= + +smoke-goccy: + $(MAKE) smoke-cmd TESTOPTS="-tags jwx_goccy" + +smoke-es256k: + $(MAKE) smoke-cmd TESTOPTS="-tags jwx_es256k" + +smoke-secp256k1-pem: + $(MAKE) smoke-cmd TESTOPTS="-tags jwx_es256k,jwx_secp256k1_pem" + +smoke-alltags: + $(MAKE) smoke-cmd TESTOPTS="-tags jwx_goccy,jwx_es256k,jwx_secp256k1_pem" + +viewcover: + go tool cover -html=coverage.out + +lint: + golangci-lint run ./... 
+ +check_diffs: + ./scripts/check-diff.sh + +imports: + goimports -w ./ + +tidy: + ./scripts/tidy.sh + +jwx: + @./tools/cmd/install-jwx.sh diff --git a/vendor/github.com/lestrrat-go/jwx/v3/README.md b/vendor/github.com/lestrrat-go/jwx/v3/README.md new file mode 100644 index 0000000000..632033f3cb --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/README.md @@ -0,0 +1,263 @@ +# github.com/lestrrat-go/jwx/v3 [![CI](https://github.com/lestrrat-go/jwx/actions/workflows/ci.yml/badge.svg)](https://github.com/lestrrat-go/jwx/actions/workflows/ci.yml) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v3.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3) [![codecov.io](https://codecov.io/github/lestrrat-go/jwx/coverage.svg?branch=v3)](https://codecov.io/github/lestrrat-go/jwx?branch=v3) + +Go module implementing various JWx (JWA/JWE/JWK/JWS/JWT, otherwise known as JOSE) technologies. + +If you are using this module in your product or your company, please add your product and/or company name in the [Wiki](https://github.com/lestrrat-go/jwx/wiki/Users)! It really helps keeping up our motivation. + +# Features + +* Complete coverage of JWA/JWE/JWK/JWS/JWT, not just JWT+minimum tool set. + * Supports JWS messages with multiple signatures, both compact and JSON serialization + * Supports JWS with detached payload + * Supports JWS with unencoded payload (RFC7797) + * Supports JWE messages with multiple recipients, both compact and JSON serialization + * Most operations work with either JWK or raw keys e.g. *rsa.PrivateKey, *ecdsa.PrivateKey, etc). +* Opinionated, but very uniform API. Everything is symmetric, and follows a standard convention + * jws.Parse/Verify/Sign + * jwe.Parse/Encrypt/Decrypt + * Arguments are organized as explicit required parameters and optional WithXXXX() style options. 
+* Extra utilities + * `jwk.Cache` to always keep a JWKS up-to-date + * [bazel](https://bazel.build)-ready + +Some more in-depth discussion on why you might want to use this library over others +can be found in the [Description section](#description) + +If you are using v0 or v1, you are strongly encouraged to migrate to using v3 +(the version that comes with the README you are reading). + +# SYNOPSIS + + +```go +package examples_test + +import ( + "bytes" + "fmt" + "net/http" + "time" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwe" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/jwx/v3/jws" + "github.com/lestrrat-go/jwx/v3/jwt" +) + +func Example() { + // Parse, serialize, slice and dice JWKs! + privkey, err := jwk.ParseKey(jsonRSAPrivateKey) + if err != nil { + fmt.Printf("failed to parse JWK: %s\n", err) + return + } + + pubkey, err := jwk.PublicKeyOf(privkey) + if err != nil { + fmt.Printf("failed to get public key: %s\n", err) + return + } + + // Work with JWTs! + { + // Build a JWT! + tok, err := jwt.NewBuilder(). + Issuer(`github.com/lestrrat-go/jwx`). + IssuedAt(time.Now()). + Build() + if err != nil { + fmt.Printf("failed to build token: %s\n", err) + return + } + + // Sign a JWT! + signed, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256(), privkey)) + if err != nil { + fmt.Printf("failed to sign token: %s\n", err) + return + } + + // Verify a JWT! + { + verifiedToken, err := jwt.Parse(signed, jwt.WithKey(jwa.RS256(), pubkey)) + if err != nil { + fmt.Printf("failed to verify JWS: %s\n", err) + return + } + _ = verifiedToken + } + + // Work with *http.Request! 
+ { + req, err := http.NewRequest(http.MethodGet, `https://github.com/lestrrat-go/jwx`, nil) + req.Header.Set(`Authorization`, fmt.Sprintf(`Bearer %s`, signed)) + + verifiedToken, err := jwt.ParseRequest(req, jwt.WithKey(jwa.RS256(), pubkey)) + if err != nil { + fmt.Printf("failed to verify token from HTTP request: %s\n", err) + return + } + _ = verifiedToken + } + } + + // Encrypt and Decrypt arbitrary payload with JWE! + { + encrypted, err := jwe.Encrypt(payloadLoremIpsum, jwe.WithKey(jwa.RSA_OAEP(), jwkRSAPublicKey)) + if err != nil { + fmt.Printf("failed to encrypt payload: %s\n", err) + return + } + + decrypted, err := jwe.Decrypt(encrypted, jwe.WithKey(jwa.RSA_OAEP(), jwkRSAPrivateKey)) + if err != nil { + fmt.Printf("failed to decrypt payload: %s\n", err) + return + } + + if !bytes.Equal(decrypted, payloadLoremIpsum) { + fmt.Printf("verified payload did not match\n") + return + } + } + + // Sign and Verify arbitrary payload with JWS! + { + signed, err := jws.Sign(payloadLoremIpsum, jws.WithKey(jwa.RS256(), jwkRSAPrivateKey)) + if err != nil { + fmt.Printf("failed to sign payload: %s\n", err) + return + } + + verified, err := jws.Verify(signed, jws.WithKey(jwa.RS256(), jwkRSAPublicKey)) + if err != nil { + fmt.Printf("failed to verify payload: %s\n", err) + return + } + + if !bytes.Equal(verified, payloadLoremIpsum) { + fmt.Printf("verified payload did not match\n") + return + } + } + // OUTPUT: +} +``` +source: [examples/jwx_readme_example_test.go](https://github.com/lestrrat-go/jwx/blob/v3/examples/jwx_readme_example_test.go) + + +# How-to Documentation + +* [API documentation](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3) +* [How-to style documentation](./docs) +* [Runnable Examples](./examples) + +# Description + +This Go module implements JWA, JWE, JWK, JWS, and JWT. 
Please see the following table for the list of +available packages: + +| Package name | Notes | +|-----------------------------------------------------------|-------------------------------------------------| +| [jwt](https://github.com/lestrrat-go/jwx/tree/v3/jwt) | [RFC 7519](https://tools.ietf.org/html/rfc7519) | +| [jwk](https://github.com/lestrrat-go/jwx/tree/v3/jwk) | [RFC 7517](https://tools.ietf.org/html/rfc7517) + [RFC 7638](https://tools.ietf.org/html/rfc7638) | +| [jwa](https://github.com/lestrrat-go/jwx/tree/v3/jwa) | [RFC 7518](https://tools.ietf.org/html/rfc7518) | +| [jws](https://github.com/lestrrat-go/jwx/tree/v3/jws) | [RFC 7515](https://tools.ietf.org/html/rfc7515) + [RFC 7797](https://tools.ietf.org/html/rfc7797) | +| [jwe](https://github.com/lestrrat-go/jwx/tree/v3/jwe) | [RFC 7516](https://tools.ietf.org/html/rfc7516) | +## History + +My goal was to write a server that heavily uses JWK and JWT. At first glance +the libraries that already exist seemed sufficient, but soon I realized that + +1. To completely implement the protocols, I needed the entire JWT, JWK, JWS, JWE (and JWA, by necessity). +2. Most of the libraries that existed only deal with a subset of the various JWx specifications that were necessary to implement their specific needs + +For example, a certain library looks like it had most of JWS, JWE, JWK covered, but then it lacked the ability to include private claims in its JWT responses. Another library had support of all the private claims, but completely lacked in its flexibility to generate various different response formats. + +Because I was writing the server side (and the client side for testing), I needed the *entire* JOSE toolset to properly implement my server, **and** they needed to be *flexible* enough to fulfill the entire spec that I was writing. + +So here's `github.com/lestrrat-go/jwx/v3`. 
This library is extensible, customizable, and hopefully well organized to the point that it is easy for you to slice and dice it. + +## Why would I use this library? + +There are several other major Go modules that handle JWT and related data formats, +so why should you use this library? + +From a purely functional perspective, the only major difference is this: +Whereas most other projects only deal with what they seem necessary to handle +JWTs, this module handles the **_entire_** spectrum of JWS, JWE, JWK, and JWT. + +That is, if you need to not only parse JWTs, but also to control JWKs, or +if you need to handle payloads that are NOT JWTs, you should probably consider +using this module. You should also note that JWT is built _on top_ of those +other technologies. You simply cannot have a complete JWT package without +implementing the entirety of JWS/JWE/JWK, which this library does. + +Next, from an implementation perspective, this module differs significantly +from others in that it tries very hard to expose only the APIs, and not the +internal data. For example, individual JWT claims are not accessible through +struct field lookups. You need to use one of the getter methods. + +This is because this library takes the stance that the end user is fully capable +and even willing to shoot themselves on the foot when presented with a lax +API. By making sure that users do not have access to open structs, we can protect +users from doing silly things like creating _incomplete_ structs, or access the +structs concurrently without any protection. This structure also allows +us to put extra smarts in the structs, such as doing the right thing when +you want to parse / write custom fields (this module does not require the user +to specify alternate structs to parse objects with custom fields) + +In the end I think it comes down to your usage pattern, and priorities. 
+Some general guidelines that come to mind are: + +* If you want a single library to handle everything JWx, such as using JWE, JWK, JWS, handling [auto-refreshing JWKs](https://github.com/lestrrat-go/jwx/blob/v3/docs/04-jwk.md#auto-refreshing-remote-keys), use this module. +* If you want to honor all possible custom fields transparently, use this module. +* If you want a standardized clean API, use this module. + +Otherwise, feel free to choose something else. + +# Contributions + +## Issues + +For bug reports and feature requests, please try to follow the issue templates as much as possible. +For either bug reports or feature requests, failing tests are even better. + +## Pull Requests + +Please make sure to include tests that exercise the changes you made. + +If you are editing auto-generated files (those files with the `_gen.go` suffix, please make sure that you do the following: + +1. Edit the generator, not the generated files (e.g. internal/cmd/genreadfile/main.go) +2. Run `make generate` (or `go generate`) to generate the new code +3. Commit _both_ the generator _and_ the generated files + +## Discussions / Usage + +Please try [discussions](https://github.com/lestrrat-go/jwx/tree/v3/discussions) first. 
+ +# Related Modules + +* [github.com/lestrrat-go/echo-middleware-jwx](https://github.com/lestrrat-go/echo-middleware-jwx) - Sample Echo middleware +* [github.com/jwx-go/crypto-signer/gcp](https://github.com/jwx-go/crypto-signer/tree/main/gcp) - GCP KMS wrapper that implements [`crypto.Signer`](https://pkg.go.dev/crypto#Signer) +* [github.com/jwx-go/crypto-signer/aws](https://github.com/jwx-go/crypto-signer/tree/main/aws) - AWS KMS wrapper that implements [`crypto.Signer`](https://pkg.go.dev/crypto#Signer) + +# Credits + +* Initial work on this library was generously sponsored by HDE Inc (https://www.hde.co.jp) +* Lots of code, especially JWE was initially taken from go-jose library (https://github.com/square/go-jose) +* Lots of individual contributors have helped this project over the years. Thank each and everyone of you very much. + +# Quid pro quo + +If you use this software to build products in a for-profit organization, we ask you to _consider_ +contributing back to FOSS in the following manner: + +* For every 100 employees (direct hires) of your organization, please consider contributing minimum of $1 every year to either this project, **or** another FOSS projects that this project uses. For example, for 100 employees, we ask you contribute $100 yearly; for 10,000 employees, we ask you contribute $10,000 yearly. +* If possible, please make this information public. You do not need to disclose the amount you are contributing, but please make the information that you are contributing to particular FOSS projects public. For this project, please consider writing your name on the [Wiki](https://github.com/lestrrat-go/jwx/wiki/Users) + +This is _NOT_ a licensing term: you are still free to use this software according to the license it +comes with. This clause is only a plea for people to acknowledge the work from FOSS developers whose +work you rely on each and everyday. 
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/SECURITY.md b/vendor/github.com/lestrrat-go/jwx/v3/SECURITY.md new file mode 100644 index 0000000000..601dced5cd --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +## Supported Versions + +Most recent two major versions will receive security updates + +| Version | Supported | +| -------- | ------------------ | +| v3.x.x | :white_check_mark: | +| v2.x.x | :white_check_mark: | +| < v2.0.0 | :x: | + +## Reporting a Vulnerability + +If you think you found a vulnerability, please report it via [GitHub Security Advisory](https://github.com/lestrrat-go/jwx/security/advisories/new). +Please include explicit steps to reproduce the security issue. + +We will do our best to respond in a timely manner, but please also be aware that this project is maintained by a very limited number of people. Please help us with test code and such. diff --git a/vendor/github.com/lestrrat-go/jwx/v3/WORKSPACE b/vendor/github.com/lestrrat-go/jwx/v3/WORKSPACE new file mode 100644 index 0000000000..c8578d8b0a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/WORKSPACE @@ -0,0 +1,2 @@ +# Empty WORKSPACE file for bzlmod compatibility +# All dependencies are now managed in MODULE.bazel \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/cert/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/cert/BUILD.bazel new file mode 100644 index 0000000000..f308530bcf --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/cert/BUILD.bazel @@ -0,0 +1,34 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "cert", + srcs = [ + "cert.go", + "chain.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/cert", + visibility = ["//visibility:public"], + deps = [ + "//internal/base64", + "//internal/tokens", + ], +) + +go_test( + name = "cert_test", + srcs = [ + "cert_test.go", + "chain_test.go", + ], + deps = [ + ":cert", + "//internal/jwxtest", + 
"@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":cert", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/cert/cert.go b/vendor/github.com/lestrrat-go/jwx/v3/cert/cert.go new file mode 100644 index 0000000000..efefbcb417 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/cert/cert.go @@ -0,0 +1,48 @@ +package cert + +import ( + "crypto/x509" + stdlibb64 "encoding/base64" + "fmt" + "io" + + "github.com/lestrrat-go/jwx/v3/internal/base64" +) + +// Create is a wrapper around x509.CreateCertificate, but it additionally +// encodes it in base64 so that it can be easily added to `x5c` fields +func Create(rand io.Reader, template, parent *x509.Certificate, pub, priv any) ([]byte, error) { + der, err := x509.CreateCertificate(rand, template, parent, pub, priv) + if err != nil { + return nil, fmt.Errorf(`failed to create x509 certificate: %w`, err) + } + return EncodeBase64(der) +} + +// EncodeBase64 is a utility function to encode ASN.1 DER certificates +// using base64 encoding. This operation is normally done by `pem.Encode` +// but since PEM would include the markers (`-----BEGIN`, and the like) +// while `x5c` fields do not need this, this function can be used to +// shave off a few lines +func EncodeBase64(der []byte) ([]byte, error) { + enc := stdlibb64.StdEncoding + dst := make([]byte, enc.EncodedLen(len(der))) + enc.Encode(dst, der) + return dst, nil +} + +// Parse is a utility function to decode a base64 encoded +// ASN.1 DER format certificate, and to parse the byte sequence. 
+// The certificate must be in PKIX format, and it must not contain PEM markers +func Parse(src []byte) (*x509.Certificate, error) { + dst, err := base64.Decode(src) + if err != nil { + return nil, fmt.Errorf(`failed to base64 decode the certificate: %w`, err) + } + + cert, err := x509.ParseCertificate(dst) + if err != nil { + return nil, fmt.Errorf(`failed to parse x509 certificate: %w`, err) + } + return cert, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/cert/chain.go b/vendor/github.com/lestrrat-go/jwx/v3/cert/chain.go new file mode 100644 index 0000000000..112274669a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/cert/chain.go @@ -0,0 +1,80 @@ +package cert + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +// Chain represents a certificate chain as used in the `x5c` field of +// various objects within JOSE. +// +// It stores the certificates as a list of base64 encoded []byte +// sequence. By definition these values must PKIX encoded. +type Chain struct { + certificates [][]byte +} + +func (cc Chain) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.WriteByte(tokens.OpenSquareBracket) + for i, cert := range cc.certificates { + if i > 0 { + buf.WriteByte(tokens.Comma) + } + buf.WriteByte('"') + buf.Write(cert) + buf.WriteByte('"') + } + buf.WriteByte(tokens.CloseSquareBracket) + return buf.Bytes(), nil +} + +func (cc *Chain) UnmarshalJSON(data []byte) error { + var tmp []string + if err := json.Unmarshal(data, &tmp); err != nil { + return fmt.Errorf(`failed to unmarshal certificate chain: %w`, err) + } + + certs := make([][]byte, len(tmp)) + for i, cert := range tmp { + certs[i] = []byte(cert) + } + cc.certificates = certs + return nil +} + +// Get returns the n-th ASN.1 DER + base64 encoded certificate +// stored. `false` will be returned in the second argument if +// the corresponding index is out of range. 
+func (cc *Chain) Get(index int) ([]byte, bool) { + if index < 0 || index >= len(cc.certificates) { + return nil, false + } + + return cc.certificates[index], true +} + +// Len returns the number of certificates stored in this Chain +func (cc *Chain) Len() int { + return len(cc.certificates) +} + +var pemStart = []byte("----- BEGIN CERTIFICATE -----") +var pemEnd = []byte("----- END CERTIFICATE -----") + +func (cc *Chain) AddString(der string) error { + return cc.Add([]byte(der)) +} + +func (cc *Chain) Add(der []byte) error { + // We're going to be nice and remove marker lines if they + // give it to us + der = bytes.TrimPrefix(der, pemStart) + der = bytes.TrimSuffix(der, pemEnd) + der = bytes.TrimSpace(der) + cc.certificates = append(cc.certificates, der) + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/codecov.yml b/vendor/github.com/lestrrat-go/jwx/v3/codecov.yml new file mode 100644 index 0000000000..130effd7a6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/codecov.yml @@ -0,0 +1,2 @@ +codecov: + allow_coverage_offsets: true diff --git a/vendor/github.com/lestrrat-go/jwx/v3/format.go b/vendor/github.com/lestrrat-go/jwx/v3/format.go new file mode 100644 index 0000000000..6cb6efe7eb --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/format.go @@ -0,0 +1,104 @@ +package jwx + +import ( + "bytes" + "encoding/json" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +type FormatKind int + +// These constants describe the result from guessing the format +// of the incoming buffer. 
+const ( + // InvalidFormat is returned when the format of the incoming buffer + // has been deemed conclusively invalid + InvalidFormat FormatKind = iota + // UnknownFormat is returned when GuessFormat was not able to conclusively + // determine the format of the + UnknownFormat + JWE + JWS + JWK + JWKS + JWT +) + +type formatHint struct { + Payload json.RawMessage `json:"payload"` // Only in JWS + Signatures json.RawMessage `json:"signatures"` // Only in JWS + Ciphertext json.RawMessage `json:"ciphertext"` // Only in JWE + KeyType json.RawMessage `json:"kty"` // Only in JWK + Keys json.RawMessage `json:"keys"` // Only in JWKS + Audience json.RawMessage `json:"aud"` // Only in JWT +} + +// GuessFormat is used to guess the format the given payload is in +// using heuristics. See the type FormatKind for a full list of +// possible types. +// +// This may be useful in determining your next action when you may +// encounter a payload that could either be a JWE, JWS, or a plain JWT. +// +// Because JWTs are almost always JWS signed, you may be thrown off +// if you pass what you think is a JWT payload to this function. +// If the function is in the "Compact" format, it means it's a JWS +// signed message, and its payload is the JWT. Therefore this function +// will return JWS, not JWT. +// +// This function requires an extra parsing of the payload, and therefore +// may be inefficient if you call it every time before parsing. +func GuessFormat(payload []byte) FormatKind { + // The check against kty, keys, and aud are something this library + // made up. for the distinctions between JWE and JWS, we used + // https://datatracker.ietf.org/doc/html/rfc7516#section-9. + // + // The above RFC described several ways to distinguish between + // a JWE and JWS JSON, but we're only using one of them + + payload = bytes.TrimSpace(payload) + if len(payload) <= 0 { + return UnknownFormat + } + + if payload[0] != tokens.OpenCurlyBracket { + // Compact format. 
It's probably a JWS or JWE + sep := []byte{tokens.Period} // I want to const this :/ + + // Note: this counts the number of occurrences of the + // separator, but the RFC talks about the number of segments. + // number of tokens.Period == segments - 1, so that's why we have 2 and 4 here + switch count := bytes.Count(payload, sep); count { + case 2: + return JWS + case 4: + return JWE + default: + return InvalidFormat + } + } + + // If we got here, we probably have JSON. + var h formatHint + if err := json.Unmarshal(payload, &h); err != nil { + return UnknownFormat + } + + if h.Audience != nil { + return JWT + } + if h.KeyType != nil { + return JWK + } + if h.Keys != nil { + return JWKS + } + if h.Ciphertext != nil { + return JWE + } + if h.Signatures != nil && h.Payload != nil { + return JWS + } + return UnknownFormat +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go new file mode 100644 index 0000000000..ab7287214f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/formatkind_string_gen.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=FormatKind"; DO NOT EDIT. + +package jwx + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidFormat-0] + _ = x[UnknownFormat-1] + _ = x[JWE-2] + _ = x[JWS-3] + _ = x[JWK-4] + _ = x[JWKS-5] + _ = x[JWT-6] +} + +const _FormatKind_name = "InvalidFormatUnknownFormatJWEJWSJWKJWKSJWT" + +var _FormatKind_index = [...]uint8{0, 13, 26, 29, 32, 35, 39, 42} + +func (i FormatKind) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_FormatKind_index)-1 { + return "FormatKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FormatKind_name[_FormatKind_index[idx]:_FormatKind_index[idx+1]] +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/BUILD.bazel new file mode 100644 index 0000000000..57da5179f3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/BUILD.bazel @@ -0,0 +1,21 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "base64", + srcs = ["base64.go"], + importpath = "github.com/lestrrat-go/jwx/v3/internal/base64", + visibility = ["//:__subpackages__"], +) + +go_test( + name = "base64_test", + srcs = ["base64_test.go"], + embed = [":base64"], + deps = ["@com_github_stretchr_testify//require"], +) + +alias( + name = "go_default_library", + actual = ":base64", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/asmbase64.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/asmbase64.go new file mode 100644 index 0000000000..6e83ecc4a5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/asmbase64.go @@ -0,0 +1,51 @@ +//go:build jwx_asmbase64 + +package base64 + +import ( + "fmt" + "slices" + + asmbase64 "github.com/segmentio/asm/base64" +) + +func init() { + SetEncoder(asmEncoder{asmbase64.RawURLEncoding}) + SetDecoder(asmDecoder{}) +} + +type asmEncoder struct { + *asmbase64.Encoding +} + +func (e asmEncoder) AppendEncode(dst, src []byte) []byte { + n := e.Encoding.EncodedLen(len(src)) + dst = 
slices.Grow(dst, n) + e.Encoding.Encode(dst[len(dst):][:n], src) + return dst[:len(dst)+n] +} + +type asmDecoder struct{} + +func (d asmDecoder) Decode(src []byte) ([]byte, error) { + var enc *asmbase64.Encoding + switch Guess(src) { + case Std: + enc = asmbase64.StdEncoding + case RawStd: + enc = asmbase64.RawStdEncoding + case URL: + enc = asmbase64.URLEncoding + case RawURL: + enc = asmbase64.RawURLEncoding + default: + return nil, fmt.Errorf(`invalid encoding`) + } + + dst := make([]byte, enc.DecodedLen(len(src))) + n, err := enc.Decode(dst, src) + if err != nil { + return nil, fmt.Errorf(`failed to decode source: %w`, err) + } + return dst[:n], nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/base64.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/base64.go new file mode 100644 index 0000000000..5ed8e35006 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/base64/base64.go @@ -0,0 +1,139 @@ +package base64 + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "fmt" + "sync" +) + +type Decoder interface { + Decode([]byte) ([]byte, error) +} + +type Encoder interface { + Encode([]byte, []byte) + EncodedLen(int) int + EncodeToString([]byte) string + AppendEncode([]byte, []byte) []byte +} + +var muEncoder sync.RWMutex +var encoder Encoder = base64.RawURLEncoding +var muDecoder sync.RWMutex +var decoder Decoder = defaultDecoder{} + +func SetEncoder(enc Encoder) { + muEncoder.Lock() + defer muEncoder.Unlock() + encoder = enc +} + +func getEncoder() Encoder { + muEncoder.RLock() + defer muEncoder.RUnlock() + return encoder +} + +func DefaultEncoder() Encoder { + return getEncoder() +} + +func SetDecoder(dec Decoder) { + muDecoder.Lock() + defer muDecoder.Unlock() + decoder = dec +} + +func getDecoder() Decoder { + muDecoder.RLock() + defer muDecoder.RUnlock() + return decoder +} + +func Encode(src []byte) []byte { + encoder := getEncoder() + dst := make([]byte, encoder.EncodedLen(len(src))) + 
encoder.Encode(dst, src) + return dst +} + +func EncodeToString(src []byte) string { + return getEncoder().EncodeToString(src) +} + +func EncodeUint64ToString(v uint64) string { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, v) + + i := 0 + for ; i < len(data); i++ { + if data[i] != 0x0 { + break + } + } + + return EncodeToString(data[i:]) +} + +const ( + InvalidEncoding = iota + Std + URL + RawStd + RawURL +) + +func Guess(src []byte) int { + var isRaw = !bytes.HasSuffix(src, []byte{'='}) + var isURL = !bytes.ContainsAny(src, "+/") + switch { + case isRaw && isURL: + return RawURL + case isURL: + return URL + case isRaw: + return RawStd + default: + return Std + } +} + +// defaultDecoder is a Decoder that detects the encoding of the source and +// decodes it accordingly. This shouldn't really be required per the spec, but +// it exist because we have seen in the wild JWTs that are encoded using +// various versions of the base64 encoding. +type defaultDecoder struct{} + +func (defaultDecoder) Decode(src []byte) ([]byte, error) { + var enc *base64.Encoding + + switch Guess(src) { + case RawURL: + enc = base64.RawURLEncoding + case URL: + enc = base64.URLEncoding + case RawStd: + enc = base64.RawStdEncoding + case Std: + enc = base64.StdEncoding + default: + return nil, fmt.Errorf(`invalid encoding`) + } + + dst := make([]byte, enc.DecodedLen(len(src))) + n, err := enc.Decode(dst, src) + if err != nil { + return nil, fmt.Errorf(`failed to decode source: %w`, err) + } + return dst[:n], nil +} + +func Decode(src []byte) ([]byte, error) { + return getDecoder().Decode(src) +} + +func DecodeString(src string) ([]byte, error) { + return getDecoder().Decode([]byte(src)) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/BUILD.bazel new file mode 100644 index 0000000000..3ccdcf372a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/BUILD.bazel @@ -0,0 
+1,14 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "ecutil", + srcs = ["ecutil.go"], + importpath = "github.com/lestrrat-go/jwx/v3/internal/ecutil", + visibility = ["//:__subpackages__"], +) + +alias( + name = "go_default_library", + actual = ":ecutil", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/ecutil.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/ecutil.go new file mode 100644 index 0000000000..cf0bd4ac48 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/ecutil/ecutil.go @@ -0,0 +1,76 @@ +// Package ecutil defines tools that help with elliptic curve related +// computation +package ecutil + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +const ( + // size of buffer that needs to be allocated for EC521 curve + ec521BufferSize = 66 // (521 / 8) + 1 +) + +var ecpointBufferPool = sync.Pool{ + New: func() any { + // In most cases the curve bit size will be less than this length + // so allocate the maximum, and keep reusing + buf := make([]byte, 0, ec521BufferSize) + return &buf + }, +} + +func getCrvFixedBuffer(size int) []byte { + //nolint:forcetypeassert + buf := *(ecpointBufferPool.Get().(*[]byte)) + if size > ec521BufferSize && cap(buf) < size { + buf = append(buf, make([]byte, size-cap(buf))...) + } + return buf[:size] +} + +// ReleaseECPointBuffer releases the []byte buffer allocated. +func ReleaseECPointBuffer(buf []byte) { + buf = buf[:cap(buf)] + buf[0] = 0x0 + for i := 1; i < len(buf); i *= 2 { + copy(buf[i:], buf[:i]) + } + buf = buf[:0] + ecpointBufferPool.Put(&buf) +} + +func CalculateKeySize(crv elliptic.Curve) int { + // We need to create a buffer that fits the entire curve. + // If the curve size is 66, that fits in 9 bytes. If the curve + // size is 64, it fits in 8 bytes. + bits := crv.Params().BitSize + + // For most common cases we know before hand what the byte length + // is going to be. 
optimize + var inBytes int + switch bits { + case 224, 256, 384: // TODO: use constant? + inBytes = bits / 8 + case 521: + inBytes = ec521BufferSize + default: + inBytes = bits / 8 + if (bits % 8) != 0 { + inBytes++ + } + } + + return inBytes +} + +// AllocECPointBuffer allocates a buffer for the given point in the given +// curve. This buffer should be released using the ReleaseECPointBuffer +// function. +func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte { + buf := getCrvFixedBuffer(CalculateKeySize(crv)) + v.FillBytes(buf) + return buf +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/BUILD.bazel new file mode 100644 index 0000000000..4e2dbe12b7 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/BUILD.bazel @@ -0,0 +1,19 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "json", + srcs = [ + "json.go", + "registry.go", + "stdlib.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/internal/json", + visibility = ["//:__subpackages__"], + deps = ["//internal/base64"], +) + +alias( + name = "go_default_library", + actual = ":json", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go new file mode 100644 index 0000000000..9c99c098bc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/goccy.go @@ -0,0 +1,48 @@ +//go:build jwx_goccy + +package json + +import ( + "io" + + "github.com/goccy/go-json" +) + +type Decoder = json.Decoder +type Delim = json.Delim +type Encoder = json.Encoder +type Marshaler = json.Marshaler +type Number = json.Number +type RawMessage = json.RawMessage +type Unmarshaler = json.Unmarshaler + +func Engine() string { + return "github.com/goccy/go-json" +} + +// NewDecoder respects the values specified in DecoderSettings, +// and creates a Decoder that has certain 
features turned on/off +func NewDecoder(r io.Reader) *json.Decoder { + dec := json.NewDecoder(r) + + if UseNumber() { + dec.UseNumber() + } + + return dec +} + +// NewEncoder is just a proxy for "encoding/json".NewEncoder +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal is just a proxy for "encoding/json".Marshal +func Marshal(v any) ([]byte, error) { + return json.Marshal(v) +} + +// MarshalIndent is just a proxy for "encoding/json".MarshalIndent +func MarshalIndent(v any, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/json.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/json.go new file mode 100644 index 0000000000..c1917ef27a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/json.go @@ -0,0 +1,127 @@ +package json + +import ( + "bytes" + "fmt" + "os" + "sync/atomic" + + "github.com/lestrrat-go/jwx/v3/internal/base64" +) + +var useNumber uint32 // TODO: at some point, change to atomic.Bool + +func UseNumber() bool { + return atomic.LoadUint32(&useNumber) == 1 +} + +// Sets the global configuration for json decoding +func DecoderSettings(inUseNumber bool) { + var val uint32 + if inUseNumber { + val = 1 + } + atomic.StoreUint32(&useNumber, val) +} + +// Unmarshal respects the values specified in DecoderSettings, +// and uses a Decoder that has certain features turned on/off +func Unmarshal(b []byte, v any) error { + dec := NewDecoder(bytes.NewReader(b)) + return dec.Decode(v) +} + +func AssignNextBytesToken(dst *[]byte, dec *Decoder) error { + var val string + if err := dec.Decode(&val); err != nil { + return fmt.Errorf(`error reading next value: %w`, err) + } + + buf, err := base64.DecodeString(val) + if err != nil { + return fmt.Errorf(`expected base64 encoded []byte (%T)`, val) + } + *dst = buf + return nil +} + +func ReadNextStringToken(dec *Decoder) (string, error) { + var val string 
+ if err := dec.Decode(&val); err != nil { + return "", fmt.Errorf(`error reading next value: %w`, err) + } + return val, nil +} + +func AssignNextStringToken(dst **string, dec *Decoder) error { + val, err := ReadNextStringToken(dec) + if err != nil { + return err + } + *dst = &val + return nil +} + +// FlattenAudience is a flag to specify if we should flatten the "aud" +// entry to a string when there's only one entry. +// In jwx < 1.1.8 we just dumped everything as an array of strings, +// but apparently AWS Cognito doesn't handle this well. +// +// So now we have the ability to dump "aud" as a string if there's +// only one entry, but we need to retain the old behavior so that +// we don't accidentally break somebody else's code. (e.g. messing +// up how signatures are calculated) +var FlattenAudience uint32 + +func MarshalAudience(aud []string, flatten bool) ([]byte, error) { + var val any + if len(aud) == 1 && flatten { + val = aud[0] + } else { + val = aud + } + return Marshal(val) +} + +func EncodeAudience(enc *Encoder, aud []string, flatten bool) error { + var val any + if len(aud) == 1 && flatten { + val = aud[0] + } else { + val = aud + } + return enc.Encode(val) +} + +// DecodeCtx is an interface for objects that needs that extra something +// when decoding JSON into an object. +type DecodeCtx interface { + Registry() *Registry +} + +// DecodeCtxContainer is used to differentiate objects that can carry extra +// decoding hints and those who can't. +type DecodeCtxContainer interface { + DecodeCtx() DecodeCtx + SetDecodeCtx(DecodeCtx) +} + +// stock decodeCtx. 
should cover 80% of the cases +type decodeCtx struct { + registry *Registry +} + +func NewDecodeCtx(r *Registry) DecodeCtx { + return &decodeCtx{registry: r} +} + +func (dc *decodeCtx) Registry() *Registry { + return dc.registry +} + +func Dump(v any) { + enc := NewEncoder(os.Stdout) + enc.SetIndent("", " ") + //nolint:errchkjson + _ = enc.Encode(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/registry.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/registry.go new file mode 100644 index 0000000000..04a6a4c4a5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/registry.go @@ -0,0 +1,90 @@ +package json + +import ( + "fmt" + "reflect" + "sync" +) + +// CustomDecoder is the interface we expect from RegisterCustomField in jws, jwe, jwk, and jwt packages. +type CustomDecoder interface { + // Decode takes a JSON encoded byte slice and returns the desired + // decoded value,which will be used as the value for that field + // registered through RegisterCustomField + Decode([]byte) (any, error) +} + +// CustomDecodeFunc is a stateless, function-based implementation of CustomDecoder +type CustomDecodeFunc func([]byte) (any, error) + +func (fn CustomDecodeFunc) Decode(data []byte) (any, error) { + return fn(data) +} + +type objectTypeDecoder struct { + typ reflect.Type + name string +} + +func (dec *objectTypeDecoder) Decode(data []byte) (any, error) { + ptr := reflect.New(dec.typ).Interface() + if err := Unmarshal(data, ptr); err != nil { + return nil, fmt.Errorf(`failed to decode field %s: %w`, dec.name, err) + } + return reflect.ValueOf(ptr).Elem().Interface(), nil +} + +type Registry struct { + mu *sync.RWMutex + ctrs map[string]CustomDecoder +} + +func NewRegistry() *Registry { + return &Registry{ + mu: &sync.RWMutex{}, + ctrs: make(map[string]CustomDecoder), + } +} + +func (r *Registry) Register(name string, object any) { + if object == nil { + r.mu.Lock() + defer r.mu.Unlock() + delete(r.ctrs, name) + return + } + + 
r.mu.Lock() + defer r.mu.Unlock() + if ctr, ok := object.(CustomDecoder); ok { + r.ctrs[name] = ctr + } else { + r.ctrs[name] = &objectTypeDecoder{ + typ: reflect.TypeOf(object), + name: name, + } + } +} + +func (r *Registry) Decode(dec *Decoder, name string) (any, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if ctr, ok := r.ctrs[name]; ok { + var raw RawMessage + if err := dec.Decode(&raw); err != nil { + return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err) + } + v, err := ctr.Decode([]byte(raw)) + if err != nil { + return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err) + } + return v, nil + } + + var decoded any + if err := dec.Decode(&decoded); err != nil { + return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err) + } + return decoded, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go new file mode 100644 index 0000000000..9e51fa7fe9 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/json/stdlib.go @@ -0,0 +1,47 @@ +//go:build !jwx_goccy + +//nolint:revive +package json + +import ( + "encoding/json" + "io" +) + +type Decoder = json.Decoder +type Delim = json.Delim +type Encoder = json.Encoder +type Marshaler = json.Marshaler +type Number = json.Number +type RawMessage = json.RawMessage +type Unmarshaler = json.Unmarshaler + +func Engine() string { + return "encoding/json" +} + +// NewDecoder respects the values specified in DecoderSettings, +// and creates a Decoder that has certain features turned on/off +func NewDecoder(r io.Reader) *json.Decoder { + dec := json.NewDecoder(r) + + if UseNumber() { + dec.UseNumber() + } + + return dec +} + +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal is just a proxy for "encoding/json".Marshal +func Marshal(v any) ([]byte, error) { + return json.Marshal(v) +} + +// MarshalIndent is just a proxy for "encoding/json".MarshalIndent +func 
MarshalIndent(v any, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/BUILD.bazel new file mode 100644 index 0000000000..c70c4d2871 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/BUILD.bazel @@ -0,0 +1,8 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "jwxio", + srcs = ["jwxio.go"], + importpath = "github.com/lestrrat-go/jwx/v3/internal/jwxio", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/jwxio.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/jwxio.go new file mode 100644 index 0000000000..8396417a9d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/jwxio/jwxio.go @@ -0,0 +1,29 @@ +package jwxio + +import ( + "bytes" + "errors" + "io" + "strings" +) + +var errNonFiniteSource = errors.New(`cannot read from non-finite source`) + +func NonFiniteSourceError() error { + return errNonFiniteSource +} + +// ReadAllFromFiniteSource reads all data from a io.Reader _if_ it comes from a +// finite source. 
+func ReadAllFromFiniteSource(rdr io.Reader) ([]byte, error) { + switch rdr.(type) { + case *bytes.Reader, *bytes.Buffer, *strings.Reader: + data, err := io.ReadAll(rdr) + if err != nil { + return nil, err + } + return data, nil + default: + return nil, errNonFiniteSource + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/BUILD.bazel new file mode 100644 index 0000000000..d46d2f3814 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/BUILD.bazel @@ -0,0 +1,31 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "keyconv", + srcs = ["keyconv.go"], + importpath = "github.com/lestrrat-go/jwx/v3/internal/keyconv", + visibility = ["//:__subpackages__"], + deps = [ + "//jwk", + "@com_github_lestrrat_go_blackmagic//:blackmagic", + "@org_golang_x_crypto//ed25519", + ], +) + +go_test( + name = "keyconv_test", + srcs = ["keyconv_test.go"], + deps = [ + ":keyconv", + "//internal/jwxtest", + "//jwa", + "//jwk", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":keyconv", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go new file mode 100644 index 0000000000..751fd8f05a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/keyconv/keyconv.go @@ -0,0 +1,354 @@ +package keyconv + +import ( + "crypto" + "crypto/ecdh" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "fmt" + "math/big" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/jwk" +) + +// RSAPrivateKey assigns src to dst. +// `dst` should be a pointer to a rsa.PrivateKey. 
+// `src` may be rsa.PrivateKey, *rsa.PrivateKey, or a jwk.Key +func RSAPrivateKey(dst, src any) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw rsa.PrivateKey + if err := jwk.Export(jwkKey, &raw); err != nil { + return fmt.Errorf(`failed to produce rsa.PrivateKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *rsa.PrivateKey + switch src := src.(type) { + case rsa.PrivateKey: + ptr = &src + case *rsa.PrivateKey: + ptr = src + default: + return fmt.Errorf(`keyconv: expected rsa.PrivateKey or *rsa.PrivateKey, got %T`, src) + } + + return blackmagic.AssignIfCompatible(dst, ptr) +} + +// RSAPublicKey assigns src to dst +// `dst` should be a pointer to a non-zero rsa.PublicKey. +// `src` may be rsa.PublicKey, *rsa.PublicKey, or a jwk.Key +func RSAPublicKey(dst, src any) error { + if jwkKey, ok := src.(jwk.Key); ok { + pk, err := jwk.PublicRawKeyOf(jwkKey) + if err != nil { + return fmt.Errorf(`keyconv: failed to produce public key from %T: %w`, src, err) + } + src = pk + } + + var ptr *rsa.PublicKey + switch src := src.(type) { + case rsa.PrivateKey: + ptr = &src.PublicKey + case *rsa.PrivateKey: + ptr = &src.PublicKey + case rsa.PublicKey: + ptr = &src + case *rsa.PublicKey: + ptr = src + default: + return fmt.Errorf(`keyconv: expected rsa.PublicKey/rsa.PrivateKey or *rsa.PublicKey/*rsa.PrivateKey, got %T`, src) + } + + return blackmagic.AssignIfCompatible(dst, ptr) +} + +// ECDSAPrivateKey assigns src to dst, converting its type from a +// non-pointer to a pointer +func ECDSAPrivateKey(dst, src any) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw ecdsa.PrivateKey + if err := jwk.Export(jwkKey, &raw); err != nil { + return fmt.Errorf(`keyconv: failed to produce ecdsa.PrivateKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *ecdsa.PrivateKey + switch src := src.(type) { + case ecdsa.PrivateKey: + ptr = &src + case *ecdsa.PrivateKey: + ptr = src + default: + return fmt.Errorf(`keyconv: expected ecdsa.PrivateKey or 
*ecdsa.PrivateKey, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, ptr) +} + +// ECDSAPublicKey assigns src to dst, converting its type from a +// non-pointer to a pointer +func ECDSAPublicKey(dst, src any) error { + if jwkKey, ok := src.(jwk.Key); ok { + pk, err := jwk.PublicRawKeyOf(jwkKey) + if err != nil { + return fmt.Errorf(`keyconv: failed to produce public key from %T: %w`, src, err) + } + src = pk + } + + var ptr *ecdsa.PublicKey + switch src := src.(type) { + case ecdsa.PrivateKey: + ptr = &src.PublicKey + case *ecdsa.PrivateKey: + ptr = &src.PublicKey + case ecdsa.PublicKey: + ptr = &src + case *ecdsa.PublicKey: + ptr = src + default: + return fmt.Errorf(`keyconv: expected ecdsa.PublicKey/ecdsa.PrivateKey or *ecdsa.PublicKey/*ecdsa.PrivateKey, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, ptr) +} + +func ByteSliceKey(dst, src any) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw []byte + if err := jwk.Export(jwkKey, &raw); err != nil { + return fmt.Errorf(`keyconv: failed to produce []byte from %T: %w`, src, err) + } + src = raw + } + + if _, ok := src.([]byte); !ok { + return fmt.Errorf(`keyconv: expected []byte, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, src) +} + +func Ed25519PrivateKey(dst, src any) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw ed25519.PrivateKey + if err := jwk.Export(jwkKey, &raw); err != nil { + return fmt.Errorf(`failed to produce ed25519.PrivateKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *ed25519.PrivateKey + switch src := src.(type) { + case ed25519.PrivateKey: + ptr = &src + case *ed25519.PrivateKey: + ptr = src + default: + return fmt.Errorf(`expected ed25519.PrivateKey or *ed25519.PrivateKey, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, ptr) +} + +func Ed25519PublicKey(dst, src any) error { + if jwkKey, ok := src.(jwk.Key); ok { + pk, err := jwk.PublicRawKeyOf(jwkKey) + if err != nil { + return fmt.Errorf(`keyconv: failed to 
produce public key from %T: %w`, src, err) + } + src = pk + } + + switch key := src.(type) { + case ed25519.PrivateKey: + src = key.Public() + case *ed25519.PrivateKey: + src = key.Public() + } + + var ptr *ed25519.PublicKey + switch src := src.(type) { + case ed25519.PublicKey: + ptr = &src + case *ed25519.PublicKey: + ptr = src + case *crypto.PublicKey: + tmp, ok := (*src).(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of *crypto.PublicKey`) + } + ptr = &tmp + case crypto.PublicKey: + tmp, ok := src.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of crypto.PublicKey`) + } + ptr = &tmp + default: + return fmt.Errorf(`expected ed25519.PublicKey or *ed25519.PublicKey, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, ptr) +} + +type privECDHer interface { + ECDH() (*ecdh.PrivateKey, error) +} + +func ECDHPrivateKey(dst, src any) error { + var privECDH *ecdh.PrivateKey + if jwkKey, ok := src.(jwk.Key); ok { + var rawECDH ecdh.PrivateKey + if err := jwk.Export(jwkKey, &rawECDH); err == nil { + privECDH = &rawECDH + } else { + // If we cannot export the key as an ecdh.PrivateKey, we try to export it as an ecdsa.PrivateKey + var rawECDSA ecdsa.PrivateKey + if err := jwk.Export(jwkKey, &rawECDSA); err != nil { + return fmt.Errorf(`keyconv: failed to produce ecdh.PrivateKey or ecdsa.PrivateKey from %T: %w`, src, err) + } + src = &rawECDSA + } + } + + switch src := src.(type) { + case ecdh.PrivateKey: + privECDH = &src + case *ecdh.PrivateKey: + privECDH = src + case privECDHer: + priv, err := src.ECDH() + if err != nil { + return fmt.Errorf(`keyconv: failed to convert ecdsa.PrivateKey to ecdh.PrivateKey: %w`, err) + } + privECDH = priv + } + + return blackmagic.AssignIfCompatible(dst, privECDH) +} + +type pubECDHer interface { + ECDH() (*ecdh.PublicKey, error) +} + +func ECDHPublicKey(dst, src any) error { + var pubECDH *ecdh.PublicKey + if jwkKey, ok := src.(jwk.Key); 
ok { + var rawECDH ecdh.PublicKey + if err := jwk.Export(jwkKey, &rawECDH); err == nil { + pubECDH = &rawECDH + } else { + // If we cannot export the key as an ecdh.PublicKey, we try to export it as an ecdsa.PublicKey + var rawECDSA ecdsa.PublicKey + if err := jwk.Export(jwkKey, &rawECDSA); err != nil { + return fmt.Errorf(`keyconv: failed to produce ecdh.PublicKey or ecdsa.PublicKey from %T: %w`, src, err) + } + src = &rawECDSA + } + } + + switch src := src.(type) { + case ecdh.PublicKey: + pubECDH = &src + case *ecdh.PublicKey: + pubECDH = src + case pubECDHer: + pub, err := src.ECDH() + if err != nil { + return fmt.Errorf(`keyconv: failed to convert ecdsa.PublicKey to ecdh.PublicKey: %w`, err) + } + pubECDH = pub + } + + return blackmagic.AssignIfCompatible(dst, pubECDH) +} + +// ecdhCurveToElliptic maps ECDH curves to elliptic curves +func ecdhCurveToElliptic(ecdhCurve ecdh.Curve) (elliptic.Curve, error) { + switch ecdhCurve { + case ecdh.P256(): + return elliptic.P256(), nil + case ecdh.P384(): + return elliptic.P384(), nil + case ecdh.P521(): + return elliptic.P521(), nil + default: + return nil, fmt.Errorf(`keyconv: unsupported ECDH curve: %v`, ecdhCurve) + } +} + +// ecdhPublicKeyToECDSA converts an ECDH public key to an ECDSA public key +func ecdhPublicKeyToECDSA(ecdhPubKey *ecdh.PublicKey) (*ecdsa.PublicKey, error) { + curve, err := ecdhCurveToElliptic(ecdhPubKey.Curve()) + if err != nil { + return nil, err + } + + pubBytes := ecdhPubKey.Bytes() + + // Parse the uncompressed point format (0x04 prefix + X + Y coordinates) + if len(pubBytes) == 0 || pubBytes[0] != 0x04 { + return nil, fmt.Errorf(`keyconv: invalid ECDH public key format`) + } + + keyLen := (len(pubBytes) - 1) / 2 + if len(pubBytes) != 1+2*keyLen { + return nil, fmt.Errorf(`keyconv: invalid ECDH public key length`) + } + + x := new(big.Int).SetBytes(pubBytes[1 : 1+keyLen]) + y := new(big.Int).SetBytes(pubBytes[1+keyLen:]) + + return &ecdsa.PublicKey{ + Curve: curve, + X: x, + Y: y, + }, nil 
+} + +func ECDHToECDSA(dst, src any) error { + // convert ecdh.PublicKey to ecdsa.PublicKey, ecdh.PrivateKey to ecdsa.PrivateKey + + // First, handle value types by converting to pointers + switch s := src.(type) { + case ecdh.PrivateKey: + src = &s + case ecdh.PublicKey: + src = &s + } + + var privBytes []byte + var pubkey *ecdh.PublicKey + // Now handle the actual conversion with pointer types + switch src := src.(type) { + case *ecdh.PrivateKey: + pubkey = src.PublicKey() + privBytes = src.Bytes() + case *ecdh.PublicKey: + pubkey = src + default: + return fmt.Errorf(`keyconv: expected ecdh.PrivateKey, *ecdh.PrivateKey, ecdh.PublicKey, or *ecdh.PublicKey, got %T`, src) + } + + // convert the public key + ecdsaPubKey, err := ecdhPublicKeyToECDSA(pubkey) + if err != nil { + return fmt.Errorf(`keyconv.ECDHToECDSA: failed to convert ECDH public key to ECDSA public key: %w`, err) + } + + // return if we were being asked to convert *ecdh.PublicKey + if privBytes == nil { + return blackmagic.AssignIfCompatible(dst, ecdsaPubKey) + } + + // Then create the private key with the public key embedded + ecdsaPrivKey := &ecdsa.PrivateKey{ + D: new(big.Int).SetBytes(privBytes), + PublicKey: *ecdsaPubKey, + } + + return blackmagic.AssignIfCompatible(dst, ecdsaPrivKey) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/BUILD.bazel new file mode 100644 index 0000000000..cebc269330 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/BUILD.bazel @@ -0,0 +1,32 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "pool", + srcs = [ + "big_int.go", + "byte_slice.go", + "bytes_buffer.go", + "error_slice.go", + "key_to_error_map.go", + "pool.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/internal/pool", + visibility = ["//:__subpackages__"], +) + +alias( + name = "go_default_library", + actual = ":pool", + visibility = ["//:__subpackages__"], +) + 
+go_test( + name = "pool_test", + srcs = [ + "byte_slice_test.go", + ], + deps = [ + ":pool", + "@com_github_stretchr_testify//require", + ], +) \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/big_int.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/big_int.go new file mode 100644 index 0000000000..57c446d4d2 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/big_int.go @@ -0,0 +1,19 @@ +package pool + +import "math/big" + +var bigIntPool = New[*big.Int](allocBigInt, freeBigInt) + +func allocBigInt() *big.Int { + return &big.Int{} +} + +func freeBigInt(b *big.Int) *big.Int { + b.SetInt64(0) // Reset the value to zero + return b +} + +// BigInt returns a pool of *big.Int instances. +func BigInt() *Pool[*big.Int] { + return bigIntPool +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/byte_slice.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/byte_slice.go new file mode 100644 index 0000000000..46f1028343 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/byte_slice.go @@ -0,0 +1,19 @@ +package pool + +var byteSlicePool = SlicePool[byte]{ + pool: New[[]byte](allocByteSlice, freeByteSlice), +} + +func allocByteSlice() []byte { + return make([]byte, 0, 64) // Default capacity of 64 bytes +} + +func freeByteSlice(b []byte) []byte { + clear(b) + b = b[:0] // Reset the slice to zero length + return b +} + +func ByteSlice() SlicePool[byte] { + return byteSlicePool +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/bytes_buffer.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/bytes_buffer.go new file mode 100644 index 0000000000..a877f73ff8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/bytes_buffer.go @@ -0,0 +1,18 @@ +package pool + +import "bytes" + +var bytesBufferPool = New[*bytes.Buffer](allocBytesBuffer, freeBytesBuffer) + +func allocBytesBuffer() *bytes.Buffer { + return &bytes.Buffer{} +} + +func 
freeBytesBuffer(b *bytes.Buffer) *bytes.Buffer { + b.Reset() + return b +} + +func BytesBuffer() *Pool[*bytes.Buffer] { + return bytesBufferPool +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/error_slice.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/error_slice.go new file mode 100644 index 0000000000..4f1675c1c0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/error_slice.go @@ -0,0 +1,16 @@ +package pool + +var errorSlicePool = New[[]error](allocErrorSlice, freeErrorSlice) + +func allocErrorSlice() []error { + return make([]error, 0, 1) +} + +func freeErrorSlice(s []error) []error { + // Reset the slice to its zero value + return s[:0] +} + +func ErrorSlice() *Pool[[]error] { + return errorSlicePool +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/key_to_error_map.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/key_to_error_map.go new file mode 100644 index 0000000000..9fae012644 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/key_to_error_map.go @@ -0,0 +1,19 @@ +package pool + +var keyToErrorMapPool = New[map[string]error](allocKeyToErrorMap, freeKeyToErrorMap) + +func allocKeyToErrorMap() map[string]error { + return make(map[string]error) +} + +func freeKeyToErrorMap(m map[string]error) map[string]error { + for k := range m { + delete(m, k) // Clear the map + } + return m +} + +// KeyToErrorMap returns a pool of map[string]error instances. +func KeyToErrorMap() *Pool[map[string]error] { + return keyToErrorMapPool +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/pool.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/pool.go new file mode 100644 index 0000000000..008b1cdb8b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/pool/pool.go @@ -0,0 +1,70 @@ +package pool + +import ( + "sync" +) + +type Pool[T any] struct { + pool sync.Pool + destructor func(T) T +} + +// New creates a new Pool instance for the type T. 
+// The allocator function is used to create new instances of T when the pool is empty. +// The destructor function is used to clean up instances of T before they are returned to the pool. +// The destructor should reset the state of T to a clean state, so it can be reused, and +// return the modified instance of T. This is required for cases when you reset operations +// can modify the underlying data structure, such as slices or maps. +func New[T any](allocator func() T, destructor func(T) T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return allocator() + }, + }, + destructor: destructor, + } +} + +// Get retrieves an item of type T from the pool. +func (p *Pool[T]) Get() T { + //nolint:forcetypeassert + return p.pool.Get().(T) +} + +// Put returns an item of type T to the pool. +// The item is first processed by the destructor function to ensure it is in a clean state. +func (p *Pool[T]) Put(item T) { + p.pool.Put(p.destructor(item)) +} + +// SlicePool is a specialized pool for slices of type T. It is identical to Pool[T] but +// provides additional functionality to get slices with a specific capacity. 
+type SlicePool[T any] struct { + pool *Pool[[]T] +} + +func NewSlicePool[T any](allocator func() []T, destructor func([]T) []T) SlicePool[T] { + return SlicePool[T]{ + pool: New(allocator, destructor), + } +} + +func (p SlicePool[T]) Get() []T { + return p.pool.Get() +} + +func (p SlicePool[T]) GetCapacity(capacity int) []T { + if capacity <= 0 { + return p.Get() + } + s := p.Get() + if cap(s) < capacity { + s = make([]T, 0, capacity) + } + return s +} + +func (p SlicePool[T]) Put(s []T) { + p.pool.Put(s) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/BUILD.bazel new file mode 100644 index 0000000000..6a331efb34 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/BUILD.bazel @@ -0,0 +1,17 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "tokens", + srcs = [ + "jwe_tokens.go", + "tokens.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/internal/tokens", + visibility = ["//:__subpackages__"], +) + +alias( + name = "go_default_library", + actual = ":tokens", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/jwe_tokens.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/jwe_tokens.go new file mode 100644 index 0000000000..9001cbbbc7 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/jwe_tokens.go @@ -0,0 +1,48 @@ +package tokens + +// JWE Key Encryption Algorithms +const ( + // RSA algorithms + RSA1_5 = "RSA1_5" + RSA_OAEP = "RSA-OAEP" + RSA_OAEP_256 = "RSA-OAEP-256" + RSA_OAEP_384 = "RSA-OAEP-384" + RSA_OAEP_512 = "RSA-OAEP-512" + + // AES Key Wrap algorithms + A128KW = "A128KW" + A192KW = "A192KW" + A256KW = "A256KW" + + // AES GCM Key Wrap algorithms + A128GCMKW = "A128GCMKW" + A192GCMKW = "A192GCMKW" + A256GCMKW = "A256GCMKW" + + // ECDH-ES algorithms + ECDH_ES = "ECDH-ES" + ECDH_ES_A128KW = "ECDH-ES+A128KW" + ECDH_ES_A192KW = "ECDH-ES+A192KW" 
+ ECDH_ES_A256KW = "ECDH-ES+A256KW" + + // PBES2 algorithms + PBES2_HS256_A128KW = "PBES2-HS256+A128KW" + PBES2_HS384_A192KW = "PBES2-HS384+A192KW" + PBES2_HS512_A256KW = "PBES2-HS512+A256KW" + + // Direct key agreement + DIRECT = "dir" +) + +// JWE Content Encryption Algorithms +const ( + // AES GCM algorithms + A128GCM = "A128GCM" + A192GCM = "A192GCM" + A256GCM = "A256GCM" + + // AES CBC + HMAC algorithms + A128CBC_HS256 = "A128CBC-HS256" + A192CBC_HS384 = "A192CBC-HS384" + A256CBC_HS512 = "A256CBC-HS512" +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/tokens.go b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/tokens.go new file mode 100644 index 0000000000..2af3b88de1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/internal/tokens/tokens.go @@ -0,0 +1,51 @@ +package tokens + +const ( + CloseCurlyBracket = '}' + CloseSquareBracket = ']' + Colon = ':' + Comma = ',' + DoubleQuote = '"' + OpenCurlyBracket = '{' + OpenSquareBracket = '[' + Period = '.' +) + +// Cryptographic key sizes +const ( + KeySize16 = 16 + KeySize24 = 24 + KeySize32 = 32 + KeySize48 = 48 // A192CBC_HS384 key size + KeySize64 = 64 // A256CBC_HS512 key size +) + +// Bit/byte conversion factors +const ( + BitsPerByte = 8 + BytesPerBit = 1.0 / 8 +) + +// Key wrapping constants +const ( + KeywrapChunkLen = 8 + KeywrapRounds = 6 // RFC 3394 key wrap rounds + KeywrapBlockSize = 8 // Key wrap block size in bytes +) + +// AES-GCM constants +const ( + GCMIVSize = 12 // GCM IV size in bytes (96 bits) + GCMTagSize = 16 // GCM tag size in bytes (128 bits) +) + +// PBES2 constants +const ( + PBES2DefaultIterations = 10000 // Default PBKDF2 iteration count + PBES2NullByteSeparator = 0 // Null byte separator for PBES2 +) + +// RSA key generation constants +const ( + RSAKeyGenMultiplier = 2 // RSA key generation size multiplier +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwa/BUILD.bazel new file mode 100644 
index 0000000000..cfb7af02a0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/BUILD.bazel @@ -0,0 +1,45 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "jwa", + srcs = [ + "compression_gen.go", + "content_encryption_gen.go", + "elliptic_gen.go", + "jwa.go", + "key_encryption_gen.go", + "key_type_gen.go", + "options_gen.go", + "signature_gen.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwa", + visibility = ["//visibility:public"], + deps = [ + "//internal/tokens", + "@com_github_lestrrat_go_option_v2//:option", + ], +) + +go_test( + name = "jwa_test", + srcs = [ + "compression_gen_test.go", + "content_encryption_gen_test.go", + "elliptic_gen_test.go", + "jwa_test.go", + "key_encryption_gen_test.go", + "key_type_gen_test.go", + "signature_gen_test.go", + ], + deps = [ + ":jwa", + "@com_github_stretchr_testify//require", + "@com_github_lestrrat_go_option_v2//:option", + ], +) + +alias( + name = "go_default_library", + actual = ":jwa", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jwa/README.md new file mode 100644 index 0000000000..270e60c672 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/README.md @@ -0,0 +1,3 @@ +# JWA [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v3/jwa.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwa) + +Package [github.com/lestrrat-go/jwx/v3/jwa](./jwa) defines the various algorithm described in [RFC7518](https://tools.ietf.org/html/rfc7518) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/compression_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/compression_gen.go new file mode 100644 index 0000000000..a7a2451afa --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/compression_gen.go @@ -0,0 +1,153 @@ +// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT. 
+ +package jwa + +import ( + "encoding/json" + "fmt" + "sort" + "sync" +) + +var muAllCompressionAlgorithm sync.RWMutex +var allCompressionAlgorithm = map[string]CompressionAlgorithm{} +var muListCompressionAlgorithm sync.RWMutex +var listCompressionAlgorithm []CompressionAlgorithm +var builtinCompressionAlgorithm = map[string]struct{}{} + +func init() { + // builtin values for CompressionAlgorithm + algorithms := make([]CompressionAlgorithm, 2) + algorithms[0] = NewCompressionAlgorithm("DEF") + algorithms[1] = NewCompressionAlgorithm("") + + RegisterCompressionAlgorithm(algorithms...) +} + +// Deflate returns an object representing the "DEF" content compression algorithm value. Using this value specifies that the content should be compressed using DEFLATE (RFC 1951). +func Deflate() CompressionAlgorithm { + return lookupBuiltinCompressionAlgorithm("DEF") +} + +// NoCompress returns an object representing an empty compression algorithm value. Using this value specifies that the content should not be compressed. +func NoCompress() CompressionAlgorithm { + return lookupBuiltinCompressionAlgorithm("") +} + +func lookupBuiltinCompressionAlgorithm(name string) CompressionAlgorithm { + muAllCompressionAlgorithm.RLock() + v, ok := allCompressionAlgorithm[name] + muAllCompressionAlgorithm.RUnlock() + if !ok { + panic(fmt.Sprintf(`jwa: CompressionAlgorithm %q not registered`, name)) + } + return v +} + +// CompressionAlgorithm represents the compression algorithms as described in https://tools.ietf.org/html/rfc7518#section-7.3 +type CompressionAlgorithm struct { + name string + deprecated bool +} + +func (s CompressionAlgorithm) String() string { + return s.name +} + +// IsDeprecated returns true if the CompressionAlgorithm object is deprecated. +func (s CompressionAlgorithm) IsDeprecated() bool { + return s.deprecated +} + +// EmptyCompressionAlgorithm returns an empty CompressionAlgorithm object, used as a zero value. 
+func EmptyCompressionAlgorithm() CompressionAlgorithm { + return CompressionAlgorithm{} +} + +// NewCompressionAlgorithm creates a new CompressionAlgorithm object with the given name. +func NewCompressionAlgorithm(name string, options ...NewAlgorithmOption) CompressionAlgorithm { + var deprecated bool + for _, option := range options { + switch option.Ident() { + case identDeprecated{}: + if err := option.Value(&deprecated); err != nil { + panic("jwa.NewCompressionAlgorithm: WithDeprecated option must be a boolean") + } + } + } + return CompressionAlgorithm{name: name, deprecated: deprecated} +} + +// LookupCompressionAlgorithm returns the CompressionAlgorithm object for the given name. +func LookupCompressionAlgorithm(name string) (CompressionAlgorithm, bool) { + muAllCompressionAlgorithm.RLock() + v, ok := allCompressionAlgorithm[name] + muAllCompressionAlgorithm.RUnlock() + return v, ok +} + +// RegisterCompressionAlgorithm registers a new CompressionAlgorithm. The signature value must be immutable +// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library. +func RegisterCompressionAlgorithm(algorithms ...CompressionAlgorithm) { + muAllCompressionAlgorithm.Lock() + for _, alg := range algorithms { + allCompressionAlgorithm[alg.String()] = alg + } + muAllCompressionAlgorithm.Unlock() + rebuildCompressionAlgorithm() +} + +// UnregisterCompressionAlgorithm unregisters a CompressionAlgorithm from its known database. +// Non-existent entries, as well as built-in algorithms will silently be ignored. 
+func UnregisterCompressionAlgorithm(algorithms ...CompressionAlgorithm) { + muAllCompressionAlgorithm.Lock() + for _, alg := range algorithms { + if _, ok := builtinCompressionAlgorithm[alg.String()]; ok { + continue + } + delete(allCompressionAlgorithm, alg.String()) + } + muAllCompressionAlgorithm.Unlock() + rebuildCompressionAlgorithm() +} + +func rebuildCompressionAlgorithm() { + list := make([]CompressionAlgorithm, 0, len(allCompressionAlgorithm)) + muAllCompressionAlgorithm.RLock() + for _, v := range allCompressionAlgorithm { + list = append(list, v) + } + muAllCompressionAlgorithm.RUnlock() + sort.Slice(list, func(i, j int) bool { + return list[i].String() < list[j].String() + }) + muListCompressionAlgorithm.Lock() + listCompressionAlgorithm = list + muListCompressionAlgorithm.Unlock() +} + +// CompressionAlgorithms returns a list of all available values for CompressionAlgorithm. +func CompressionAlgorithms() []CompressionAlgorithm { + muListCompressionAlgorithm.RLock() + defer muListCompressionAlgorithm.RUnlock() + return listCompressionAlgorithm +} + +// MarshalJSON serializes the CompressionAlgorithm object to a JSON string. +func (s CompressionAlgorithm) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// UnmarshalJSON deserializes the JSON string to a CompressionAlgorithm object. 
+func (s *CompressionAlgorithm) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return fmt.Errorf(`failed to unmarshal CompressionAlgorithm: %w`, err) + } + v, ok := LookupCompressionAlgorithm(name) + if !ok { + return fmt.Errorf(`unknown CompressionAlgorithm: %q`, name) + } + *s = v + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/content_encryption_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/content_encryption_gen.go new file mode 100644 index 0000000000..8ccc47e462 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/content_encryption_gen.go @@ -0,0 +1,179 @@ +// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT. + +package jwa + +import ( + "encoding/json" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +var muAllContentEncryptionAlgorithm sync.RWMutex +var allContentEncryptionAlgorithm = map[string]ContentEncryptionAlgorithm{} +var muListContentEncryptionAlgorithm sync.RWMutex +var listContentEncryptionAlgorithm []ContentEncryptionAlgorithm +var builtinContentEncryptionAlgorithm = map[string]struct{}{} + +func init() { + // builtin values for ContentEncryptionAlgorithm + algorithms := make([]ContentEncryptionAlgorithm, 6) + algorithms[0] = NewContentEncryptionAlgorithm(tokens.A128CBC_HS256) + algorithms[1] = NewContentEncryptionAlgorithm(tokens.A128GCM) + algorithms[2] = NewContentEncryptionAlgorithm(tokens.A192CBC_HS384) + algorithms[3] = NewContentEncryptionAlgorithm(tokens.A192GCM) + algorithms[4] = NewContentEncryptionAlgorithm(tokens.A256CBC_HS512) + algorithms[5] = NewContentEncryptionAlgorithm(tokens.A256GCM) + + RegisterContentEncryptionAlgorithm(algorithms...) +} + +// A128CBC_HS256 returns an object representing A128CBC-HS256. Using this value specifies that the content should be encrypted using AES-CBC + HMAC-SHA256 (128). 
+func A128CBC_HS256() ContentEncryptionAlgorithm { + return lookupBuiltinContentEncryptionAlgorithm(tokens.A128CBC_HS256) +} + +// A128GCM returns an object representing A128GCM. Using this value specifies that the content should be encrypted using AES-GCM (128). +func A128GCM() ContentEncryptionAlgorithm { + return lookupBuiltinContentEncryptionAlgorithm(tokens.A128GCM) +} + +// A192CBC_HS384 returns an object representing A192CBC-HS384. Using this value specifies that the content should be encrypted using AES-CBC + HMAC-SHA384 (192). +func A192CBC_HS384() ContentEncryptionAlgorithm { + return lookupBuiltinContentEncryptionAlgorithm(tokens.A192CBC_HS384) +} + +// A192GCM returns an object representing A192GCM. Using this value specifies that the content should be encrypted using AES-GCM (192). +func A192GCM() ContentEncryptionAlgorithm { + return lookupBuiltinContentEncryptionAlgorithm(tokens.A192GCM) +} + +// A256CBC_HS512 returns an object representing A256CBC-HS512. Using this value specifies that the content should be encrypted using AES-CBC + HMAC-SHA512 (256). +func A256CBC_HS512() ContentEncryptionAlgorithm { + return lookupBuiltinContentEncryptionAlgorithm(tokens.A256CBC_HS512) +} + +// A256GCM returns an object representing A256GCM. Using this value specifies that the content should be encrypted using AES-GCM (256). 
+func A256GCM() ContentEncryptionAlgorithm { + return lookupBuiltinContentEncryptionAlgorithm(tokens.A256GCM) +} + +func lookupBuiltinContentEncryptionAlgorithm(name string) ContentEncryptionAlgorithm { + muAllContentEncryptionAlgorithm.RLock() + v, ok := allContentEncryptionAlgorithm[name] + muAllContentEncryptionAlgorithm.RUnlock() + if !ok { + panic(fmt.Sprintf(`jwa: ContentEncryptionAlgorithm %q not registered`, name)) + } + return v +} + +// ContentEncryptionAlgorithm represents the various encryption algorithms as described in https://tools.ietf.org/html/rfc7518#section-5 +type ContentEncryptionAlgorithm struct { + name string + deprecated bool +} + +func (s ContentEncryptionAlgorithm) String() string { + return s.name +} + +// IsDeprecated returns true if the ContentEncryptionAlgorithm object is deprecated. +func (s ContentEncryptionAlgorithm) IsDeprecated() bool { + return s.deprecated +} + +// EmptyContentEncryptionAlgorithm returns an empty ContentEncryptionAlgorithm object, used as a zero value. +func EmptyContentEncryptionAlgorithm() ContentEncryptionAlgorithm { + return ContentEncryptionAlgorithm{} +} + +// NewContentEncryptionAlgorithm creates a new ContentEncryptionAlgorithm object with the given name. +func NewContentEncryptionAlgorithm(name string, options ...NewAlgorithmOption) ContentEncryptionAlgorithm { + var deprecated bool + for _, option := range options { + switch option.Ident() { + case identDeprecated{}: + if err := option.Value(&deprecated); err != nil { + panic("jwa.NewContentEncryptionAlgorithm: WithDeprecated option must be a boolean") + } + } + } + return ContentEncryptionAlgorithm{name: name, deprecated: deprecated} +} + +// LookupContentEncryptionAlgorithm returns the ContentEncryptionAlgorithm object for the given name. 
+func LookupContentEncryptionAlgorithm(name string) (ContentEncryptionAlgorithm, bool) { + muAllContentEncryptionAlgorithm.RLock() + v, ok := allContentEncryptionAlgorithm[name] + muAllContentEncryptionAlgorithm.RUnlock() + return v, ok +} + +// RegisterContentEncryptionAlgorithm registers a new ContentEncryptionAlgorithm. The signature value must be immutable +// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library. +func RegisterContentEncryptionAlgorithm(algorithms ...ContentEncryptionAlgorithm) { + muAllContentEncryptionAlgorithm.Lock() + for _, alg := range algorithms { + allContentEncryptionAlgorithm[alg.String()] = alg + } + muAllContentEncryptionAlgorithm.Unlock() + rebuildContentEncryptionAlgorithm() +} + +// UnregisterContentEncryptionAlgorithm unregisters a ContentEncryptionAlgorithm from its known database. +// Non-existent entries, as well as built-in algorithms will silently be ignored. +func UnregisterContentEncryptionAlgorithm(algorithms ...ContentEncryptionAlgorithm) { + muAllContentEncryptionAlgorithm.Lock() + for _, alg := range algorithms { + if _, ok := builtinContentEncryptionAlgorithm[alg.String()]; ok { + continue + } + delete(allContentEncryptionAlgorithm, alg.String()) + } + muAllContentEncryptionAlgorithm.Unlock() + rebuildContentEncryptionAlgorithm() +} + +func rebuildContentEncryptionAlgorithm() { + list := make([]ContentEncryptionAlgorithm, 0, len(allContentEncryptionAlgorithm)) + muAllContentEncryptionAlgorithm.RLock() + for _, v := range allContentEncryptionAlgorithm { + list = append(list, v) + } + muAllContentEncryptionAlgorithm.RUnlock() + sort.Slice(list, func(i, j int) bool { + return list[i].String() < list[j].String() + }) + muListContentEncryptionAlgorithm.Lock() + listContentEncryptionAlgorithm = list + muListContentEncryptionAlgorithm.Unlock() +} + +// ContentEncryptionAlgorithms returns a list of all available values for ContentEncryptionAlgorithm. 
+func ContentEncryptionAlgorithms() []ContentEncryptionAlgorithm { + muListContentEncryptionAlgorithm.RLock() + defer muListContentEncryptionAlgorithm.RUnlock() + return listContentEncryptionAlgorithm +} + +// MarshalJSON serializes the ContentEncryptionAlgorithm object to a JSON string. +func (s ContentEncryptionAlgorithm) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// UnmarshalJSON deserializes the JSON string to a ContentEncryptionAlgorithm object. +func (s *ContentEncryptionAlgorithm) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return fmt.Errorf(`failed to unmarshal ContentEncryptionAlgorithm: %w`, err) + } + v, ok := LookupContentEncryptionAlgorithm(name) + if !ok { + return fmt.Errorf(`unknown ContentEncryptionAlgorithm: %q`, name) + } + *s = v + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/elliptic_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/elliptic_gen.go new file mode 100644 index 0000000000..2418efde08 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/elliptic_gen.go @@ -0,0 +1,190 @@ +// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT. 
+ +package jwa + +import ( + "encoding/json" + "fmt" + "sort" + "sync" +) + +var muAllEllipticCurveAlgorithm sync.RWMutex +var allEllipticCurveAlgorithm = map[string]EllipticCurveAlgorithm{} +var muListEllipticCurveAlgorithm sync.RWMutex +var listEllipticCurveAlgorithm []EllipticCurveAlgorithm +var builtinEllipticCurveAlgorithm = map[string]struct{}{} + +func init() { + // builtin values for EllipticCurveAlgorithm + algorithms := make([]EllipticCurveAlgorithm, 7) + algorithms[0] = NewEllipticCurveAlgorithm("Ed25519") + algorithms[1] = NewEllipticCurveAlgorithm("Ed448") + algorithms[2] = NewEllipticCurveAlgorithm("P-256") + algorithms[3] = NewEllipticCurveAlgorithm("P-384") + algorithms[4] = NewEllipticCurveAlgorithm("P-521") + algorithms[5] = NewEllipticCurveAlgorithm("X25519") + algorithms[6] = NewEllipticCurveAlgorithm("X448") + + RegisterEllipticCurveAlgorithm(algorithms...) +} + +// Ed25519 returns an object representing Ed25519 algorithm for EdDSA operations. +func Ed25519() EllipticCurveAlgorithm { + return lookupBuiltinEllipticCurveAlgorithm("Ed25519") +} + +// Ed448 returns an object representing Ed448 algorithm for EdDSA operations. +func Ed448() EllipticCurveAlgorithm { + return lookupBuiltinEllipticCurveAlgorithm("Ed448") +} + +var invalidEllipticCurve = NewEllipticCurveAlgorithm("P-invalid") + +// InvalidEllipticCurve returns an object representing an invalid elliptic curve. +func InvalidEllipticCurve() EllipticCurveAlgorithm { + return invalidEllipticCurve +} + +// P256 returns an object representing P-256 algorithm for ECDSA operations. +func P256() EllipticCurveAlgorithm { + return lookupBuiltinEllipticCurveAlgorithm("P-256") +} + +// P384 returns an object representing P-384 algorithm for ECDSA operations. +func P384() EllipticCurveAlgorithm { + return lookupBuiltinEllipticCurveAlgorithm("P-384") +} + +// P521 returns an object representing P-521 algorithm for ECDSA operations. 
+func P521() EllipticCurveAlgorithm { + return lookupBuiltinEllipticCurveAlgorithm("P-521") +} + +// X25519 returns an object representing X25519 algorithm for ECDH operations. +func X25519() EllipticCurveAlgorithm { + return lookupBuiltinEllipticCurveAlgorithm("X25519") +} + +// X448 returns an object representing X448 algorithm for ECDH operations. +func X448() EllipticCurveAlgorithm { + return lookupBuiltinEllipticCurveAlgorithm("X448") +} + +func lookupBuiltinEllipticCurveAlgorithm(name string) EllipticCurveAlgorithm { + muAllEllipticCurveAlgorithm.RLock() + v, ok := allEllipticCurveAlgorithm[name] + muAllEllipticCurveAlgorithm.RUnlock() + if !ok { + panic(fmt.Sprintf(`jwa: EllipticCurveAlgorithm %q not registered`, name)) + } + return v +} + +// EllipticCurveAlgorithm represents the algorithms used for EC keys +type EllipticCurveAlgorithm struct { + name string + deprecated bool +} + +func (s EllipticCurveAlgorithm) String() string { + return s.name +} + +// IsDeprecated returns true if the EllipticCurveAlgorithm object is deprecated. +func (s EllipticCurveAlgorithm) IsDeprecated() bool { + return s.deprecated +} + +// EmptyEllipticCurveAlgorithm returns an empty EllipticCurveAlgorithm object, used as a zero value. +func EmptyEllipticCurveAlgorithm() EllipticCurveAlgorithm { + return EllipticCurveAlgorithm{} +} + +// NewEllipticCurveAlgorithm creates a new EllipticCurveAlgorithm object with the given name. +func NewEllipticCurveAlgorithm(name string, options ...NewAlgorithmOption) EllipticCurveAlgorithm { + var deprecated bool + for _, option := range options { + switch option.Ident() { + case identDeprecated{}: + if err := option.Value(&deprecated); err != nil { + panic("jwa.NewEllipticCurveAlgorithm: WithDeprecated option must be a boolean") + } + } + } + return EllipticCurveAlgorithm{name: name, deprecated: deprecated} +} + +// LookupEllipticCurveAlgorithm returns the EllipticCurveAlgorithm object for the given name. 
+func LookupEllipticCurveAlgorithm(name string) (EllipticCurveAlgorithm, bool) { + muAllEllipticCurveAlgorithm.RLock() + v, ok := allEllipticCurveAlgorithm[name] + muAllEllipticCurveAlgorithm.RUnlock() + return v, ok +} + +// RegisterEllipticCurveAlgorithm registers a new EllipticCurveAlgorithm. The signature value must be immutable +// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library. +func RegisterEllipticCurveAlgorithm(algorithms ...EllipticCurveAlgorithm) { + muAllEllipticCurveAlgorithm.Lock() + for _, alg := range algorithms { + allEllipticCurveAlgorithm[alg.String()] = alg + } + muAllEllipticCurveAlgorithm.Unlock() + rebuildEllipticCurveAlgorithm() +} + +// UnregisterEllipticCurveAlgorithm unregisters a EllipticCurveAlgorithm from its known database. +// Non-existent entries, as well as built-in algorithms will silently be ignored. +func UnregisterEllipticCurveAlgorithm(algorithms ...EllipticCurveAlgorithm) { + muAllEllipticCurveAlgorithm.Lock() + for _, alg := range algorithms { + if _, ok := builtinEllipticCurveAlgorithm[alg.String()]; ok { + continue + } + delete(allEllipticCurveAlgorithm, alg.String()) + } + muAllEllipticCurveAlgorithm.Unlock() + rebuildEllipticCurveAlgorithm() +} + +func rebuildEllipticCurveAlgorithm() { + list := make([]EllipticCurveAlgorithm, 0, len(allEllipticCurveAlgorithm)) + muAllEllipticCurveAlgorithm.RLock() + for _, v := range allEllipticCurveAlgorithm { + list = append(list, v) + } + muAllEllipticCurveAlgorithm.RUnlock() + sort.Slice(list, func(i, j int) bool { + return list[i].String() < list[j].String() + }) + muListEllipticCurveAlgorithm.Lock() + listEllipticCurveAlgorithm = list + muListEllipticCurveAlgorithm.Unlock() +} + +// EllipticCurveAlgorithms returns a list of all available values for EllipticCurveAlgorithm. 
+func EllipticCurveAlgorithms() []EllipticCurveAlgorithm { + muListEllipticCurveAlgorithm.RLock() + defer muListEllipticCurveAlgorithm.RUnlock() + return listEllipticCurveAlgorithm +} + +// MarshalJSON serializes the EllipticCurveAlgorithm object to a JSON string. +func (s EllipticCurveAlgorithm) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// UnmarshalJSON deserializes the JSON string to a EllipticCurveAlgorithm object. +func (s *EllipticCurveAlgorithm) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return fmt.Errorf(`failed to unmarshal EllipticCurveAlgorithm: %w`, err) + } + v, ok := LookupEllipticCurveAlgorithm(name) + if !ok { + return fmt.Errorf(`unknown EllipticCurveAlgorithm: %q`, name) + } + *s = v + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/jwa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/jwa.go new file mode 100644 index 0000000000..29ac1dfe76 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/jwa.go @@ -0,0 +1,68 @@ +//go:generate ../tools/cmd/genjwa.sh + +// Package jwa defines the various algorithm described in https://tools.ietf.org/html/rfc7518 +package jwa + +import ( + "errors" + "fmt" +) + +// KeyAlgorithm is a workaround for jwk.Key being able to contain different +// types of algorithms in its `alg` field. +// +// Previously the storage for the `alg` field was represented as a string, +// but this caused some users to wonder why the field was not typed appropriately +// like other fields. 
+// +// Ideally we would like to keep track of Signature Algorithms and +// Key Encryption Algorithms separately, and force the APIs to +// type-check at compile time, but this allows users to pass a value from a +// jwk.Key directly +type KeyAlgorithm interface { + String() string + IsDeprecated() bool +} + +var errInvalidKeyAlgorithm = errors.New(`invalid key algorithm`) + +func ErrInvalidKeyAlgorithm() error { + return errInvalidKeyAlgorithm +} + +// KeyAlgorithmFrom takes either a string, `jwa.SignatureAlgorithm`, +// `jwa.KeyEncryptionAlgorithm`, or `jwa.ContentEncryptionAlgorithm`. +// and returns a `jwa.KeyAlgorithm`. +// +// If the value cannot be handled, it returns an `jwa.InvalidKeyAlgorithm` +// object instead of returning an error. This design choice was made to allow +// users to directly pass the return value to functions such as `jws.Sign()` +func KeyAlgorithmFrom(v any) (KeyAlgorithm, error) { + switch v := v.(type) { + case SignatureAlgorithm: + return v, nil + case KeyEncryptionAlgorithm: + return v, nil + case ContentEncryptionAlgorithm: + return v, nil + case string: + salg, ok := LookupSignatureAlgorithm(v) + if ok { + return salg, nil + } + + kalg, ok := LookupKeyEncryptionAlgorithm(v) + if ok { + return kalg, nil + } + + calg, ok := LookupContentEncryptionAlgorithm(v) + if ok { + return calg, nil + } + + return nil, fmt.Errorf(`invalid key value: %q: %w`, v, errInvalidKeyAlgorithm) + default: + return nil, fmt.Errorf(`invalid key type: %T: %w`, v, errInvalidKeyAlgorithm) + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_encryption_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_encryption_gen.go new file mode 100644 index 0000000000..716c43cd04 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_encryption_gen.go @@ -0,0 +1,268 @@ +// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT. 
+ +package jwa + +import ( + "encoding/json" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +var muAllKeyEncryptionAlgorithm sync.RWMutex +var allKeyEncryptionAlgorithm = map[string]KeyEncryptionAlgorithm{} +var muListKeyEncryptionAlgorithm sync.RWMutex +var listKeyEncryptionAlgorithm []KeyEncryptionAlgorithm +var builtinKeyEncryptionAlgorithm = map[string]struct{}{} + +func init() { + // builtin values for KeyEncryptionAlgorithm + algorithms := make([]KeyEncryptionAlgorithm, 19) + algorithms[0] = NewKeyEncryptionAlgorithm(tokens.A128GCMKW, WithIsSymmetric(true)) + algorithms[1] = NewKeyEncryptionAlgorithm(tokens.A128KW, WithIsSymmetric(true)) + algorithms[2] = NewKeyEncryptionAlgorithm(tokens.A192GCMKW, WithIsSymmetric(true)) + algorithms[3] = NewKeyEncryptionAlgorithm(tokens.A192KW, WithIsSymmetric(true)) + algorithms[4] = NewKeyEncryptionAlgorithm(tokens.A256GCMKW, WithIsSymmetric(true)) + algorithms[5] = NewKeyEncryptionAlgorithm(tokens.A256KW, WithIsSymmetric(true)) + algorithms[6] = NewKeyEncryptionAlgorithm(tokens.DIRECT, WithIsSymmetric(true)) + algorithms[7] = NewKeyEncryptionAlgorithm(tokens.ECDH_ES) + algorithms[8] = NewKeyEncryptionAlgorithm(tokens.ECDH_ES_A128KW) + algorithms[9] = NewKeyEncryptionAlgorithm(tokens.ECDH_ES_A192KW) + algorithms[10] = NewKeyEncryptionAlgorithm(tokens.ECDH_ES_A256KW) + algorithms[11] = NewKeyEncryptionAlgorithm(tokens.PBES2_HS256_A128KW, WithIsSymmetric(true)) + algorithms[12] = NewKeyEncryptionAlgorithm(tokens.PBES2_HS384_A192KW, WithIsSymmetric(true)) + algorithms[13] = NewKeyEncryptionAlgorithm(tokens.PBES2_HS512_A256KW, WithIsSymmetric(true)) + algorithms[14] = NewKeyEncryptionAlgorithm(tokens.RSA1_5, WithDeprecated(true)) + algorithms[15] = NewKeyEncryptionAlgorithm(tokens.RSA_OAEP) + algorithms[16] = NewKeyEncryptionAlgorithm(tokens.RSA_OAEP_256) + algorithms[17] = NewKeyEncryptionAlgorithm(tokens.RSA_OAEP_384) + algorithms[18] = NewKeyEncryptionAlgorithm(tokens.RSA_OAEP_512) + + 
RegisterKeyEncryptionAlgorithm(algorithms...) +} + +// A128GCMKW returns an object representing AES-GCM key wrap (128) key encryption algorithm. +func A128GCMKW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.A128GCMKW) +} + +// A128KW returns an object representing AES key wrap (128) key encryption algorithm. +func A128KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.A128KW) +} + +// A192GCMKW returns an object representing AES-GCM key wrap (192) key encryption algorithm. +func A192GCMKW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.A192GCMKW) +} + +// A192KW returns an object representing AES key wrap (192) key encryption algorithm. +func A192KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.A192KW) +} + +// A256GCMKW returns an object representing AES-GCM key wrap (256) key encryption algorithm. +func A256GCMKW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.A256GCMKW) +} + +// A256KW returns an object representing AES key wrap (256) key encryption algorithm. +func A256KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.A256KW) +} + +// DIRECT returns an object representing Direct key encryption algorithm. +func DIRECT() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.DIRECT) +} + +// ECDH_ES returns an object representing ECDH-ES key encryption algorithm. +func ECDH_ES() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.ECDH_ES) +} + +// ECDH_ES_A128KW returns an object representing ECDH-ES + AES key wrap (128) key encryption algorithm. +func ECDH_ES_A128KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.ECDH_ES_A128KW) +} + +// ECDH_ES_A192KW returns an object representing ECDH-ES + AES key wrap (192) key encryption algorithm. 
+func ECDH_ES_A192KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.ECDH_ES_A192KW) +} + +// ECDH_ES_A256KW returns an object representing ECDH-ES + AES key wrap (256) key encryption algorithm. +func ECDH_ES_A256KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.ECDH_ES_A256KW) +} + +// PBES2_HS256_A128KW returns an object representing PBES2 + HMAC-SHA256 + AES key wrap (128) key encryption algorithm. +func PBES2_HS256_A128KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.PBES2_HS256_A128KW) +} + +// PBES2_HS384_A192KW returns an object representing PBES2 + HMAC-SHA384 + AES key wrap (192) key encryption algorithm. +func PBES2_HS384_A192KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.PBES2_HS384_A192KW) +} + +// PBES2_HS512_A256KW returns an object representing PBES2 + HMAC-SHA512 + AES key wrap (256) key encryption algorithm. +func PBES2_HS512_A256KW() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.PBES2_HS512_A256KW) +} + +// RSA1_5 returns an object representing RSA-PKCS1v1.5 key encryption algorithm. +func RSA1_5() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA1_5) +} + +// RSA_OAEP returns an object representing RSA-OAEP-SHA1 key encryption algorithm. +func RSA_OAEP() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA_OAEP) +} + +// RSA_OAEP_256 returns an object representing RSA-OAEP-SHA256 key encryption algorithm. +func RSA_OAEP_256() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA_OAEP_256) +} + +// RSA_OAEP_384 returns an object representing RSA-OAEP-SHA384 key encryption algorithm. +func RSA_OAEP_384() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA_OAEP_384) +} + +// RSA_OAEP_512 returns an object representing RSA-OAEP-SHA512 key encryption algorithm. 
+func RSA_OAEP_512() KeyEncryptionAlgorithm { + return lookupBuiltinKeyEncryptionAlgorithm(tokens.RSA_OAEP_512) +} + +func lookupBuiltinKeyEncryptionAlgorithm(name string) KeyEncryptionAlgorithm { + muAllKeyEncryptionAlgorithm.RLock() + v, ok := allKeyEncryptionAlgorithm[name] + muAllKeyEncryptionAlgorithm.RUnlock() + if !ok { + panic(fmt.Sprintf(`jwa: KeyEncryptionAlgorithm %q not registered`, name)) + } + return v +} + +// KeyEncryptionAlgorithm represents the various encryption algorithms as described in https://tools.ietf.org/html/rfc7518#section-4.1 +type KeyEncryptionAlgorithm struct { + name string + deprecated bool + isSymmetric bool +} + +func (s KeyEncryptionAlgorithm) String() string { + return s.name +} + +// IsDeprecated returns true if the KeyEncryptionAlgorithm object is deprecated. +func (s KeyEncryptionAlgorithm) IsDeprecated() bool { + return s.deprecated +} + +// IsSymmetric returns true if the KeyEncryptionAlgorithm object is symmetric. Symmetric algorithms use the same key for both encryption and decryption. +func (s KeyEncryptionAlgorithm) IsSymmetric() bool { + return s.isSymmetric +} + +// EmptyKeyEncryptionAlgorithm returns an empty KeyEncryptionAlgorithm object, used as a zero value. +func EmptyKeyEncryptionAlgorithm() KeyEncryptionAlgorithm { + return KeyEncryptionAlgorithm{} +} + +// NewKeyEncryptionAlgorithm creates a new KeyEncryptionAlgorithm object with the given name. 
+func NewKeyEncryptionAlgorithm(name string, options ...NewKeyEncryptionAlgorithmOption) KeyEncryptionAlgorithm { + var deprecated bool + var isSymmetric bool + for _, option := range options { + switch option.Ident() { + case identIsSymmetric{}: + if err := option.Value(&isSymmetric); err != nil { + panic("jwa.NewKeyEncryptionAlgorithm: WithIsSymmetric option must be a boolean") + } + case identDeprecated{}: + if err := option.Value(&deprecated); err != nil { + panic("jwa.NewKeyEncryptionAlgorithm: WithDeprecated option must be a boolean") + } + } + } + return KeyEncryptionAlgorithm{name: name, deprecated: deprecated, isSymmetric: isSymmetric} +} + +// LookupKeyEncryptionAlgorithm returns the KeyEncryptionAlgorithm object for the given name. +func LookupKeyEncryptionAlgorithm(name string) (KeyEncryptionAlgorithm, bool) { + muAllKeyEncryptionAlgorithm.RLock() + v, ok := allKeyEncryptionAlgorithm[name] + muAllKeyEncryptionAlgorithm.RUnlock() + return v, ok +} + +// RegisterKeyEncryptionAlgorithm registers a new KeyEncryptionAlgorithm. The signature value must be immutable +// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library. +func RegisterKeyEncryptionAlgorithm(algorithms ...KeyEncryptionAlgorithm) { + muAllKeyEncryptionAlgorithm.Lock() + for _, alg := range algorithms { + allKeyEncryptionAlgorithm[alg.String()] = alg + } + muAllKeyEncryptionAlgorithm.Unlock() + rebuildKeyEncryptionAlgorithm() +} + +// UnregisterKeyEncryptionAlgorithm unregisters a KeyEncryptionAlgorithm from its known database. +// Non-existent entries, as well as built-in algorithms will silently be ignored. 
+func UnregisterKeyEncryptionAlgorithm(algorithms ...KeyEncryptionAlgorithm) { + muAllKeyEncryptionAlgorithm.Lock() + for _, alg := range algorithms { + if _, ok := builtinKeyEncryptionAlgorithm[alg.String()]; ok { + continue + } + delete(allKeyEncryptionAlgorithm, alg.String()) + } + muAllKeyEncryptionAlgorithm.Unlock() + rebuildKeyEncryptionAlgorithm() +} + +func rebuildKeyEncryptionAlgorithm() { + list := make([]KeyEncryptionAlgorithm, 0, len(allKeyEncryptionAlgorithm)) + muAllKeyEncryptionAlgorithm.RLock() + for _, v := range allKeyEncryptionAlgorithm { + list = append(list, v) + } + muAllKeyEncryptionAlgorithm.RUnlock() + sort.Slice(list, func(i, j int) bool { + return list[i].String() < list[j].String() + }) + muListKeyEncryptionAlgorithm.Lock() + listKeyEncryptionAlgorithm = list + muListKeyEncryptionAlgorithm.Unlock() +} + +// KeyEncryptionAlgorithms returns a list of all available values for KeyEncryptionAlgorithm. +func KeyEncryptionAlgorithms() []KeyEncryptionAlgorithm { + muListKeyEncryptionAlgorithm.RLock() + defer muListKeyEncryptionAlgorithm.RUnlock() + return listKeyEncryptionAlgorithm +} + +// MarshalJSON serializes the KeyEncryptionAlgorithm object to a JSON string. +func (s KeyEncryptionAlgorithm) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// UnmarshalJSON deserializes the JSON string to a KeyEncryptionAlgorithm object. 
+func (s *KeyEncryptionAlgorithm) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return fmt.Errorf(`failed to unmarshal KeyEncryptionAlgorithm: %w`, err) + } + v, ok := LookupKeyEncryptionAlgorithm(name) + if !ok { + return fmt.Errorf(`unknown KeyEncryptionAlgorithm: %q`, name) + } + *s = v + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_type_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_type_gen.go new file mode 100644 index 0000000000..8bc5ebb5f0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/key_type_gen.go @@ -0,0 +1,172 @@ +// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT. + +package jwa + +import ( + "encoding/json" + "fmt" + "sort" + "sync" +) + +var muAllKeyType sync.RWMutex +var allKeyType = map[string]KeyType{} +var muListKeyType sync.RWMutex +var listKeyType []KeyType +var builtinKeyType = map[string]struct{}{} + +func init() { + // builtin values for KeyType + algorithms := make([]KeyType, 4) + algorithms[0] = NewKeyType("EC") + algorithms[1] = NewKeyType("OKP") + algorithms[2] = NewKeyType("oct") + algorithms[3] = NewKeyType("RSA") + + RegisterKeyType(algorithms...) +} + +// EC returns an object representing EC. Elliptic Curve +func EC() KeyType { + return lookupBuiltinKeyType("EC") +} + +var invalidKeyType = NewKeyType("") + +// InvalidKeyType returns an object representing invalid key type. Invalid KeyType +func InvalidKeyType() KeyType { + return invalidKeyType +} + +// OKP returns an object representing OKP. Octet string key pairs +func OKP() KeyType { + return lookupBuiltinKeyType("OKP") +} + +// OctetSeq returns an object representing oct. Octet sequence (used to represent symmetric keys) +func OctetSeq() KeyType { + return lookupBuiltinKeyType("oct") +} + +// RSA returns an object representing RSA. 
RSA +func RSA() KeyType { + return lookupBuiltinKeyType("RSA") +} + +func lookupBuiltinKeyType(name string) KeyType { + muAllKeyType.RLock() + v, ok := allKeyType[name] + muAllKeyType.RUnlock() + if !ok { + panic(fmt.Sprintf(`jwa: KeyType %q not registered`, name)) + } + return v +} + +// KeyType represents the key type ("kty") that are supported +type KeyType struct { + name string + deprecated bool +} + +func (s KeyType) String() string { + return s.name +} + +// IsDeprecated returns true if the KeyType object is deprecated. +func (s KeyType) IsDeprecated() bool { + return s.deprecated +} + +// EmptyKeyType returns an empty KeyType object, used as a zero value. +func EmptyKeyType() KeyType { + return KeyType{} +} + +// NewKeyType creates a new KeyType object with the given name. +func NewKeyType(name string, options ...NewAlgorithmOption) KeyType { + var deprecated bool + for _, option := range options { + switch option.Ident() { + case identDeprecated{}: + if err := option.Value(&deprecated); err != nil { + panic("jwa.NewKeyType: WithDeprecated option must be a boolean") + } + } + } + return KeyType{name: name, deprecated: deprecated} +} + +// LookupKeyType returns the KeyType object for the given name. +func LookupKeyType(name string) (KeyType, bool) { + muAllKeyType.RLock() + v, ok := allKeyType[name] + muAllKeyType.RUnlock() + return v, ok +} + +// RegisterKeyType registers a new KeyType. The signature value must be immutable +// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library. +func RegisterKeyType(algorithms ...KeyType) { + muAllKeyType.Lock() + for _, alg := range algorithms { + allKeyType[alg.String()] = alg + } + muAllKeyType.Unlock() + rebuildKeyType() +} + +// UnregisterKeyType unregisters a KeyType from its known database. +// Non-existent entries, as well as built-in algorithms will silently be ignored. 
+func UnregisterKeyType(algorithms ...KeyType) { + muAllKeyType.Lock() + for _, alg := range algorithms { + if _, ok := builtinKeyType[alg.String()]; ok { + continue + } + delete(allKeyType, alg.String()) + } + muAllKeyType.Unlock() + rebuildKeyType() +} + +func rebuildKeyType() { + list := make([]KeyType, 0, len(allKeyType)) + muAllKeyType.RLock() + for _, v := range allKeyType { + list = append(list, v) + } + muAllKeyType.RUnlock() + sort.Slice(list, func(i, j int) bool { + return list[i].String() < list[j].String() + }) + muListKeyType.Lock() + listKeyType = list + muListKeyType.Unlock() +} + +// KeyTypes returns a list of all available values for KeyType. +func KeyTypes() []KeyType { + muListKeyType.RLock() + defer muListKeyType.RUnlock() + return listKeyType +} + +// MarshalJSON serializes the KeyType object to a JSON string. +func (s KeyType) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// UnmarshalJSON deserializes the JSON string to a KeyType object. +func (s *KeyType) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return fmt.Errorf(`failed to unmarshal KeyType: %w`, err) + } + v, ok := LookupKeyType(name) + if !ok { + return fmt.Errorf(`unknown KeyType: %q`, name) + } + *s = v + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwa/options.yaml new file mode 100644 index 0000000000..dd498c680e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/options.yaml @@ -0,0 +1,41 @@ +package_name: jwa +output: jwa/options_gen.go +interfaces: + - name: NewAlgorithmOption + methods: + - newSignatureAlgorithmOption + - newKeyEncryptionAlgorithmOption + - newSignatureKeyEncryptionAlgorithmOption + comment: | + NewAlgorithmOption represents an option that can be passed to any of the constructor functions + - name: NewSignatureAlgorithmOption + methods: + - newSignatureAlgorithmOption + comment: | + 
NewSignatureAlgorithmOption represents an option that can be passed to the NewSignatureAlgorithm + - name: NewKeyEncryptionAlgorithmOption + methods: + - newKeyEncryptionAlgorithmOption + comment: | + NewKeyEncryptionAlgorithmOption represents an option that can be passed to the NewKeyEncryptionAlgorithm + - name: NewSignatureKeyEncryptionAlgorithmOption + comment: | + NewSignatureKeyEncryptionAlgorithmOption represents an option that can be passed to both + NewSignatureAlgorithm and NewKeyEncryptionAlgorithm + methods: + - newSignatureAlgorithmOption + - newKeyEncryptionAlgorithmOption +options: + - ident: IsSymmetric + interface: NewSignatureKeyEncryptionAlgorithmOption + argument_type: bool + comment: | + IsSymmetric specifies that the algorithm is symmetric + - ident: Deprecated + interface: NewAlgorithmOption + argument_type: bool + comment: | + WithDeprecated specifies that the algorithm is deprecated. In order to + un-deprecate an algorithm, you will have to create a new algorithm + with the same values but with the Deprecated option set to false, and + then call RegisterXXXXAlgorithm with the new algorithm. \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/options_gen.go new file mode 100644 index 0000000000..9394ac587a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/options_gen.go @@ -0,0 +1,91 @@ +// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT. 
+ +package jwa + +import ( + "github.com/lestrrat-go/option/v2" +) + +type Option = option.Interface + +// NewAlgorithmOption represents an option that can be passed to any of the constructor functions +type NewAlgorithmOption interface { + Option + newSignatureAlgorithmOption() + newKeyEncryptionAlgorithmOption() + newSignatureKeyEncryptionAlgorithmOption() +} + +type newAlgorithmOption struct { + Option +} + +func (*newAlgorithmOption) newSignatureAlgorithmOption() {} + +func (*newAlgorithmOption) newKeyEncryptionAlgorithmOption() {} + +func (*newAlgorithmOption) newSignatureKeyEncryptionAlgorithmOption() {} + +// NewKeyEncryptionAlgorithmOption represents an option that can be passed to the NewKeyEncryptionAlgorithm +type NewKeyEncryptionAlgorithmOption interface { + Option + newKeyEncryptionAlgorithmOption() +} + +type newKeyEncryptionAlgorithmOption struct { + Option +} + +func (*newKeyEncryptionAlgorithmOption) newKeyEncryptionAlgorithmOption() {} + +// NewSignatureAlgorithmOption represents an option that can be passed to the NewSignatureAlgorithm +type NewSignatureAlgorithmOption interface { + Option + newSignatureAlgorithmOption() +} + +type newSignatureAlgorithmOption struct { + Option +} + +func (*newSignatureAlgorithmOption) newSignatureAlgorithmOption() {} + +// NewSignatureKeyEncryptionAlgorithmOption represents an option that can be passed to both +// NewSignatureAlgorithm and NewKeyEncryptionAlgorithm +type NewSignatureKeyEncryptionAlgorithmOption interface { + Option + newSignatureAlgorithmOption() + newKeyEncryptionAlgorithmOption() +} + +type newSignatureKeyEncryptionAlgorithmOption struct { + Option +} + +func (*newSignatureKeyEncryptionAlgorithmOption) newSignatureAlgorithmOption() {} + +func (*newSignatureKeyEncryptionAlgorithmOption) newKeyEncryptionAlgorithmOption() {} + +type identDeprecated struct{} +type identIsSymmetric struct{} + +func (identDeprecated) String() string { + return "WithDeprecated" +} + +func (identIsSymmetric) String() 
string { + return "WithIsSymmetric" +} + +// WithDeprecated specifies that the algorithm is deprecated. In order to +// un-deprecate an algorithm, you will have to create a new algorithm +// with the same values but with the Deprecated option set to false, and +// then call RegisterXXXXAlgorithm with the new algorithm. +func WithDeprecated(v bool) NewAlgorithmOption { + return &newAlgorithmOption{option.New(identDeprecated{}, v)} +} + +// IsSymmetric specifies that the algorithm is symmetric +func WithIsSymmetric(v bool) NewSignatureKeyEncryptionAlgorithmOption { + return &newSignatureKeyEncryptionAlgorithmOption{option.New(identIsSymmetric{}, v)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go new file mode 100644 index 0000000000..9ce6ad4d05 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/secp2561k.go @@ -0,0 +1,14 @@ +//go:build jwx_es256k + +package jwa + +var secp256k1Algorithm = NewEllipticCurveAlgorithm("secp256k1") + +// This constant is only available if compiled with jwx_es256k build tag +func Secp256k1() EllipticCurveAlgorithm { + return secp256k1Algorithm +} + +func init() { + RegisterEllipticCurveAlgorithm(secp256k1Algorithm) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwa/signature_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwa/signature_gen.go new file mode 100644 index 0000000000..653d7d56af --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwa/signature_gen.go @@ -0,0 +1,242 @@ +// Code generated by tools/cmd/genjwa/main.go. DO NOT EDIT. 
+ +package jwa + +import ( + "encoding/json" + "fmt" + "sort" + "sync" +) + +var muAllSignatureAlgorithm sync.RWMutex +var allSignatureAlgorithm = map[string]SignatureAlgorithm{} +var muListSignatureAlgorithm sync.RWMutex +var listSignatureAlgorithm []SignatureAlgorithm +var builtinSignatureAlgorithm = map[string]struct{}{} + +func init() { + // builtin values for SignatureAlgorithm + algorithms := make([]SignatureAlgorithm, 15) + algorithms[0] = NewSignatureAlgorithm("ES256") + algorithms[1] = NewSignatureAlgorithm("ES256K") + algorithms[2] = NewSignatureAlgorithm("ES384") + algorithms[3] = NewSignatureAlgorithm("ES512") + algorithms[4] = NewSignatureAlgorithm("EdDSA") + algorithms[5] = NewSignatureAlgorithm("HS256", WithIsSymmetric(true)) + algorithms[6] = NewSignatureAlgorithm("HS384", WithIsSymmetric(true)) + algorithms[7] = NewSignatureAlgorithm("HS512", WithIsSymmetric(true)) + algorithms[8] = NewSignatureAlgorithm("none") + algorithms[9] = NewSignatureAlgorithm("PS256") + algorithms[10] = NewSignatureAlgorithm("PS384") + algorithms[11] = NewSignatureAlgorithm("PS512") + algorithms[12] = NewSignatureAlgorithm("RS256") + algorithms[13] = NewSignatureAlgorithm("RS384") + algorithms[14] = NewSignatureAlgorithm("RS512") + + RegisterSignatureAlgorithm(algorithms...) +} + +// ES256 returns an object representing ECDSA signature algorithm using P-256 curve and SHA-256. +func ES256() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("ES256") +} + +// ES256K returns an object representing ECDSA signature algorithm using secp256k1 curve and SHA-256. +func ES256K() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("ES256K") +} + +// ES384 returns an object representing ECDSA signature algorithm using P-384 curve and SHA-384. +func ES384() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("ES384") +} + +// ES512 returns an object representing ECDSA signature algorithm using P-521 curve and SHA-512. 
+func ES512() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("ES512") +} + +// EdDSA returns an object representing EdDSA signature algorithms. +func EdDSA() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("EdDSA") +} + +// HS256 returns an object representing HMAC signature algorithm using SHA-256. +func HS256() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("HS256") +} + +// HS384 returns an object representing HMAC signature algorithm using SHA-384. +func HS384() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("HS384") +} + +// HS512 returns an object representing HMAC signature algorithm using SHA-512. +func HS512() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("HS512") +} + +// NoSignature returns an object representing the lack of a signature algorithm. Using this value specifies that the content should not be signed, which you should avoid doing. +func NoSignature() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("none") +} + +// PS256 returns an object representing RSASSA-PSS signature algorithm using SHA-256 and MGF1-SHA256. +func PS256() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("PS256") +} + +// PS384 returns an object representing RSASSA-PSS signature algorithm using SHA-384 and MGF1-SHA384. +func PS384() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("PS384") +} + +// PS512 returns an object representing RSASSA-PSS signature algorithm using SHA-512 and MGF1-SHA512. +func PS512() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("PS512") +} + +// RS256 returns an object representing RSASSA-PKCS-v1.5 signature algorithm using SHA-256. +func RS256() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("RS256") +} + +// RS384 returns an object representing RSASSA-PKCS-v1.5 signature algorithm using SHA-384. 
+func RS384() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("RS384") +} + +// RS512 returns an object representing RSASSA-PKCS-v1.5 signature algorithm using SHA-512. +func RS512() SignatureAlgorithm { + return lookupBuiltinSignatureAlgorithm("RS512") +} + +func lookupBuiltinSignatureAlgorithm(name string) SignatureAlgorithm { + muAllSignatureAlgorithm.RLock() + v, ok := allSignatureAlgorithm[name] + muAllSignatureAlgorithm.RUnlock() + if !ok { + panic(fmt.Sprintf(`jwa: SignatureAlgorithm %q not registered`, name)) + } + return v +} + +// SignatureAlgorithm represents the various signature algorithms as described in https://tools.ietf.org/html/rfc7518#section-3.1 +type SignatureAlgorithm struct { + name string + deprecated bool + isSymmetric bool +} + +func (s SignatureAlgorithm) String() string { + return s.name +} + +// IsDeprecated returns true if the SignatureAlgorithm object is deprecated. +func (s SignatureAlgorithm) IsDeprecated() bool { + return s.deprecated +} + +// IsSymmetric returns true if the SignatureAlgorithm object is symmetric. Symmetric algorithms use the same key for both encryption and decryption. +func (s SignatureAlgorithm) IsSymmetric() bool { + return s.isSymmetric +} + +// EmptySignatureAlgorithm returns an empty SignatureAlgorithm object, used as a zero value. +func EmptySignatureAlgorithm() SignatureAlgorithm { + return SignatureAlgorithm{} +} + +// NewSignatureAlgorithm creates a new SignatureAlgorithm object with the given name. 
+func NewSignatureAlgorithm(name string, options ...NewSignatureAlgorithmOption) SignatureAlgorithm { + var deprecated bool + var isSymmetric bool + for _, option := range options { + switch option.Ident() { + case identIsSymmetric{}: + if err := option.Value(&isSymmetric); err != nil { + panic("jwa.NewSignatureAlgorithm: WithIsSymmetric option must be a boolean") + } + case identDeprecated{}: + if err := option.Value(&deprecated); err != nil { + panic("jwa.NewSignatureAlgorithm: WithDeprecated option must be a boolean") + } + } + } + return SignatureAlgorithm{name: name, deprecated: deprecated, isSymmetric: isSymmetric} +} + +// LookupSignatureAlgorithm returns the SignatureAlgorithm object for the given name. +func LookupSignatureAlgorithm(name string) (SignatureAlgorithm, bool) { + muAllSignatureAlgorithm.RLock() + v, ok := allSignatureAlgorithm[name] + muAllSignatureAlgorithm.RUnlock() + return v, ok +} + +// RegisterSignatureAlgorithm registers a new SignatureAlgorithm. The signature value must be immutable +// and safe to be used by multiple goroutines, as it is going to be shared with all other users of this library. +func RegisterSignatureAlgorithm(algorithms ...SignatureAlgorithm) { + muAllSignatureAlgorithm.Lock() + for _, alg := range algorithms { + allSignatureAlgorithm[alg.String()] = alg + } + muAllSignatureAlgorithm.Unlock() + rebuildSignatureAlgorithm() +} + +// UnregisterSignatureAlgorithm unregisters a SignatureAlgorithm from its known database. +// Non-existent entries, as well as built-in algorithms will silently be ignored. 
+func UnregisterSignatureAlgorithm(algorithms ...SignatureAlgorithm) { + muAllSignatureAlgorithm.Lock() + for _, alg := range algorithms { + if _, ok := builtinSignatureAlgorithm[alg.String()]; ok { + continue + } + delete(allSignatureAlgorithm, alg.String()) + } + muAllSignatureAlgorithm.Unlock() + rebuildSignatureAlgorithm() +} + +func rebuildSignatureAlgorithm() { + list := make([]SignatureAlgorithm, 0, len(allSignatureAlgorithm)) + muAllSignatureAlgorithm.RLock() + for _, v := range allSignatureAlgorithm { + list = append(list, v) + } + muAllSignatureAlgorithm.RUnlock() + sort.Slice(list, func(i, j int) bool { + return list[i].String() < list[j].String() + }) + muListSignatureAlgorithm.Lock() + listSignatureAlgorithm = list + muListSignatureAlgorithm.Unlock() +} + +// SignatureAlgorithms returns a list of all available values for SignatureAlgorithm. +func SignatureAlgorithms() []SignatureAlgorithm { + muListSignatureAlgorithm.RLock() + defer muListSignatureAlgorithm.RUnlock() + return listSignatureAlgorithm +} + +// MarshalJSON serializes the SignatureAlgorithm object to a JSON string. +func (s SignatureAlgorithm) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// UnmarshalJSON deserializes the JSON string to a SignatureAlgorithm object. 
+func (s *SignatureAlgorithm) UnmarshalJSON(data []byte) error { + var name string + if err := json.Unmarshal(data, &name); err != nil { + return fmt.Errorf(`failed to unmarshal SignatureAlgorithm: %w`, err) + } + v, ok := LookupSignatureAlgorithm(name) + if !ok { + return fmt.Errorf(`unknown SignatureAlgorithm: %q`, name) + } + *s = v + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/BUILD.bazel new file mode 100644 index 0000000000..0719efd2dc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/BUILD.bazel @@ -0,0 +1,70 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "jwe", + srcs = [ + "compress.go", + "decrypt.go", + "encrypt.go", + "errors.go", + "filter.go", + "headers.go", + "headers_gen.go", + "interface.go", + "io.go", + "jwe.go", + "key_provider.go", + "message.go", + "options.go", + "options_gen.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwe", + visibility = ["//visibility:public"], + deps = [ + "//cert", + "//internal/base64", + "//transform", + "//internal/json", + "//internal/tokens", + "//internal/keyconv", + "//internal/pool", + "//jwa", + "//jwe/internal/aescbc", + "//jwe/internal/cipher", + "//jwe/internal/content_crypt", + "//jwe/internal/keygen", + "//jwe/jwebb", + "//jwk", + "@com_github_lestrrat_go_blackmagic//:blackmagic", + "@com_github_lestrrat_go_option_v2//:option", + "@org_golang_x_crypto//pbkdf2", + ], +) + +go_test( + name = "jwe_test", + srcs = [ + "filter_test.go", + "gh402_test.go", + "headers_test.go", + "jwe_test.go", + "message_test.go", + "options_gen_test.go", + "speed_test.go", + ], + embed = [":jwe"], + deps = [ + "//cert", + "//internal/json", + "//internal/jwxtest", + "//jwa", + "//jwk", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":jwe", + visibility = ["//visibility:public"], +) diff --git 
a/vendor/github.com/lestrrat-go/jwx/v3/jwe/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jwe/README.md new file mode 100644 index 0000000000..c85d05bbbe --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/README.md @@ -0,0 +1,94 @@ +# JWE [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v3/jwe.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwe) + +Package jwe implements JWE as described in [RFC7516](https://tools.ietf.org/html/rfc7516) + +* Encrypt and Decrypt arbitrary data +* Content compression and decompression +* Add arbitrary fields in the JWE header object + +How-to style documentation can be found in the [docs directory](../docs). + +Examples are located in the examples directory ([jwe_example_test.go](../examples/jwe_example_test.go)) + +Supported key encryption algorithm: + +| Algorithm | Supported? | Constant in [jwa](../jwa) | +|:-----------------------------------------|:-----------|:-------------------------| +| RSA-PKCS1v1.5 | YES | jwa.RSA1_5 | +| RSA-OAEP-SHA1 | YES | jwa.RSA_OAEP | +| RSA-OAEP-SHA256 | YES | jwa.RSA_OAEP_256 | +| AES key wrap (128) | YES | jwa.A128KW | +| AES key wrap (192) | YES | jwa.A192KW | +| AES key wrap (256) | YES | jwa.A256KW | +| Direct encryption | YES (1) | jwa.DIRECT | +| ECDH-ES | YES (1) | jwa.ECDH_ES | +| ECDH-ES + AES key wrap (128) | YES | jwa.ECDH_ES_A128KW | +| ECDH-ES + AES key wrap (192) | YES | jwa.ECDH_ES_A192KW | +| ECDH-ES + AES key wrap (256) | YES | jwa.ECDH_ES_A256KW | +| AES-GCM key wrap (128) | YES | jwa.A128GCMKW | +| AES-GCM key wrap (192) | YES | jwa.A192GCMKW | +| AES-GCM key wrap (256) | YES | jwa.A256GCMKW | +| PBES2 + HMAC-SHA256 + AES key wrap (128) | YES | jwa.PBES2_HS256_A128KW | +| PBES2 + HMAC-SHA384 + AES key wrap (192) | YES | jwa.PBES2_HS384_A192KW | +| PBES2 + HMAC-SHA512 + AES key wrap (256) | YES | jwa.PBES2_HS512_A256KW | + +* Note 1: Single-recipient only + +Supported content encryption algorithm: + +| Algorithm | Supported? 
| Constant in [jwa](../jwa) | +|:----------------------------|:-----------|:--------------------------| +| AES-CBC + HMAC-SHA256 (128) | YES | jwa.A128CBC_HS256 | +| AES-CBC + HMAC-SHA384 (192) | YES | jwa.A192CBC_HS384 | +| AES-CBC + HMAC-SHA512 (256) | YES | jwa.A256CBC_HS512 | +| AES-GCM (128) | YES | jwa.A128GCM | +| AES-GCM (192) | YES | jwa.A192GCM | +| AES-GCM (256) | YES | jwa.A256GCM | + +# SYNOPSIS + +## Encrypt data + +```go +func ExampleEncrypt() { + privkey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Printf("failed to generate private key: %s", err) + return + } + + payload := []byte("Lorem Ipsum") + + encrypted, err := jwe.Encrypt(payload, jwe.WithKey(jwa.RSA1_5, &privkey.PublicKey), jwe.WithContentEncryption(jwa.A128CBC_HS256)) + if err != nil { + log.Printf("failed to encrypt payload: %s", err) + return + } + _ = encrypted + // OUTPUT: +} +``` + +## Decrypt data + +```go +func ExampleDecrypt() { + privkey, encrypted, err := exampleGenPayload() + if err != nil { + log.Printf("failed to generate encrypted payload: %s", err) + return + } + + decrypted, err := jwe.Decrypt(encrypted, jwe.WithKey(jwa.RSA1_5, privkey)) + if err != nil { + log.Printf("failed to decrypt: %s", err) + return + } + + if string(decrypted) != "Lorem Ipsum" { + log.Printf("WHAT?!") + return + } + // OUTPUT: +} +``` diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/compress.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/compress.go new file mode 100644 index 0000000000..a1ed158fb0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/compress.go @@ -0,0 +1,62 @@ +package jwe + +import ( + "bytes" + "compress/flate" + "fmt" + "io" + + "github.com/lestrrat-go/jwx/v3/internal/pool" +) + +func uncompress(src []byte, maxBufferSize int64) ([]byte, error) { + var dst bytes.Buffer + r := flate.NewReader(bytes.NewReader(src)) + defer r.Close() + var buf [16384]byte + var sofar int64 + for { + n, readErr := r.Read(buf[:]) + sofar += int64(n) + if sofar > 
maxBufferSize { + return nil, fmt.Errorf(`compressed payload exceeds maximum allowed size`) + } + if readErr != nil { + // if we have a read error, and it's not EOF, then we need to stop + if readErr != io.EOF { + return nil, fmt.Errorf(`failed to read inflated data: %w`, readErr) + } + } + + if _, err := dst.Write(buf[:n]); err != nil { + return nil, fmt.Errorf(`failed to write inflated data: %w`, err) + } + + if readErr != nil { + // if it got here, then readErr == io.EOF, we're done + return dst.Bytes(), nil + } + } +} + +func compress(plaintext []byte) ([]byte, error) { + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + + w, _ := flate.NewWriter(buf, 1) + in := plaintext + for len(in) > 0 { + n, err := w.Write(in) + if err != nil { + return nil, fmt.Errorf(`failed to write to compression writer: %w`, err) + } + in = in[n:] + } + if err := w.Close(); err != nil { + return nil, fmt.Errorf(`failed to close compression writer: %w`, err) + } + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/decrypt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/decrypt.go new file mode 100644 index 0000000000..9429d84b1b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/decrypt.go @@ -0,0 +1,227 @@ +package jwe + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt" + "github.com/lestrrat-go/jwx/v3/jwe/jwebb" +) + +// decrypter is responsible for taking various components to decrypt a message. +// its operation is not concurrency safe. 
You must provide locking yourself +// +//nolint:govet +type decrypter struct { + aad []byte + apu []byte + apv []byte + cek *[]byte + computedAad []byte + iv []byte + keyiv []byte + keysalt []byte + keytag []byte + tag []byte + privkey any + pubkey any + ctalg jwa.ContentEncryptionAlgorithm + keyalg jwa.KeyEncryptionAlgorithm + cipher content_crypt.Cipher + keycount int +} + +// newDecrypter Creates a new Decrypter instance. You must supply the +// rest of parameters via their respective setter methods before +// calling Decrypt(). +// +// privkey must be a private key in its "raw" format (i.e. something like +// *rsa.PrivateKey, instead of jwk.Key) +// +// You should consider this object immutable once you assign values to it. +func newDecrypter(keyalg jwa.KeyEncryptionAlgorithm, ctalg jwa.ContentEncryptionAlgorithm, privkey any) *decrypter { + return &decrypter{ + ctalg: ctalg, + keyalg: keyalg, + privkey: privkey, + } +} + +func (d *decrypter) AgreementPartyUInfo(apu []byte) *decrypter { + d.apu = apu + return d +} + +func (d *decrypter) AgreementPartyVInfo(apv []byte) *decrypter { + d.apv = apv + return d +} + +func (d *decrypter) AuthenticatedData(aad []byte) *decrypter { + d.aad = aad + return d +} + +func (d *decrypter) ComputedAuthenticatedData(aad []byte) *decrypter { + d.computedAad = aad + return d +} + +func (d *decrypter) ContentEncryptionAlgorithm(ctalg jwa.ContentEncryptionAlgorithm) *decrypter { + d.ctalg = ctalg + return d +} + +func (d *decrypter) InitializationVector(iv []byte) *decrypter { + d.iv = iv + return d +} + +func (d *decrypter) KeyCount(keycount int) *decrypter { + d.keycount = keycount + return d +} + +func (d *decrypter) KeyInitializationVector(keyiv []byte) *decrypter { + d.keyiv = keyiv + return d +} + +func (d *decrypter) KeySalt(keysalt []byte) *decrypter { + d.keysalt = keysalt + return d +} + +func (d *decrypter) KeyTag(keytag []byte) *decrypter { + d.keytag = keytag + return d +} + +// PublicKey sets the public key to be used 
in decoding EC based encryptions. +// The key must be in its "raw" format (i.e. *ecdsa.PublicKey, instead of jwk.Key) +func (d *decrypter) PublicKey(pubkey any) *decrypter { + d.pubkey = pubkey + return d +} + +func (d *decrypter) Tag(tag []byte) *decrypter { + d.tag = tag + return d +} + +func (d *decrypter) CEK(ptr *[]byte) *decrypter { + d.cek = ptr + return d +} + +func (d *decrypter) ContentCipher() (content_crypt.Cipher, error) { + if d.cipher == nil { + cipher, err := jwebb.CreateContentCipher(d.ctalg.String()) + if err != nil { + return nil, err + } + d.cipher = cipher + } + + return d.cipher, nil +} + +func (d *decrypter) Decrypt(recipient Recipient, ciphertext []byte, msg *Message) (plaintext []byte, err error) { + cek, keyerr := d.DecryptKey(recipient, msg) + if keyerr != nil { + err = fmt.Errorf(`failed to decrypt key: %w`, keyerr) + return + } + + cipher, ciphererr := d.ContentCipher() + if ciphererr != nil { + err = fmt.Errorf(`failed to fetch content crypt cipher: %w`, ciphererr) + return + } + + computedAad := d.computedAad + if d.aad != nil { + computedAad = append(append(computedAad, tokens.Period), d.aad...) 
+ } + + plaintext, err = cipher.Decrypt(cek, d.iv, ciphertext, d.tag, computedAad) + if err != nil { + err = fmt.Errorf(`failed to decrypt payload: %w`, err) + return + } + + if d.cek != nil { + *d.cek = cek + } + return plaintext, nil +} + +func (d *decrypter) DecryptKey(recipient Recipient, msg *Message) (cek []byte, err error) { + recipientKey := recipient.EncryptedKey() + if kd, ok := d.privkey.(KeyDecrypter); ok { + return kd.DecryptKey(d.keyalg, recipientKey, recipient, msg) + } + + if jwebb.IsDirect(d.keyalg.String()) { + cek, ok := d.privkey.([]byte) + if !ok { + return nil, fmt.Errorf("decrypt key: []byte is required as the key for %s (got %T)", d.keyalg, d.privkey) + } + return jwebb.KeyDecryptDirect(recipientKey, recipientKey, d.keyalg.String(), cek) + } + + if jwebb.IsPBES2(d.keyalg.String()) { + password, ok := d.privkey.([]byte) + if !ok { + return nil, fmt.Errorf("decrypt key: []byte is required as the password for %s (got %T)", d.keyalg, d.privkey) + } + salt := []byte(d.keyalg.String()) + salt = append(salt, byte(0)) + salt = append(salt, d.keysalt...) 
+ return jwebb.KeyDecryptPBES2(recipientKey, recipientKey, d.keyalg.String(), password, salt, d.keycount) + } + + if jwebb.IsAESGCMKW(d.keyalg.String()) { + sharedkey, ok := d.privkey.([]byte) + if !ok { + return nil, fmt.Errorf("decrypt key: []byte is required as the key for %s (got %T)", d.keyalg, d.privkey) + } + return jwebb.KeyDecryptAESGCMKW(recipientKey, recipientKey, d.keyalg.String(), sharedkey, d.keyiv, d.keytag) + } + + if jwebb.IsECDHES(d.keyalg.String()) { + alg, keysize, keywrap, err := jwebb.KeyEncryptionECDHESKeySize(d.keyalg.String(), d.ctalg.String()) + if err != nil { + return nil, fmt.Errorf(`failed to determine ECDH-ES key size: %w`, err) + } + + if !keywrap { + return jwebb.KeyDecryptECDHES(recipientKey, cek, alg, d.apu, d.apv, d.privkey, d.pubkey, keysize) + } + return jwebb.KeyDecryptECDHESKeyWrap(recipientKey, recipientKey, d.keyalg.String(), d.apu, d.apv, d.privkey, d.pubkey, keysize) + } + + if jwebb.IsRSA15(d.keyalg.String()) { + cipher, err := d.ContentCipher() + if err != nil { + return nil, fmt.Errorf(`failed to fetch content crypt cipher: %w`, err) + } + keysize := cipher.KeySize() / 2 + return jwebb.KeyDecryptRSA15(recipientKey, recipientKey, d.privkey, keysize) + } + + if jwebb.IsRSAOAEP(d.keyalg.String()) { + return jwebb.KeyDecryptRSAOAEP(recipientKey, recipientKey, d.keyalg.String(), d.privkey) + } + + if jwebb.IsAESKW(d.keyalg.String()) { + sharedkey, ok := d.privkey.([]byte) + if !ok { + return nil, fmt.Errorf("[]byte is required as the key to decrypt %s", d.keyalg.String()) + } + return jwebb.KeyDecryptAESKW(recipientKey, recipientKey, d.keyalg.String(), sharedkey) + } + + return nil, fmt.Errorf(`unsupported algorithm for key decryption (%s)`, d.keyalg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go new file mode 100644 index 0000000000..e75f342a3d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/encrypt.go @@ -0,0 +1,193 @@ +package jwe + 
+import ( + "crypto/ecdh" + "crypto/ecdsa" + "crypto/rsa" + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/keyconv" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt" + "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen" + "github.com/lestrrat-go/jwx/v3/jwe/jwebb" +) + +// encrypter is responsible for taking various components to encrypt a key. +// its operation is not concurrency safe. You must provide locking yourself +// +//nolint:govet +type encrypter struct { + apu []byte + apv []byte + ctalg jwa.ContentEncryptionAlgorithm + keyalg jwa.KeyEncryptionAlgorithm + pubkey any + rawKey any + cipher content_crypt.Cipher +} + +// newEncrypter creates a new Encrypter instance with all required parameters. +// The content cipher is built internally during construction. +// +// pubkey must be a public key in its "raw" format (i.e. something like +// *rsa.PublicKey, instead of jwk.Key) +// +// You should consider this object immutable once created. 
+func newEncrypter(keyalg jwa.KeyEncryptionAlgorithm, ctalg jwa.ContentEncryptionAlgorithm, pubkey any, rawKey any, apu, apv []byte) (*encrypter, error) { + cipher, err := jwebb.CreateContentCipher(ctalg.String()) + if err != nil { + return nil, fmt.Errorf(`failed to create content cipher: %w`, err) + } + + return &encrypter{ + apu: apu, + apv: apv, + ctalg: ctalg, + keyalg: keyalg, + pubkey: pubkey, + rawKey: rawKey, + cipher: cipher, + }, nil +} + +func (e *encrypter) EncryptKey(cek []byte) (keygen.ByteSource, error) { + if ke, ok := e.pubkey.(KeyEncrypter); ok { + encrypted, err := ke.EncryptKey(cek) + if err != nil { + return nil, err + } + return keygen.ByteKey(encrypted), nil + } + + if jwebb.IsDirect(e.keyalg.String()) { + sharedkey, ok := e.rawKey.([]byte) + if !ok { + return nil, fmt.Errorf("encrypt key: []byte is required as the key for %s (got %T)", e.keyalg, e.rawKey) + } + return jwebb.KeyEncryptDirect(cek, e.keyalg.String(), sharedkey) + } + + if jwebb.IsPBES2(e.keyalg.String()) { + password, ok := e.rawKey.([]byte) + if !ok { + return nil, fmt.Errorf("encrypt key: []byte is required as the password for %s (got %T)", e.keyalg, e.rawKey) + } + return jwebb.KeyEncryptPBES2(cek, e.keyalg.String(), password) + } + + if jwebb.IsAESGCMKW(e.keyalg.String()) { + sharedkey, ok := e.rawKey.([]byte) + if !ok { + return nil, fmt.Errorf("encrypt key: []byte is required as the key for %s (got %T)", e.keyalg, e.rawKey) + } + return jwebb.KeyEncryptAESGCMKW(cek, e.keyalg.String(), sharedkey) + } + + if jwebb.IsECDHES(e.keyalg.String()) { + _, keysize, keywrap, err := jwebb.KeyEncryptionECDHESKeySize(e.keyalg.String(), e.ctalg.String()) + if err != nil { + return nil, fmt.Errorf(`failed to determine ECDH-ES key size: %w`, err) + } + + // Use rawKey for ECDH-ES operations - it should contain the actual key material + keyToUse := e.rawKey + if keyToUse == nil { + keyToUse = e.pubkey + } + + switch key := keyToUse.(type) { + case *ecdsa.PublicKey: + // no op + case 
ecdsa.PublicKey: + keyToUse = &key + case *ecdsa.PrivateKey: + keyToUse = &key.PublicKey + case ecdsa.PrivateKey: + keyToUse = &key.PublicKey + case *ecdh.PublicKey: + // no op + case ecdh.PublicKey: + keyToUse = &key + case ecdh.PrivateKey: + keyToUse = key.PublicKey() + case *ecdh.PrivateKey: + keyToUse = key.PublicKey() + } + + // Determine key type and call appropriate function + switch key := keyToUse.(type) { + case *ecdh.PublicKey: + if key.Curve() == ecdh.X25519() { + if !keywrap { + return jwebb.KeyEncryptECDHESX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) + } + return jwebb.KeyEncryptECDHESKeyWrapX25519(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) + } + + var ecdsaKey *ecdsa.PublicKey + if err := keyconv.ECDHToECDSA(&ecdsaKey, key); err != nil { + return nil, fmt.Errorf(`encrypt: failed to convert ECDH public key to ECDSA: %w`, err) + } + keyToUse = ecdsaKey + } + + switch key := keyToUse.(type) { + case *ecdsa.PublicKey: + if !keywrap { + return jwebb.KeyEncryptECDHESECDSA(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) + } + return jwebb.KeyEncryptECDHESKeyWrapECDSA(cek, e.keyalg.String(), e.apu, e.apv, key, keysize, e.ctalg.String()) + default: + return nil, fmt.Errorf(`encrypt: unsupported key type for ECDH-ES: %T`, keyToUse) + } + } + + if jwebb.IsRSA15(e.keyalg.String()) { + keyToUse := e.rawKey + if keyToUse == nil { + keyToUse = e.pubkey + } + + // Handle rsa.PublicKey by value - convert to pointer + if pk, ok := keyToUse.(rsa.PublicKey); ok { + keyToUse = &pk + } + + var pubkey *rsa.PublicKey + if err := keyconv.RSAPublicKey(&pubkey, keyToUse); err != nil { + return nil, fmt.Errorf(`encrypt: failed to convert to RSA public key: %w`, err) + } + + return jwebb.KeyEncryptRSA15(cek, e.keyalg.String(), pubkey) + } + + if jwebb.IsRSAOAEP(e.keyalg.String()) { + keyToUse := e.rawKey + if keyToUse == nil { + keyToUse = e.pubkey + } + + // Handle rsa.PublicKey by value - convert 
to pointer + if pk, ok := keyToUse.(rsa.PublicKey); ok { + keyToUse = &pk + } + + var pubkey *rsa.PublicKey + if err := keyconv.RSAPublicKey(&pubkey, keyToUse); err != nil { + return nil, fmt.Errorf(`encrypt: failed to convert to RSA public key: %w`, err) + } + + return jwebb.KeyEncryptRSAOAEP(cek, e.keyalg.String(), pubkey) + } + + if jwebb.IsAESKW(e.keyalg.String()) { + sharedkey, ok := e.rawKey.([]byte) + if !ok { + return nil, fmt.Errorf("[]byte is required as the key to encrypt %s", e.keyalg.String()) + } + return jwebb.KeyEncryptAESKW(cek, e.keyalg.String(), sharedkey) + } + + return nil, fmt.Errorf(`unsupported algorithm for key encryption (%s)`, e.keyalg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/errors.go new file mode 100644 index 0000000000..89d276fc44 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/errors.go @@ -0,0 +1,90 @@ +package jwe + +import "errors" + +type encryptError struct { + error +} + +func (e encryptError) Unwrap() error { + return e.error +} + +func (encryptError) Is(err error) bool { + _, ok := err.(encryptError) + return ok +} + +var errDefaultEncryptError = encryptError{errors.New(`encrypt error`)} + +// EncryptError returns an error that can be passed to `errors.Is` to check if the error is an error returned by `jwe.Encrypt`. +func EncryptError() error { + return errDefaultEncryptError +} + +type decryptError struct { + error +} + +func (e decryptError) Unwrap() error { + return e.error +} + +func (decryptError) Is(err error) bool { + _, ok := err.(decryptError) + return ok +} + +var errDefaultDecryptError = decryptError{errors.New(`decrypt error`)} + +// DecryptError returns an error that can be passed to `errors.Is` to check if the error is an error returned by `jwe.Decrypt`. 
+func DecryptError() error { + return errDefaultDecryptError +} + +type recipientError struct { + error +} + +func (e recipientError) Unwrap() error { + return e.error +} + +func (recipientError) Is(err error) bool { + _, ok := err.(recipientError) + return ok +} + +var errDefaultRecipientError = recipientError{errors.New(`recipient error`)} + +// RecipientError returns an error that can be passed to `errors.Is` to check if the error is +// an error that occurred while attempting to decrypt a JWE message for a particular recipient. +// +// For example, if the JWE message failed to parse during `jwe.Decrypt`, it will be a +// `jwe.DecryptError`, but NOT `jwe.RecipientError`. However, if the JWE message could not +// be decrypted for any of the recipients, then it will be a `jwe.RecipientError` +// (actually, it will be _multiple_ `jwe.RecipientError` errors, one for each recipient) +func RecipientError() error { + return errDefaultRecipientError +} + +type parseError struct { + error +} + +func (e parseError) Unwrap() error { + return e.error +} + +func (parseError) Is(err error) bool { + _, ok := err.(parseError) + return ok +} + +var errDefaultParseError = parseError{errors.New(`parse error`)} + +// ParseError returns an error that can be passed to `errors.Is` to check if the error +// is an error returned by `jwe.Parse` and related functions. +func ParseError() error { + return errDefaultParseError +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/filter.go new file mode 100644 index 0000000000..14941604bf --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/filter.go @@ -0,0 +1,36 @@ +package jwe + +import ( + "github.com/lestrrat-go/jwx/v3/transform" +) + +// HeaderFilter is an interface that allows users to filter JWE header fields. 
+// It provides two methods: Filter and Reject; Filter returns a new header with only +// the fields that match the filter criteria, while Reject returns a new header with +// only the fields that DO NOT match the filter. +// +// EXPERIMENTAL: This API is experimental and its interface and behavior is +// subject to change in future releases. This API is not subject to semver +// compatibility guarantees. +type HeaderFilter interface { + Filter(header Headers) (Headers, error) + Reject(header Headers) (Headers, error) +} + +// StandardHeadersFilter returns a HeaderFilter that filters out standard JWE header fields. +// +// You can use this filter to create headers that either only have standard fields +// or only custom fields. +// +// If you need to configure the filter more precisely, consider +// using the HeaderNameFilter directly. +func StandardHeadersFilter() HeaderFilter { + return stdHeadersFilter +} + +var stdHeadersFilter = NewHeaderNameFilter(stdHeaderNames...) + +// NewHeaderNameFilter creates a new HeaderNameFilter with the specified field names. +func NewHeaderNameFilter(names ...string) HeaderFilter { + return transform.NewNameBasedFilter[Headers](names...) 
+} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers.go new file mode 100644 index 0000000000..6e13afde76 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers.go @@ -0,0 +1,95 @@ +package jwe + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" +) + +type isZeroer interface { + isZero() bool +} + +func (h *stdHeaders) isZero() bool { + return h.agreementPartyUInfo == nil && + h.agreementPartyVInfo == nil && + h.algorithm == nil && + h.compression == nil && + h.contentEncryption == nil && + h.contentType == nil && + h.critical == nil && + h.ephemeralPublicKey == nil && + h.jwk == nil && + h.jwkSetURL == nil && + h.keyID == nil && + h.typ == nil && + h.x509CertChain == nil && + h.x509CertThumbprint == nil && + h.x509CertThumbprintS256 == nil && + h.x509URL == nil && + len(h.privateParams) == 0 +} + +func (h *stdHeaders) Clone() (Headers, error) { + dst := NewHeaders() + if err := h.Copy(dst); err != nil { + return nil, fmt.Errorf(`failed to copy header contents to new object: %w`, err) + } + return dst, nil +} + +func (h *stdHeaders) Copy(dst Headers) error { + for _, key := range h.Keys() { + var v any + if err := h.Get(key, &v); err != nil { + return fmt.Errorf(`jwe.Headers: Copy: failed to get header %q: %w`, key, err) + } + + if err := dst.Set(key, v); err != nil { + return fmt.Errorf(`jwe.Headers: Copy: failed to set header %q: %w`, key, err) + } + } + return nil +} + +func (h *stdHeaders) Merge(h2 Headers) (Headers, error) { + h3 := NewHeaders() + + if h != nil { + if err := h.Copy(h3); err != nil { + return nil, fmt.Errorf(`failed to copy headers from receiver: %w`, err) + } + } + + if h2 != nil { + if err := h2.Copy(h3); err != nil { + return nil, fmt.Errorf(`failed to copy headers from argument: %w`, err) + } + } + + return h3, nil +} + +func (h *stdHeaders) Encode() ([]byte, error) { + buf, err := 
json.Marshal(h) + if err != nil { + return nil, fmt.Errorf(`failed to marshal headers to JSON prior to encoding: %w`, err) + } + + return base64.Encode(buf), nil +} + +func (h *stdHeaders) Decode(buf []byte) error { + // base64 json string -> json object representation of header + decoded, err := base64.Decode(buf) + if err != nil { + return fmt.Errorf(`failed to unmarshal base64 encoded buffer: %w`, err) + } + + if err := json.Unmarshal(decoded, h); err != nil { + return fmt.Errorf(`failed to unmarshal buffer: %w`, err) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers_gen.go new file mode 100644 index 0000000000..7773a5d812 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/headers_gen.go @@ -0,0 +1,899 @@ +// Code generated by tools/cmd/genjwe/main.go. DO NOT EDIT. + +package jwe + +import ( + "bytes" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/cert" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" +) + +const ( + AgreementPartyUInfoKey = "apu" + AgreementPartyVInfoKey = "apv" + AlgorithmKey = "alg" + CompressionKey = "zip" + ContentEncryptionKey = "enc" + ContentTypeKey = "cty" + CriticalKey = "crit" + EphemeralPublicKeyKey = "epk" + JWKKey = "jwk" + JWKSetURLKey = "jku" + KeyIDKey = "kid" + TypeKey = "typ" + X509CertChainKey = "x5c" + X509CertThumbprintKey = "x5t" + X509CertThumbprintS256Key = "x5t#S256" + X509URLKey = "x5u" +) + +// Headers describe a standard JWE Header set. It is part of the JWE message +// and is used to represent both Protected and Unprotected headers, +// which in turn can be found in each Recipient object. 
+// If you are not sure how this works, it is strongly recommended that +// you read RFC7516, especially the section +// that describes the full JSON serialization format of JWE messages. +// +// In most cases, you likely want to use the protected headers, as this is the part of the encrypted content +type Headers interface { + AgreementPartyUInfo() ([]byte, bool) + AgreementPartyVInfo() ([]byte, bool) + Algorithm() (jwa.KeyEncryptionAlgorithm, bool) + Compression() (jwa.CompressionAlgorithm, bool) + ContentEncryption() (jwa.ContentEncryptionAlgorithm, bool) + ContentType() (string, bool) + Critical() ([]string, bool) + EphemeralPublicKey() (jwk.Key, bool) + JWK() (jwk.Key, bool) + JWKSetURL() (string, bool) + KeyID() (string, bool) + Type() (string, bool) + X509CertChain() (*cert.Chain, bool) + X509CertThumbprint() (string, bool) + X509CertThumbprintS256() (string, bool) + X509URL() (string, bool) + + // Get is used to extract the value of any field, including non-standard fields, out of the header. + // + // The first argument is the name of the field. The second argument is a pointer + // to a variable that will receive the value of the field. The method returns + // an error if the field does not exist, or if the value cannot be assigned to + // the destination variable. Note that a field is considered to "exist" even if + // the value is empty-ish (e.g. 0, false, ""), as long as it is explicitly set. + Get(string, any) error + Set(string, any) error + Remove(string) error + // Has returns true if the specified header has a value, even if + // the value is empty-ish (e.g. 0, false, "") as long as it has been + // explicitly set. + Has(string) bool + Encode() ([]byte, error) + Decode([]byte) error + Clone() (Headers, error) + Copy(Headers) error + Merge(Headers) (Headers, error) + + // Keys returns a list of the keys contained in this header. + Keys() []string +} + +// stdHeaderNames is a list of all standard header names defined in the JWE specification. 
+var stdHeaderNames = []string{AgreementPartyUInfoKey, AgreementPartyVInfoKey, AlgorithmKey, CompressionKey, ContentEncryptionKey, ContentTypeKey, CriticalKey, EphemeralPublicKeyKey, JWKKey, JWKSetURLKey, KeyIDKey, TypeKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, X509URLKey} + +type stdHeaders struct { + agreementPartyUInfo []byte + agreementPartyVInfo []byte + algorithm *jwa.KeyEncryptionAlgorithm + compression *jwa.CompressionAlgorithm + contentEncryption *jwa.ContentEncryptionAlgorithm + contentType *string + critical []string + ephemeralPublicKey jwk.Key + jwk jwk.Key + jwkSetURL *string + keyID *string + typ *string + x509CertChain *cert.Chain + x509CertThumbprint *string + x509CertThumbprintS256 *string + x509URL *string + privateParams map[string]any + mu *sync.RWMutex +} + +func NewHeaders() Headers { + return &stdHeaders{ + mu: &sync.RWMutex{}, + privateParams: map[string]any{}, + } +} + +func (h *stdHeaders) AgreementPartyUInfo() ([]byte, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.agreementPartyUInfo, h.agreementPartyUInfo != nil +} + +func (h *stdHeaders) AgreementPartyVInfo() ([]byte, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.agreementPartyVInfo, h.agreementPartyVInfo != nil +} + +func (h *stdHeaders) Algorithm() (jwa.KeyEncryptionAlgorithm, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.algorithm == nil { + return jwa.EmptyKeyEncryptionAlgorithm(), false + } + return *(h.algorithm), true +} + +func (h *stdHeaders) Compression() (jwa.CompressionAlgorithm, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.compression == nil { + return jwa.NoCompress(), false + } + return *(h.compression), true +} + +func (h *stdHeaders) ContentEncryption() (jwa.ContentEncryptionAlgorithm, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.contentEncryption == nil { + return jwa.EmptyContentEncryptionAlgorithm(), false + } + return *(h.contentEncryption), true +} + +func (h *stdHeaders) ContentType() 
(string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.contentType == nil { + return "", false + } + return *(h.contentType), true +} + +func (h *stdHeaders) Critical() ([]string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.critical, h.critical != nil +} + +func (h *stdHeaders) EphemeralPublicKey() (jwk.Key, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.ephemeralPublicKey, h.ephemeralPublicKey != nil +} + +func (h *stdHeaders) JWK() (jwk.Key, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.jwk, h.jwk != nil +} + +func (h *stdHeaders) JWKSetURL() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.jwkSetURL == nil { + return "", false + } + return *(h.jwkSetURL), true +} + +func (h *stdHeaders) KeyID() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.keyID == nil { + return "", false + } + return *(h.keyID), true +} + +func (h *stdHeaders) Type() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.typ == nil { + return "", false + } + return *(h.typ), true +} + +func (h *stdHeaders) X509CertChain() (*cert.Chain, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.x509CertChain, h.x509CertChain != nil +} + +func (h *stdHeaders) X509CertThumbprint() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509CertThumbprint == nil { + return "", false + } + return *(h.x509CertThumbprint), true +} + +func (h *stdHeaders) X509CertThumbprintS256() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509CertThumbprintS256 == nil { + return "", false + } + return *(h.x509CertThumbprintS256), true +} + +func (h *stdHeaders) X509URL() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509URL == nil { + return "", false + } + return *(h.x509URL), true +} + +func (h *stdHeaders) PrivateParams() map[string]any { + h.mu.RLock() + defer h.mu.RUnlock() + return h.privateParams +} + +func (h *stdHeaders) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name 
{ + case AgreementPartyUInfoKey: + return h.agreementPartyUInfo != nil + case AgreementPartyVInfoKey: + return h.agreementPartyVInfo != nil + case AlgorithmKey: + return h.algorithm != nil + case CompressionKey: + return h.compression != nil + case ContentEncryptionKey: + return h.contentEncryption != nil + case ContentTypeKey: + return h.contentType != nil + case CriticalKey: + return h.critical != nil + case EphemeralPublicKeyKey: + return h.ephemeralPublicKey != nil + case JWKKey: + return h.jwk != nil + case JWKSetURLKey: + return h.jwkSetURL != nil + case KeyIDKey: + return h.keyID != nil + case TypeKey: + return h.typ != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return h.x509URL != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *stdHeaders) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case AgreementPartyUInfoKey: + if h.agreementPartyUInfo == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.agreementPartyUInfo); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case AgreementPartyVInfoKey: + if h.agreementPartyVInfo == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.agreementPartyVInfo); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case CompressionKey: + if h.compression == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := 
blackmagic.AssignIfCompatible(dst, *(h.compression)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case ContentEncryptionKey: + if h.contentEncryption == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.contentEncryption)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case ContentTypeKey: + if h.contentType == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.contentType)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case CriticalKey: + if h.critical == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.critical); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case EphemeralPublicKeyKey: + if h.ephemeralPublicKey == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.ephemeralPublicKey); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case JWKKey: + if h.jwk == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.jwk); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case JWKSetURLKey: + if h.jwkSetURL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.jwkSetURL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case KeyIDKey: + if h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case TypeKey: + if h.typ == nil { + return 
fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.typ)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case X509CertChainKey: + if h.x509CertChain == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *stdHeaders) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *stdHeaders) setNoLock(name string, value any) error { + switch name { + case AgreementPartyUInfoKey: + if v, ok := value.([]byte); ok { + h.agreementPartyUInfo = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, AgreementPartyUInfoKey, value) + case AgreementPartyVInfoKey: + if v, ok := 
value.([]byte); ok { + h.agreementPartyVInfo = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, AgreementPartyVInfoKey, value) + case AlgorithmKey: + if v, ok := value.(jwa.KeyEncryptionAlgorithm); ok { + h.algorithm = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, AlgorithmKey, value) + case CompressionKey: + if v, ok := value.(jwa.CompressionAlgorithm); ok { + h.compression = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, CompressionKey, value) + case ContentEncryptionKey: + if v, ok := value.(jwa.ContentEncryptionAlgorithm); ok { + if v == jwa.EmptyContentEncryptionAlgorithm() { + return fmt.Errorf(`"enc" field cannot be an empty string`) + } + h.contentEncryption = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ContentEncryptionKey, value) + case ContentTypeKey: + if v, ok := value.(string); ok { + h.contentType = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ContentTypeKey, value) + case CriticalKey: + if v, ok := value.([]string); ok { + h.critical = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, CriticalKey, value) + case EphemeralPublicKeyKey: + if v, ok := value.(jwk.Key); ok { + h.ephemeralPublicKey = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, EphemeralPublicKeyKey, value) + case JWKKey: + if v, ok := value.(jwk.Key); ok { + h.jwk = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JWKKey, value) + case JWKSetURLKey: + if v, ok := value.(string); ok { + h.jwkSetURL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JWKSetURLKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case TypeKey: + if v, ok := value.(string); ok { + h.typ = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, TypeKey, value) + case 
X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (h *stdHeaders) Remove(key string) error { + h.mu.Lock() + defer h.mu.Unlock() + switch key { + case AgreementPartyUInfoKey: + h.agreementPartyUInfo = nil + case AgreementPartyVInfoKey: + h.agreementPartyVInfo = nil + case AlgorithmKey: + h.algorithm = nil + case CompressionKey: + h.compression = nil + case ContentEncryptionKey: + h.contentEncryption = nil + case ContentTypeKey: + h.contentType = nil + case CriticalKey: + h.critical = nil + case EphemeralPublicKeyKey: + h.ephemeralPublicKey = nil + case JWKKey: + h.jwk = nil + case JWKSetURLKey: + h.jwkSetURL = nil + case KeyIDKey: + h.keyID = nil + case TypeKey: + h.typ = nil + case X509CertChainKey: + h.x509CertChain = nil + case X509CertThumbprintKey: + h.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + h.x509CertThumbprintS256 = nil + case X509URLKey: + h.x509URL = nil + default: + delete(h.privateParams, key) + } + return nil +} + +func (h *stdHeaders) UnmarshalJSON(buf []byte) error { + h.agreementPartyUInfo = nil + h.agreementPartyVInfo = nil + h.algorithm = nil + h.compression = nil + h.contentEncryption = nil + h.contentType = nil + 
h.critical = nil + h.ephemeralPublicKey = nil + h.jwk = nil + h.jwkSetURL = nil + h.keyID = nil + h.typ = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. + if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case AgreementPartyUInfoKey: + if err := json.AssignNextBytesToken(&h.agreementPartyUInfo, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AgreementPartyUInfoKey, err) + } + case AgreementPartyVInfoKey: + if err := json.AssignNextBytesToken(&h.agreementPartyVInfo, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AgreementPartyVInfoKey, err) + } + case AlgorithmKey: + var decoded jwa.KeyEncryptionAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &decoded + case CompressionKey: + var decoded jwa.CompressionAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, CompressionKey, err) + } + h.compression = &decoded + case ContentEncryptionKey: + var decoded jwa.ContentEncryptionAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ContentEncryptionKey, err) + } + h.contentEncryption = &decoded + case ContentTypeKey: + if err := 
json.AssignNextStringToken(&h.contentType, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ContentTypeKey, err) + } + case CriticalKey: + var decoded []string + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, CriticalKey, err) + } + h.critical = decoded + case EphemeralPublicKeyKey: + var buf json.RawMessage + if err := dec.Decode(&buf); err != nil { + return fmt.Errorf(`failed to decode value for key %s:%w`, EphemeralPublicKeyKey, err) + } + key, err := jwk.ParseKey(buf) + if err != nil { + return fmt.Errorf(`failed to parse JWK for key %s: %w`, EphemeralPublicKeyKey, err) + } + h.ephemeralPublicKey = key + case JWKKey: + var buf json.RawMessage + if err := dec.Decode(&buf); err != nil { + return fmt.Errorf(`failed to decode value for key %s:%w`, JWKKey, err) + } + key, err := jwk.ParseKey(buf) + if err != nil { + return fmt.Errorf(`failed to parse JWK for key %s: %w`, JWKKey, err) + } + h.jwk = key + case JWKSetURLKey: + if err := json.AssignNextStringToken(&h.jwkSetURL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, JWKSetURLKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case TypeKey: + if err := json.AssignNextStringToken(&h.typ, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, TypeKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := 
json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + decoded, err := registry.Decode(dec, tok) + if err != nil { + return err + } + h.setNoLock(tok, decoded) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + return nil +} + +func (h *stdHeaders) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 16+len(h.privateParams)) + if h.agreementPartyUInfo != nil { + keys = append(keys, AgreementPartyUInfoKey) + } + if h.agreementPartyVInfo != nil { + keys = append(keys, AgreementPartyVInfoKey) + } + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.compression != nil { + keys = append(keys, CompressionKey) + } + if h.contentEncryption != nil { + keys = append(keys, ContentEncryptionKey) + } + if h.contentType != nil { + keys = append(keys, ContentTypeKey) + } + if h.critical != nil { + keys = append(keys, CriticalKey) + } + if h.ephemeralPublicKey != nil { + keys = append(keys, EphemeralPublicKeyKey) + } + if h.jwk != nil { + keys = append(keys, JWKKey) + } + if h.jwkSetURL != nil { + keys = append(keys, JWKSetURLKey) + } + if h.keyID != nil { + keys = append(keys, KeyIDKey) + } + if h.typ != nil { + keys = append(keys, TypeKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +func (h stdHeaders) MarshalJSON() ([]byte, error) { + data := make(map[string]any) + 
keys := make([]string, 0, 16+len(h.privateParams)) + h.mu.RLock() + if h.agreementPartyUInfo != nil { + data[AgreementPartyUInfoKey] = h.agreementPartyUInfo + keys = append(keys, AgreementPartyUInfoKey) + } + if h.agreementPartyVInfo != nil { + data[AgreementPartyVInfoKey] = h.agreementPartyVInfo + keys = append(keys, AgreementPartyVInfoKey) + } + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + keys = append(keys, AlgorithmKey) + } + if h.compression != nil { + data[CompressionKey] = *(h.compression) + keys = append(keys, CompressionKey) + } + if h.contentEncryption != nil { + data[ContentEncryptionKey] = *(h.contentEncryption) + keys = append(keys, ContentEncryptionKey) + } + if h.contentType != nil { + data[ContentTypeKey] = *(h.contentType) + keys = append(keys, ContentTypeKey) + } + if h.critical != nil { + data[CriticalKey] = h.critical + keys = append(keys, CriticalKey) + } + if h.ephemeralPublicKey != nil { + data[EphemeralPublicKeyKey] = h.ephemeralPublicKey + keys = append(keys, EphemeralPublicKeyKey) + } + if h.jwk != nil { + data[JWKKey] = h.jwk + keys = append(keys, JWKKey) + } + if h.jwkSetURL != nil { + data[JWKSetURLKey] = *(h.jwkSetURL) + keys = append(keys, JWKSetURLKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + keys = append(keys, KeyIDKey) + } + if h.typ != nil { + data[TypeKey] = *(h.typ) + keys = append(keys, TypeKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + keys = append(keys, X509URLKey) + } + for k, v := range h.privateParams { + data[k] = v + keys = append(keys, k) + } + 
h.mu.RUnlock() + + sort.Strings(keys) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + enc := json.NewEncoder(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + for i, k := range keys { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(k) + buf.WriteString(`":`) + v := data[k] + switch v := v.(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s`, k) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *stdHeaders) clear() { + h.mu.Lock() + h.agreementPartyUInfo = nil + h.agreementPartyVInfo = nil + h.algorithm = nil + h.compression = nil + h.contentEncryption = nil + h.contentType = nil + h.critical = nil + h.ephemeralPublicKey = nil + h.jwk = nil + h.jwkSetURL = nil + h.keyID = nil + h.typ = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + h.privateParams = map[string]any{} + h.mu.Unlock() +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/interface.go new file mode 100644 index 0000000000..91ad8cb809 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/interface.go @@ -0,0 +1,207 @@ +package jwe + +import ( + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen" +) + +// KeyEncrypter is an interface for object that can encrypt a +// content encryption key. +// +// You can use this in place of a regular key (i.e. 
in jwe.WithKey()) +// to encrypt the content encryption key in a JWE message without +// having to expose the secret key in memory, for example, when you +// want to use hardware security modules (HSMs) to encrypt the key. +// +// This API is experimental and may change without notice, even +// in minor releases. +type KeyEncrypter interface { + // Algorithm returns the algorithm used to encrypt the key. + Algorithm() jwa.KeyEncryptionAlgorithm + + // EncryptKey encrypts the given content encryption key. + EncryptKey([]byte) ([]byte, error) +} + +// KeyIDer is an interface for things that can return a key ID. +// +// As of this writing, this is solely used to identify KeyEncrypter +// objects that also carry a key ID on its own. +type KeyIDer interface { + KeyID() (string, bool) +} + +// KeyDecrypter is an interface for objects that can decrypt a content +// encryption key. +// +// You can use this in place of a regular key (i.e. in jwe.WithKey()) +// to decrypt the encrypted key in a JWE message without having to +// expose the secret key in memory, for example, when you want to use +// hardware security modules (HSMs) to decrypt the key. +// +// This API is experimental and may change without notice, even +// in minor releases. +type KeyDecrypter interface { + // Decrypt decrypts the encrypted key of a JWE message. + // + // Make sure you understand how JWE messages are structured. + // + // For example, while in most circumstances a JWE message will only have one recipient, + // a JWE message may contain multiple recipients, each with their own + // encrypted key. This method will be called for each recipient, instead of + // just once for a message. + // + // Also, header values could be found in either protected/unprotected headers + // of a JWE message, as well as in protected/unprotected headers for each recipient. 
+ // When checking a header value, you can decide to use either one, or both, but you + // must be aware that there are multiple places to look for. + DecryptKey(alg jwa.KeyEncryptionAlgorithm, encryptedKey []byte, recipient Recipient, message *Message) ([]byte, error) +} + +// Recipient holds the encrypted key and hints to decrypt the key +type Recipient interface { + Headers() Headers + EncryptedKey() []byte + SetHeaders(Headers) error + SetEncryptedKey([]byte) error +} + +type stdRecipient struct { + // Comments on each field are taken from https://datatracker.ietf.org/doc/html/rfc7516 + // + // header + // The "header" member MUST be present and contain the value JWE Per- + // Recipient Unprotected Header when the JWE Per-Recipient + // Unprotected Header value is non-empty; otherwise, it MUST be + // absent. This value is represented as an unencoded JSON object, + // rather than as a string. These Header Parameter values are not + // integrity protected. + // + // At least one of the "header", "protected", and "unprotected" members + // MUST be present so that "alg" and "enc" Header Parameter values are + // conveyed for each recipient computation. + // + // JWX note: see Message.unprotectedHeaders + headers Headers + + // encrypted_key + // The "encrypted_key" member MUST be present and contain the value + // BASE64URL(JWE Encrypted Key) when the JWE Encrypted Key value is + // non-empty; otherwise, it MUST be absent. + encryptedKey []byte +} + +// Message contains the entire encrypted JWE message. You should not +// expect to use Message for anything other than inspecting the +// state of an encrypted message. This is because encryption is +// highly context-sensitive, and once we parse the original payload +// into an object, we may not always be able to recreate the exact +// context in which the encryption happened. 
+// +// For example, it is totally valid for if the protected header's +// integrity was calculated using a non-standard line breaks: +// +// {"a dummy": +// "protected header"} +// +// Once parsed, though, we can only serialize the protected header as: +// +// {"a dummy":"protected header"} +// +// which would obviously result in a contradicting integrity value +// if we tried to re-calculate it from a parsed message. +// +//nolint:govet +type Message struct { + // Comments on each field are taken from https://datatracker.ietf.org/doc/html/rfc7516 + // + // protected + // The "protected" member MUST be present and contain the value + // BASE64URL(UTF8(JWE Protected Header)) when the JWE Protected + // Header value is non-empty; otherwise, it MUST be absent. These + // Header Parameter values are integrity protected. + protectedHeaders Headers + + // unprotected + // The "unprotected" member MUST be present and contain the value JWE + // Shared Unprotected Header when the JWE Shared Unprotected Header + // value is non-empty; otherwise, it MUST be absent. This value is + // represented as an unencoded JSON object, rather than as a string. + // These Header Parameter values are not integrity protected. + // + // JWX note: This field is NOT mutually exclusive with per-recipient + // headers within the implementation because... it's too much work. + // It is _never_ populated (we don't provide a way to do this) upon encryption. + // When decrypting, if present its values are always merged with + // per-recipient header. + unprotectedHeaders Headers + + // iv + // The "iv" member MUST be present and contain the value + // BASE64URL(JWE Initialization Vector) when the JWE Initialization + // Vector value is non-empty; otherwise, it MUST be absent. + initializationVector []byte + + // aad + // The "aad" member MUST be present and contain the value + // BASE64URL(JWE AAD)) when the JWE AAD value is non-empty; + // otherwise, it MUST be absent. 
A JWE AAD value can be included to + // supply a base64url-encoded value to be integrity protected but not + // encrypted. + authenticatedData []byte + + // ciphertext + // The "ciphertext" member MUST be present and contain the value + // BASE64URL(JWE Ciphertext). + cipherText []byte + + // tag + // The "tag" member MUST be present and contain the value + // BASE64URL(JWE Authentication Tag) when the JWE Authentication Tag + // value is non-empty; otherwise, it MUST be absent. + tag []byte + + // recipients + // The "recipients" member value MUST be an array of JSON objects. + // Each object contains information specific to a single recipient. + // This member MUST be present with exactly one array element per + // recipient, even if some or all of the array element values are the + // empty JSON object "{}" (which can happen when all Header Parameter + // values are shared between all recipients and when no encrypted key + // is used, such as when doing Direct Encryption). + // + // Some Header Parameters, including the "alg" parameter, can be shared + // among all recipient computations. Header Parameters in the JWE + // Protected Header and JWE Shared Unprotected Header values are shared + // among all recipients. + // + // The Header Parameter values used when creating or validating per- + // recipient ciphertext and Authentication Tag values are the union of + // the three sets of Header Parameter values that may be present: (1) + // the JWE Protected Header represented in the "protected" member, (2) + // the JWE Shared Unprotected Header represented in the "unprotected" + // member, and (3) the JWE Per-Recipient Unprotected Header represented + // in the "header" member of the recipient's array element. The union + // of these sets of Header Parameters comprises the JOSE Header. The + // Header Parameter names in the three locations MUST be disjoint. 
+ recipients []Recipient + + // TODO: Additional members can be present in both the JSON objects defined + // above; if not understood by implementations encountering them, they + // MUST be ignored. + // privateParams map[string]any + + // These two fields below are not available for the public consumers of this object. + // rawProtectedHeaders stores the original protected header buffer + rawProtectedHeaders []byte + // storeProtectedHeaders is a hint to be used in UnmarshalJSON(). + // When this flag is true, UnmarshalJSON() will populate the + // rawProtectedHeaders field + storeProtectedHeaders bool +} + +// populater is an interface for things that may modify the +// JWE header. e.g. ByteWithECPrivateKey +type populater interface { + Populate(keygen.Setter) error +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/BUILD.bazel new file mode 100644 index 0000000000..4ed4c53fa3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/BUILD.bazel @@ -0,0 +1,22 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "aescbc", + srcs = ["aescbc.go"], + importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc", + visibility = ["//:__subpackages__"], + deps = ["//internal/pool"], +) + +go_test( + name = "aescbc_test", + srcs = ["aescbc_test.go"], + embed = [":aescbc"], + deps = ["@com_github_stretchr_testify//require"] +) + +alias( + name = "go_default_library", + actual = ":aescbc", + visibility = ["//jwe:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go new file mode 100644 index 0000000000..4f08c4936f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc/aescbc.go @@ -0,0 +1,268 @@ +package aescbc + +import ( + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + 
"crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "sync/atomic" + + "github.com/lestrrat-go/jwx/v3/internal/pool" +) + +const ( + NonceSize = 16 +) + +const defaultBufSize int64 = 256 * 1024 * 1024 + +var maxBufSize atomic.Int64 + +func init() { + SetMaxBufferSize(defaultBufSize) +} + +func SetMaxBufferSize(siz int64) { + if siz <= 0 { + siz = defaultBufSize + } + maxBufSize.Store(siz) +} + +func pad(buf []byte, n int) []byte { + rem := n - len(buf)%n + if rem == 0 { + return buf + } + + bufsiz := len(buf) + rem + mbs := maxBufSize.Load() + if int64(bufsiz) > mbs { + panic(fmt.Errorf("failed to allocate buffer")) + } + newbuf := make([]byte, bufsiz) + copy(newbuf, buf) + + for i := len(buf); i < len(newbuf); i++ { + newbuf[i] = byte(rem) + } + return newbuf +} + +// ref. https://github.com/golang/go/blob/c3db64c0f45e8f2d75c5b59401e0fc925701b6f4/src/crypto/tls/conn.go#L279-L324 +// +// extractPadding returns, in constant time, the length of the padding to remove +// from the end of payload. It also returns a byte which is equal to 255 if the +// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2. +func extractPadding(payload []byte) (toRemove int, good byte) { + if len(payload) < 1 { + return 0, 0 + } + + paddingLen := payload[len(payload)-1] + t := uint(len(payload)) - uint(paddingLen) + // if len(payload) > paddingLen then the MSB of t is zero + good = byte(int32(^t) >> 31) + + // The maximum possible padding length plus the actual length field + toCheck := 256 + // The length of the padded data is public, so we can use an if here + toCheck = min(toCheck, len(payload)) + + for i := 1; i <= toCheck; i++ { + t := uint(paddingLen) - uint(i) + // if i <= paddingLen then the MSB of t is zero + mask := byte(int32(^t) >> 31) + b := payload[len(payload)-i] + good &^= mask&paddingLen ^ mask&b + } + + // We AND together the bits of good and replicate the result across + // all the bits. 
+ good &= good << 4 + good &= good << 2 + good &= good << 1 + good = uint8(int8(good) >> 7) + + // Zero the padding length on error. This ensures any unchecked bytes + // are included in the MAC. Otherwise, an attacker that could + // distinguish MAC failures from padding failures could mount an attack + // similar to POODLE in SSL 3.0: given a good ciphertext that uses a + // full block's worth of padding, replace the final block with another + // block. If the MAC check passed but the padding check failed, the + // last byte of that block decrypted to the block size. + // + // See also macAndPaddingGood logic below. + paddingLen &= good + + toRemove = int(paddingLen) + return +} + +type Hmac struct { + blockCipher cipher.Block + hash func() hash.Hash + keysize int + tagsize int + integrityKey []byte +} + +type BlockCipherFunc func([]byte) (cipher.Block, error) + +func New(key []byte, f BlockCipherFunc) (hmac *Hmac, err error) { + keysize := len(key) / 2 + ikey := key[:keysize] + ekey := key[keysize:] + + bc, ciphererr := f(ekey) + if ciphererr != nil { + err = fmt.Errorf(`failed to execute block cipher function: %w`, ciphererr) + return + } + + var hfunc func() hash.Hash + switch keysize { + case 16: + hfunc = sha256.New + case 24: + hfunc = sha512.New384 + case 32: + hfunc = sha512.New + default: + return nil, fmt.Errorf("unsupported key size %d", keysize) + } + + return &Hmac{ + blockCipher: bc, + hash: hfunc, + integrityKey: ikey, + keysize: keysize, + tagsize: keysize, // NonceSize, + // While investigating GH #207, I stumbled upon another problem where + // the computed tags don't match on decrypt. After poking through the + // code using a bunch of debug statements, I've finally found out that + // tagsize = keysize makes the whole thing work. 
+ }, nil +} + +// NonceSize fulfills the crypto.AEAD interface +func (c Hmac) NonceSize() int { + return NonceSize +} + +// Overhead fulfills the crypto.AEAD interface +func (c Hmac) Overhead() int { + return c.blockCipher.BlockSize() + c.tagsize +} + +func (c Hmac) ComputeAuthTag(aad, nonce, ciphertext []byte) ([]byte, error) { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], uint64(len(aad)*8)) + + h := hmac.New(c.hash, c.integrityKey) + + // compute the tag + // no need to check errors because Write never returns an error: https://pkg.go.dev/hash#Hash + // + // > Write (via the embedded io.Writer interface) adds more data to the running hash. + // > It never returns an error. + h.Write(aad) + h.Write(nonce) + h.Write(ciphertext) + h.Write(buf[:]) + s := h.Sum(nil) + return s[:c.tagsize], nil +} + +func ensureSize(dst []byte, n int) []byte { + // if the dst buffer has enough length just copy the relevant parts to it. + // Otherwise create a new slice that's big enough, and operate on that + // Note: I think go-jose has a bug in that it checks for cap(), but not len(). + ret := dst + if diff := n - len(dst); diff > 0 { + // dst is not big enough + ret = make([]byte, n) + copy(ret, dst) + } + return ret +} + +// Seal fulfills the crypto.AEAD interface +func (c Hmac) Seal(dst, nonce, plaintext, data []byte) []byte { + ctlen := len(plaintext) + bufsiz := ctlen + c.Overhead() + mbs := maxBufSize.Load() + + if int64(bufsiz) > mbs { + panic(fmt.Errorf("failed to allocate buffer")) + } + ciphertext := make([]byte, bufsiz)[:ctlen] + copy(ciphertext, plaintext) + ciphertext = pad(ciphertext, c.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(c.blockCipher, nonce) + cbc.CryptBlocks(ciphertext, ciphertext) + + authtag, err := c.ComputeAuthTag(data, nonce, ciphertext) + if err != nil { + // Hmac implements cipher.AEAD interface. Seal can't return error. + // But currently it never reach here because of Hmac.ComputeAuthTag doesn't return error. 
+ panic(fmt.Errorf("failed to seal on hmac: %v", err)) + } + + retlen := len(dst) + len(ciphertext) + len(authtag) + + ret := ensureSize(dst, retlen) + out := ret[len(dst):] + n := copy(out, ciphertext) + copy(out[n:], authtag) + + return ret +} + +// Open fulfills the crypto.AEAD interface +func (c Hmac) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < c.keysize { + return nil, fmt.Errorf(`invalid ciphertext (too short)`) + } + + tagOffset := len(ciphertext) - c.tagsize + if tagOffset%c.blockCipher.BlockSize() != 0 { + return nil, fmt.Errorf( + "invalid ciphertext (invalid length: %d %% %d != 0)", + tagOffset, + c.blockCipher.BlockSize(), + ) + } + tag := ciphertext[tagOffset:] + ciphertext = ciphertext[:tagOffset] + + expectedTag, err := c.ComputeAuthTag(data, nonce, ciphertext[:tagOffset]) + if err != nil { + return nil, fmt.Errorf(`failed to compute auth tag: %w`, err) + } + + cbc := cipher.NewCBCDecrypter(c.blockCipher, nonce) + buf := pool.ByteSlice().GetCapacity(tagOffset) + defer pool.ByteSlice().Put(buf) + buf = buf[:tagOffset] + + cbc.CryptBlocks(buf, ciphertext) + + toRemove, good := extractPadding(buf) + cmp := subtle.ConstantTimeCompare(expectedTag, tag) & int(good) + if cmp != 1 { + return nil, errors.New(`invalid ciphertext`) + } + + plaintext := buf[:len(buf)-toRemove] + ret := ensureSize(dst, len(plaintext)) + out := ret[len(dst):] + copy(out, plaintext) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/BUILD.bazel new file mode 100644 index 0000000000..cf642c744d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/BUILD.bazel @@ -0,0 +1,34 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "cipher", + srcs = [ + "cipher.go", + "interface.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/cipher", + visibility = 
["//:__subpackages__"], + deps = [ + "//jwa", + "//jwe/internal/aescbc", + "//jwe/internal/keygen", + "//internal/tokens", + ], +) + +go_test( + name = "cipher_test", + srcs = ["cipher_test.go"], + deps = [ + ":cipher", + "//jwa", + "//internal/tokens", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":cipher", + visibility = ["//jwe:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/cipher.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/cipher.go new file mode 100644 index 0000000000..9b9a40d00d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/cipher.go @@ -0,0 +1,169 @@ +package cipher + +import ( + "crypto/aes" + "crypto/cipher" + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc" + "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen" +) + +var gcm = &gcmFetcher{} +var cbc = &cbcFetcher{} + +func (f gcmFetcher) Fetch(key []byte, size int) (cipher.AEAD, error) { + if len(key) != size { + return nil, fmt.Errorf(`key size (%d) does not match expected key size (%d)`, len(key), size) + } + aescipher, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf(`cipher: failed to create AES cipher for GCM: %w`, err) + } + + aead, err := cipher.NewGCM(aescipher) + if err != nil { + return nil, fmt.Errorf(`failed to create GCM for cipher: %w`, err) + } + return aead, nil +} + +func (f cbcFetcher) Fetch(key []byte, size int) (cipher.AEAD, error) { + if len(key) != size { + return nil, fmt.Errorf(`key size (%d) does not match expected key size (%d)`, len(key), size) + } + aead, err := aescbc.New(key, aes.NewCipher) + if err != nil { + return nil, fmt.Errorf(`cipher: failed to create AES cipher for CBC: %w`, err) + } + return aead, nil +} + +func (c AesContentCipher) KeySize() int { + return c.keysize +} + +func (c AesContentCipher) TagSize() int { + return c.tagsize +} 
+ +func NewAES(alg string) (*AesContentCipher, error) { + var keysize int + var tagsize int + var fetcher Fetcher + switch alg { + case tokens.A128GCM: + keysize = 16 + tagsize = 16 + fetcher = gcm + case tokens.A192GCM: + keysize = 24 + tagsize = 16 + fetcher = gcm + case tokens.A256GCM: + keysize = 32 + tagsize = 16 + fetcher = gcm + case tokens.A128CBC_HS256: + tagsize = 16 + keysize = tagsize * 2 + fetcher = cbc + case tokens.A192CBC_HS384: + tagsize = 24 + keysize = tagsize * 2 + fetcher = cbc + case tokens.A256CBC_HS512: + tagsize = 32 + keysize = tagsize * 2 + fetcher = cbc + default: + return nil, fmt.Errorf("failed to create AES content cipher: invalid algorithm (%s)", alg) + } + + return &AesContentCipher{ + keysize: keysize, + tagsize: tagsize, + fetch: fetcher, + }, nil +} + +func (c AesContentCipher) Encrypt(cek, plaintext, aad []byte) (iv, ciphertxt, tag []byte, err error) { + var aead cipher.AEAD + aead, err = c.fetch.Fetch(cek, c.keysize) + if err != nil { + return nil, nil, nil, fmt.Errorf(`failed to fetch AEAD: %w`, err) + } + + // Seal may panic (argh!), so protect ourselves from that + defer func() { + if e := recover(); e != nil { + switch e := e.(type) { + case error: + err = e + default: + err = fmt.Errorf("%s", e) + } + err = fmt.Errorf(`failed to encrypt: %w`, err) + } + }() + + if c.NonceGenerator != nil { + iv, err = c.NonceGenerator(aead.NonceSize()) + if err != nil { + return nil, nil, nil, fmt.Errorf(`failed to generate nonce: %w`, err) + } + } else { + bs, err := keygen.Random(aead.NonceSize()) + if err != nil { + return nil, nil, nil, fmt.Errorf(`failed to generate random nonce: %w`, err) + } + iv = bs.Bytes() + } + + combined := aead.Seal(nil, iv, plaintext, aad) + tagoffset := len(combined) - c.TagSize() + + if tagoffset < 0 { + panic(fmt.Sprintf("tag offset is less than 0 (combined len = %d, tagsize = %d)", len(combined), c.TagSize())) + } + + tag = combined[tagoffset:] + ciphertxt = make([]byte, tagoffset) + copy(ciphertxt, 
combined[:tagoffset]) + + return +} + +func (c AesContentCipher) Decrypt(cek, iv, ciphertxt, tag, aad []byte) (plaintext []byte, err error) { + aead, err := c.fetch.Fetch(cek, c.keysize) + if err != nil { + return nil, fmt.Errorf(`failed to fetch AEAD data: %w`, err) + } + + // Open may panic (argh!), so protect ourselves from that + defer func() { + if e := recover(); e != nil { + switch e := e.(type) { + case error: + err = e + default: + err = fmt.Errorf(`%s`, e) + } + err = fmt.Errorf(`failed to decrypt: %w`, err) + return + } + }() + + combined := make([]byte, len(ciphertxt)+len(tag)) + copy(combined, ciphertxt) + copy(combined[len(ciphertxt):], tag) + + buf, aeaderr := aead.Open(nil, iv, combined, aad) + if aeaderr != nil { + err = fmt.Errorf(`aead.Open failed: %w`, aeaderr) + return + } + plaintext = buf + return +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/interface.go new file mode 100644 index 0000000000..a03e15b159 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/cipher/interface.go @@ -0,0 +1,32 @@ +package cipher + +import ( + "crypto/cipher" +) + +const ( + TagSize = 16 +) + +// ContentCipher knows how to encrypt/decrypt the content given a content +// encryption key and other data +type ContentCipher interface { + KeySize() int + Encrypt(cek, aad, plaintext []byte) ([]byte, []byte, []byte, error) + Decrypt(cek, iv, aad, ciphertext, tag []byte) ([]byte, error) +} + +type Fetcher interface { + Fetch([]byte, int) (cipher.AEAD, error) +} + +type gcmFetcher struct{} +type cbcFetcher struct{} + +// AesContentCipher represents a cipher based on AES +type AesContentCipher struct { + NonceGenerator func(int) ([]byte, error) + fetch Fetcher + keysize int + tagsize int +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/BUILD.bazel new file mode 
100644 index 0000000000..59aeb2cd27 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/BUILD.bazel @@ -0,0 +1,24 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "concatkdf", + srcs = ["concatkdf.go"], + importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf", + visibility = ["//:__subpackages__"], +) + +go_test( + name = "concatkdf_test", + srcs = ["concatkdf_test.go"], + embed = [":concatkdf"], + deps = [ + "//jwa", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":concatkdf", + visibility = ["//jwe:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/concatkdf.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/concatkdf.go new file mode 100644 index 0000000000..3691830a63 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf/concatkdf.go @@ -0,0 +1,66 @@ +package concatkdf + +import ( + "crypto" + "encoding/binary" + "fmt" +) + +type KDF struct { + buf []byte + otherinfo []byte + z []byte + hash crypto.Hash +} + +func ndata(src []byte) []byte { + buf := make([]byte, 4+len(src)) + binary.BigEndian.PutUint32(buf, uint32(len(src))) + copy(buf[4:], src) + return buf +} + +func New(hash crypto.Hash, alg, Z, apu, apv, pubinfo, privinfo []byte) *KDF { + algbuf := ndata(alg) + apubuf := ndata(apu) + apvbuf := ndata(apv) + + concat := make([]byte, len(algbuf)+len(apubuf)+len(apvbuf)+len(pubinfo)+len(privinfo)) + n := copy(concat, algbuf) + n += copy(concat[n:], apubuf) + n += copy(concat[n:], apvbuf) + n += copy(concat[n:], pubinfo) + copy(concat[n:], privinfo) + + return &KDF{ + hash: hash, + otherinfo: concat, + z: Z, + } +} + +func (k *KDF) Read(out []byte) (int, error) { + var round uint32 = 1 + h := k.hash.New() + + for len(out) > len(k.buf) { + h.Reset() + + if err := binary.Write(h, binary.BigEndian, round); err != nil { + return 0, fmt.Errorf(`failed to 
write round using kdf: %w`, err) + } + if _, err := h.Write(k.z); err != nil { + return 0, fmt.Errorf(`failed to write z using kdf: %w`, err) + } + if _, err := h.Write(k.otherinfo); err != nil { + return 0, fmt.Errorf(`failed to write other info using kdf: %w`, err) + } + + k.buf = append(k.buf, h.Sum(nil)...) + round++ + } + + n := copy(out, k.buf[:len(out)]) + k.buf = k.buf[len(out):] + return n, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/BUILD.bazel new file mode 100644 index 0000000000..bb395200f8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/BUILD.bazel @@ -0,0 +1,21 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "content_crypt", + srcs = [ + "content_crypt.go", + "interface.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt", + visibility = ["//:__subpackages__"], + deps = [ + "//jwa", + "//jwe/internal/cipher", + ], +) + +alias( + name = "go_default_library", + actual = ":content_crypt", + visibility = ["//jwe:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/content_crypt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/content_crypt.go new file mode 100644 index 0000000000..0ef45ed953 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/content_crypt.go @@ -0,0 +1,43 @@ +package content_crypt //nolint:golint + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwe/internal/cipher" +) + +func (c Generic) Algorithm() jwa.ContentEncryptionAlgorithm { + return c.alg +} + +func (c Generic) Encrypt(cek, plaintext, aad []byte) ([]byte, []byte, []byte, error) { + iv, encrypted, tag, err := c.cipher.Encrypt(cek, plaintext, aad) + if err != nil { + return nil, nil, nil, fmt.Errorf(`failed to crypt content: %w`, err) + 
} + + return iv, encrypted, tag, nil +} + +func (c Generic) Decrypt(cek, iv, ciphertext, tag, aad []byte) ([]byte, error) { + return c.cipher.Decrypt(cek, iv, ciphertext, tag, aad) +} + +func NewGeneric(alg jwa.ContentEncryptionAlgorithm) (*Generic, error) { + c, err := cipher.NewAES(alg.String()) + if err != nil { + return nil, fmt.Errorf(`aes crypt: failed to create content cipher: %w`, err) + } + + return &Generic{ + alg: alg, + cipher: c, + keysize: c.KeySize(), + tagsize: 16, + }, nil +} + +func (c Generic) KeySize() int { + return c.keysize +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/interface.go new file mode 100644 index 0000000000..84b42fe07d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt/interface.go @@ -0,0 +1,20 @@ +package content_crypt //nolint:golint + +import ( + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwe/internal/cipher" +) + +// Generic encrypts a message by applying all the necessary +// modifications to the keys and the contents +type Generic struct { + alg jwa.ContentEncryptionAlgorithm + keysize int + tagsize int + cipher cipher.ContentCipher +} + +type Cipher interface { + Decrypt([]byte, []byte, []byte, []byte, []byte) ([]byte, error) + KeySize() int +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/BUILD.bazel new file mode 100644 index 0000000000..bde8eb68f7 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/BUILD.bazel @@ -0,0 +1,24 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "keygen", + srcs = [ + "interface.go", + "keygen.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen", + visibility = ["//:__subpackages__"], + deps = [ + "//internal/ecutil", + "//jwa", + "//jwe/internal/concatkdf", + 
"//internal/tokens", + "//jwk", + ], +) + +alias( + name = "go_default_library", + actual = ":keygen", + visibility = ["//jwe:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/interface.go new file mode 100644 index 0000000000..7f8fb961a2 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/interface.go @@ -0,0 +1,40 @@ +package keygen + +// ByteKey is a generated key that only has the key's byte buffer +// as its instance data. If a key needs to do more, such as providing +// values to be set in a JWE header, that key type wraps a ByteKey +type ByteKey []byte + +// ByteWithECPublicKey holds the EC private key that generated +// the key along with the key itself. This is required to set the +// proper values in the JWE headers +type ByteWithECPublicKey struct { + ByteKey + + PublicKey any +} + +type ByteWithIVAndTag struct { + ByteKey + + IV []byte + Tag []byte +} + +type ByteWithSaltAndCount struct { + ByteKey + + Salt []byte + Count int +} + +// ByteSource is an interface for things that return a byte sequence. +// This is used for KeyGenerator so that the result of computations can +// carry more than just the generate byte sequence. 
+type ByteSource interface { + Bytes() []byte +} + +type Setter interface { + Set(string, any) error +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/keygen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/keygen.go new file mode 100644 index 0000000000..daa7599d9f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/internal/keygen/keygen.go @@ -0,0 +1,139 @@ +package keygen + +import ( + "crypto" + "crypto/ecdh" + "crypto/ecdsa" + "crypto/rand" + "encoding/binary" + "fmt" + "io" + + "github.com/lestrrat-go/jwx/v3/internal/ecutil" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf" + "github.com/lestrrat-go/jwx/v3/jwk" +) + +// Bytes returns the byte from this ByteKey +func (k ByteKey) Bytes() []byte { + return []byte(k) +} + +func Random(n int) (ByteSource, error) { + buf := make([]byte, n) + if _, err := io.ReadFull(rand.Reader, buf); err != nil { + return nil, fmt.Errorf(`failed to read from rand.Reader: %w`, err) + } + return ByteKey(buf), nil +} + +// Ecdhes generates a new key using ECDH-ES +func Ecdhes(alg string, enc string, keysize int, pubkey *ecdsa.PublicKey, apu, apv []byte) (ByteSource, error) { + priv, err := ecdsa.GenerateKey(pubkey.Curve, rand.Reader) + if err != nil { + return nil, fmt.Errorf(`failed to generate key for ECDH-ES: %w`, err) + } + + var algorithm string + if alg == tokens.ECDH_ES { + algorithm = enc + } else { + algorithm = alg + } + + pubinfo := make([]byte, 4) + binary.BigEndian.PutUint32(pubinfo, uint32(keysize)*8) + + if !priv.PublicKey.Curve.IsOnCurve(pubkey.X, pubkey.Y) { + return nil, fmt.Errorf(`public key used does not contain a point (X,Y) on the curve`) + } + z, _ := priv.PublicKey.Curve.ScalarMult(pubkey.X, pubkey.Y, priv.D.Bytes()) + zBytes := ecutil.AllocECPointBuffer(z, priv.PublicKey.Curve) + defer ecutil.ReleaseECPointBuffer(zBytes) + kdf := concatkdf.New(crypto.SHA256, []byte(algorithm), zBytes, apu, apv, pubinfo, 
[]byte{}) + kek := make([]byte, keysize) + if _, err := kdf.Read(kek); err != nil { + return nil, fmt.Errorf(`failed to read kdf: %w`, err) + } + + return ByteWithECPublicKey{ + PublicKey: &priv.PublicKey, + ByteKey: ByteKey(kek), + }, nil +} + +// X25519 generates a new key using ECDH-ES with X25519 +func X25519(alg string, enc string, keysize int, pubkey *ecdh.PublicKey) (ByteSource, error) { + priv, err := ecdh.X25519().GenerateKey(rand.Reader) + if err != nil { + return nil, fmt.Errorf(`failed to generate key for X25519: %w`, err) + } + + var algorithm string + if alg == tokens.ECDH_ES { + algorithm = enc + } else { + algorithm = alg + } + + pubinfo := make([]byte, 4) + binary.BigEndian.PutUint32(pubinfo, uint32(keysize)*8) + + zBytes, err := priv.ECDH(pubkey) + if err != nil { + return nil, fmt.Errorf(`failed to compute Z: %w`, err) + } + kdf := concatkdf.New(crypto.SHA256, []byte(algorithm), zBytes, []byte{}, []byte{}, pubinfo, []byte{}) + kek := make([]byte, keysize) + if _, err := kdf.Read(kek); err != nil { + return nil, fmt.Errorf(`failed to read kdf: %w`, err) + } + + return ByteWithECPublicKey{ + PublicKey: priv.PublicKey(), + ByteKey: ByteKey(kek), + }, nil +} + +// HeaderPopulate populates the header with the required EC-DSA public key +// information ('epk' key) +func (k ByteWithECPublicKey) Populate(h Setter) error { + key, err := jwk.Import(k.PublicKey) + if err != nil { + return fmt.Errorf(`failed to create JWK: %w`, err) + } + + if err := h.Set("epk", key); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + return nil +} + +// HeaderPopulate populates the header with the required AES GCM +// parameters ('iv' and 'tag') +func (k ByteWithIVAndTag) Populate(h Setter) error { + if err := h.Set("iv", k.IV); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + + if err := h.Set("tag", k.Tag); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + + return nil +} + +// HeaderPopulate 
populates the header with the required PBES2 +// parameters ('p2s' and 'p2c') +func (k ByteWithSaltAndCount) Populate(h Setter) error { + if err := h.Set("p2c", k.Count); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + + if err := h.Set("p2s", k.Salt); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/io.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/io.go new file mode 100644 index 0000000000..a5d6aca8a3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/io.go @@ -0,0 +1,36 @@ +// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT. + +package jwe + +import ( + "fmt" + "io/fs" + "os" +) + +type sysFS struct{} + +func (sysFS) Open(path string) (fs.File, error) { + return os.Open(path) +} + +func ReadFile(path string, options ...ReadFileOption) (*Message, error) { + + var srcFS fs.FS = sysFS{} + for _, option := range options { + switch option.Ident() { + case identFS{}: + if err := option.Value(&srcFS); err != nil { + return nil, fmt.Errorf("failed to set fs.FS: %w", err) + } + } + } + + f, err := srcFS.Open(path) + if err != nil { + return nil, err + } + + defer f.Close() + return ParseReader(f) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go new file mode 100644 index 0000000000..5b9c92771a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwe.go @@ -0,0 +1,1118 @@ +//go:generate ../tools/cmd/genjwe.sh + +// Package jwe implements JWE as described in https://tools.ietf.org/html/rfc7516 +package jwe + +// #region imports +import ( + "bytes" + "context" + "crypto/ecdsa" + "errors" + "fmt" + "io" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + 
"github.com/lestrrat-go/jwx/v3/jwk" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc" + "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt" + "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen" +) + +// #region globals + +var muSettings sync.RWMutex +var maxPBES2Count = 10000 +var maxDecompressBufferSize int64 = 10 * 1024 * 1024 // 10MB + +func Settings(options ...GlobalOption) { + muSettings.Lock() + defer muSettings.Unlock() + for _, option := range options { + switch option.Ident() { + case identMaxPBES2Count{}: + if err := option.Value(&maxPBES2Count); err != nil { + panic(fmt.Sprintf("jwe.Settings: value for option WithMaxPBES2Count must be an int: %s", err)) + } + case identMaxDecompressBufferSize{}: + if err := option.Value(&maxDecompressBufferSize); err != nil { + panic(fmt.Sprintf("jwe.Settings: value for option WithMaxDecompressBufferSize must be an int64: %s", err)) + } + case identCBCBufferSize{}: + var v int64 + if err := option.Value(&v); err != nil { + panic(fmt.Sprintf("jwe.Settings: value for option WithCBCBufferSize must be an int64: %s", err)) + } + aescbc.SetMaxBufferSize(v) + } + } +} + +const ( + fmtInvalid = iota + fmtCompact + fmtJSON + fmtJSONPretty + fmtMax +) + +var _ = fmtInvalid +var _ = fmtMax + +var registry = json.NewRegistry() + +type recipientBuilder struct { + alg jwa.KeyEncryptionAlgorithm + key any + headers Headers +} + +func (b *recipientBuilder) Build(r Recipient, cek []byte, calg jwa.ContentEncryptionAlgorithm, _ *content_crypt.Generic) ([]byte, error) { + // we need the raw key for later use + rawKey := b.key + + var keyID string + if ke, ok := b.key.(KeyEncrypter); ok { + if kider, ok := ke.(KeyIDer); ok { + if v, ok := kider.KeyID(); ok { + keyID = v + } + } + } else if jwkKey, ok := b.key.(jwk.Key); ok { + // Meanwhile, grab the kid as well + if v, ok := jwkKey.KeyID(); ok { + keyID = v + } + + var raw any + if err := jwk.Export(jwkKey, &raw); err != nil { + return nil, 
fmt.Errorf(`jwe.Encrypt: recipientBuilder: failed to retrieve raw key out of %T: %w`, b.key, err) + } + + rawKey = raw + } + + // Extract ECDH-ES specific parameters if needed. + var apu, apv []byte + + hdr := b.headers + if hdr == nil { + hdr = NewHeaders() + } + + if val, ok := hdr.AgreementPartyUInfo(); ok { + apu = val + } + + if val, ok := hdr.AgreementPartyVInfo(); ok { + apv = val + } + + // Create the encrypter using the new jwebb pattern + enc, err := newEncrypter(b.alg, calg, b.key, rawKey, apu, apv) + if err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: recipientBuilder: failed to create encrypter: %w`, err) + } + + _ = r.SetHeaders(hdr) + + // Populate headers with stuff that we automatically set + if err := hdr.Set(AlgorithmKey, b.alg); err != nil { + return nil, fmt.Errorf(`failed to set header: %w`, err) + } + + if keyID != "" { + if err := hdr.Set(KeyIDKey, keyID); err != nil { + return nil, fmt.Errorf(`failed to set header: %w`, err) + } + } + + // Handle the encrypted key + var rawCEK []byte + enckey, err := enc.EncryptKey(cek) + if err != nil { + return nil, fmt.Errorf(`failed to encrypt key: %w`, err) + } + if b.alg == jwa.ECDH_ES() || b.alg == jwa.DIRECT() { + rawCEK = enckey.Bytes() + } else { + if err := r.SetEncryptedKey(enckey.Bytes()); err != nil { + return nil, fmt.Errorf(`failed to set encrypted key: %w`, err) + } + } + + // finally, anything specific should go here + if hp, ok := enckey.(populater); ok { + if err := hp.Populate(hdr); err != nil { + return nil, fmt.Errorf(`failed to populate: %w`, err) + } + } + + return rawCEK, nil +} + +// Encrypt generates a JWE message for the given payload and returns +// it in serialized form, which can be in either compact or +// JSON format. Default is compact. When JSON format is specified and +// there is only one recipient, the resulting serialization is +// automatically converted to flattened JSON serialization format. 
+// +// You must pass at least one key to `jwe.Encrypt()` by using `jwe.WithKey()` +// option. +// +// jwe.Encrypt(payload, jwe.WithKey(alg, key)) +// jwe.Encrypt(payload, jws.WithJSON(), jws.WithKey(alg1, key1), jws.WithKey(alg2, key2)) +// +// Note that in the second example the `jws.WithJSON()` option is +// specified as well. This is because the compact serialization +// format does not support multiple recipients, and users must +// specifically ask for the JSON serialization format. +// +// Read the documentation for `jwe.WithKey()` to learn more about the +// possible values that can be used for `alg` and `key`. +// +// Look for options that return `jwe.EncryptOption` or `jws.EncryptDecryptOption` +// for a complete list of options that can be passed to this function. +// +// As of v3.0.12, users can specify `jwe.WithLegacyHeaderMerging()` to +// disable header merging behavior that was the default prior to v3.0.12. +// Read the documentation for `jwe.WithLegacyHeaderMerging()` for more information. +func Encrypt(payload []byte, options ...EncryptOption) ([]byte, error) { + ec := encryptContextPool.Get() + defer encryptContextPool.Put(ec) + if err := ec.ProcessOptions(options); err != nil { + return nil, encryptError{fmt.Errorf(`jwe.Encrypt: failed to process options: %w`, err)} + } + ret, err := ec.EncryptMessage(payload, nil) + if err != nil { + return nil, encryptError{fmt.Errorf(`jwe.Encrypt: %w`, err)} + } + return ret, nil +} + +// EncryptStatic is exactly like Encrypt, except it accepts a static +// content encryption key (CEK). It is separated out from the main +// Encrypt function such that the latter does not accidentally use a static +// CEK. +// +// DO NOT attempt to use this function unless you completely understand the +// security implications to using static CEKs. You have been warned. +// +// This function is currently considered EXPERIMENTAL, and is subject to +// future changes across minor/micro versions. 
+func EncryptStatic(payload, cek []byte, options ...EncryptOption) ([]byte, error) { + if len(cek) <= 0 { + return nil, encryptError{fmt.Errorf(`jwe.EncryptStatic: empty CEK`)} + } + ec := encryptContextPool.Get() + defer encryptContextPool.Put(ec) + if err := ec.ProcessOptions(options); err != nil { + return nil, encryptError{fmt.Errorf(`jwe.EncryptStatic: failed to process options: %w`, err)} + } + ret, err := ec.EncryptMessage(payload, cek) + if err != nil { + return nil, encryptError{fmt.Errorf(`jwe.EncryptStatic: %w`, err)} + } + return ret, nil +} + +// decryptContext holds the state during JWE decryption, similar to JWS verifyContext +type decryptContext struct { + keyProviders []KeyProvider + keyUsed any + cek *[]byte + dst *Message + maxDecompressBufferSize int64 + //nolint:containedctx + ctx context.Context +} + +var decryptContextPool = pool.New(allocDecryptContext, freeDecryptContext) + +func allocDecryptContext() *decryptContext { + return &decryptContext{ + ctx: context.Background(), + } +} + +func freeDecryptContext(dc *decryptContext) *decryptContext { + dc.keyProviders = dc.keyProviders[:0] + dc.keyUsed = nil + dc.cek = nil + dc.dst = nil + dc.maxDecompressBufferSize = 0 + dc.ctx = context.Background() + return dc +} + +func (dc *decryptContext) ProcessOptions(options []DecryptOption) error { + // Set default max decompress buffer size + muSettings.RLock() + dc.maxDecompressBufferSize = maxDecompressBufferSize + muSettings.RUnlock() + + for _, option := range options { + switch option.Ident() { + case identMessage{}: + if err := option.Value(&dc.dst); err != nil { + return fmt.Errorf("jwe.decrypt: WithMessage must be a *jwe.Message: %w", err) + } + case identKeyProvider{}: + var kp KeyProvider + if err := option.Value(&kp); err != nil { + return fmt.Errorf("jwe.decrypt: WithKeyProvider must be a KeyProvider: %w", err) + } + dc.keyProviders = append(dc.keyProviders, kp) + case identKeyUsed{}: + if err := option.Value(&dc.keyUsed); err != nil { + 
return fmt.Errorf("jwe.decrypt: WithKeyUsed must be an any: %w", err) + } + case identKey{}: + var pair *withKey + if err := option.Value(&pair); err != nil { + return fmt.Errorf("jwe.decrypt: WithKey must be a *withKey: %w", err) + } + alg, ok := pair.alg.(jwa.KeyEncryptionAlgorithm) + if !ok { + return fmt.Errorf("jwe.decrypt: WithKey() option must be specified using jwa.KeyEncryptionAlgorithm (got %T)", pair.alg) + } + dc.keyProviders = append(dc.keyProviders, &staticKeyProvider{alg: alg, key: pair.key}) + case identCEK{}: + if err := option.Value(&dc.cek); err != nil { + return fmt.Errorf("jwe.decrypt: WithCEK must be a *[]byte: %w", err) + } + case identMaxDecompressBufferSize{}: + if err := option.Value(&dc.maxDecompressBufferSize); err != nil { + return fmt.Errorf("jwe.decrypt: WithMaxDecompressBufferSize must be int64: %w", err) + } + case identContext{}: + if err := option.Value(&dc.ctx); err != nil { + return fmt.Errorf("jwe.decrypt: WithContext must be a context.Context: %w", err) + } + } + } + + if len(dc.keyProviders) < 1 { + return fmt.Errorf(`jwe.Decrypt: no key providers have been provided (see jwe.WithKey(), jwe.WithKeySet(), and jwe.WithKeyProvider()`) + } + + return nil +} + +func (dc *decryptContext) DecryptMessage(buf []byte) ([]byte, error) { + msg, err := parseJSONOrCompact(buf, true) + if err != nil { + return nil, fmt.Errorf(`failed to parse buffer for Decrypt: %w`, err) + } + + // Process things that are common to the message + h, err := msg.protectedHeaders.Clone() + if err != nil { + return nil, fmt.Errorf(`failed to copy protected headers: %w`, err) + } + h, err = h.Merge(msg.unprotectedHeaders) + if err != nil { + return nil, fmt.Errorf(`failed to merge headers for message decryption: %w`, err) + } + + var aad []byte + if aadContainer := msg.authenticatedData; aadContainer != nil { + aad = base64.Encode(aadContainer) + } + + var computedAad []byte + if len(msg.rawProtectedHeaders) > 0 { + computedAad = msg.rawProtectedHeaders + } else 
{ + // this is probably not required once msg.Decrypt is deprecated + var err error + computedAad, err = msg.protectedHeaders.Encode() + if err != nil { + return nil, fmt.Errorf(`failed to encode protected headers: %w`, err) + } + } + + // for each recipient, attempt to match the key providers + // if we have no recipients, pretend like we only have one + recipients := msg.recipients + if len(recipients) == 0 { + r := NewRecipient() + if err := r.SetHeaders(msg.protectedHeaders); err != nil { + return nil, fmt.Errorf(`failed to set headers to recipient: %w`, err) + } + recipients = append(recipients, r) + } + + errs := make([]error, 0, len(recipients)) + for _, recipient := range recipients { + decrypted, err := dc.tryRecipient(msg, recipient, h, aad, computedAad) + if err != nil { + errs = append(errs, recipientError{err}) + continue + } + if dc.dst != nil { + *dc.dst = *msg + dc.dst.rawProtectedHeaders = nil + dc.dst.storeProtectedHeaders = false + } + return decrypted, nil + } + return nil, fmt.Errorf(`failed to decrypt any of the recipients: %w`, errors.Join(errs...)) +} + +func (dc *decryptContext) tryRecipient(msg *Message, recipient Recipient, protectedHeaders Headers, aad, computedAad []byte) ([]byte, error) { + var tried int + var lastError error + for i, kp := range dc.keyProviders { + var sink algKeySink + if err := kp.FetchKeys(dc.ctx, &sink, recipient, msg); err != nil { + return nil, fmt.Errorf(`key provider %d failed: %w`, i, err) + } + + for _, pair := range sink.list { + tried++ + // alg is converted here because pair.alg is of type jwa.KeyAlgorithm. 
+ // this may seem ugly, but we're trying to avoid declaring separate + // structs for `alg jwa.KeyEncryptionAlgorithm` and `alg jwa.SignatureAlgorithm` + //nolint:forcetypeassert + alg := pair.alg.(jwa.KeyEncryptionAlgorithm) + key := pair.key + + decrypted, err := dc.decryptContent(msg, alg, key, recipient, protectedHeaders, aad, computedAad) + if err != nil { + lastError = err + continue + } + + if dc.keyUsed != nil { + if err := blackmagic.AssignIfCompatible(dc.keyUsed, key); err != nil { + return nil, fmt.Errorf(`failed to assign used key (%T) to %T: %w`, key, dc.keyUsed, err) + } + } + return decrypted, nil + } + } + return nil, fmt.Errorf(`jwe.Decrypt: tried %d keys, but failed to match any of the keys with recipient (last error = %s)`, tried, lastError) +} + +func (dc *decryptContext) decryptContent(msg *Message, alg jwa.KeyEncryptionAlgorithm, key any, recipient Recipient, protectedHeaders Headers, aad, computedAad []byte) ([]byte, error) { + if jwkKey, ok := key.(jwk.Key); ok { + var raw any + if err := jwk.Export(jwkKey, &raw); err != nil { + return nil, fmt.Errorf(`failed to retrieve raw key from %T: %w`, key, err) + } + key = raw + } + + ce, ok := msg.protectedHeaders.ContentEncryption() + if !ok { + return nil, fmt.Errorf(`jwe.Decrypt: failed to retrieve content encryption algorithm from protected headers`) + } + dec := newDecrypter(alg, ce, key). + AuthenticatedData(aad). + ComputedAuthenticatedData(computedAad). + InitializationVector(msg.initializationVector). + Tag(msg.tag). + CEK(dc.cek) + + // The "alg" header can be in either protected/unprotected headers. + // prefer per-recipient headers (as it might be the case that the algorithm differs + // by each recipient), then look at protected headers. 
+ var algMatched bool + for _, hdr := range []Headers{recipient.Headers(), protectedHeaders} { + v, ok := hdr.Algorithm() + if !ok { + continue + } + + if v == alg { + algMatched = true + break + } + // if we found something but didn't match, it's a failure + return nil, fmt.Errorf(`jwe.Decrypt: key (%q) and recipient (%q) algorithms do not match`, alg, v) + } + if !algMatched { + return nil, fmt.Errorf(`jwe.Decrypt: failed to find "alg" header in either protected or per-recipient headers`) + } + + h2, err := protectedHeaders.Clone() + if err != nil { + return nil, fmt.Errorf(`jwe.Decrypt: failed to copy headers (1): %w`, err) + } + + h2, err = h2.Merge(recipient.Headers()) + if err != nil { + return nil, fmt.Errorf(`failed to copy headers (2): %w`, err) + } + + switch alg { + case jwa.ECDH_ES(), jwa.ECDH_ES_A128KW(), jwa.ECDH_ES_A192KW(), jwa.ECDH_ES_A256KW(): + var epk any + if err := h2.Get(EphemeralPublicKeyKey, &epk); err != nil { + return nil, fmt.Errorf(`failed to get 'epk' field: %w`, err) + } + switch epk := epk.(type) { + case jwk.ECDSAPublicKey: + var pubkey ecdsa.PublicKey + if err := jwk.Export(epk, &pubkey); err != nil { + return nil, fmt.Errorf(`failed to get public key: %w`, err) + } + dec.PublicKey(&pubkey) + case jwk.OKPPublicKey: + var pubkey any + if err := jwk.Export(epk, &pubkey); err != nil { + return nil, fmt.Errorf(`failed to get public key: %w`, err) + } + dec.PublicKey(pubkey) + default: + return nil, fmt.Errorf("unexpected 'epk' type %T for alg %s", epk, alg) + } + + if apu, ok := h2.AgreementPartyUInfo(); ok && len(apu) > 0 { + dec.AgreementPartyUInfo(apu) + } + if apv, ok := h2.AgreementPartyVInfo(); ok && len(apv) > 0 { + dec.AgreementPartyVInfo(apv) + } + case jwa.A128GCMKW(), jwa.A192GCMKW(), jwa.A256GCMKW(): + var ivB64 string + if err := h2.Get(InitializationVectorKey, &ivB64); err == nil { + iv, err := base64.DecodeString(ivB64) + if err != nil { + return nil, fmt.Errorf(`failed to b64-decode 'iv': %w`, err) + } + 
dec.KeyInitializationVector(iv) + } + var tagB64 string + if err := h2.Get(TagKey, &tagB64); err == nil { + tag, err := base64.DecodeString(tagB64) + if err != nil { + return nil, fmt.Errorf(`failed to b64-decode 'tag': %w`, err) + } + dec.KeyTag(tag) + } + case jwa.PBES2_HS256_A128KW(), jwa.PBES2_HS384_A192KW(), jwa.PBES2_HS512_A256KW(): + var saltB64 string + if err := h2.Get(SaltKey, &saltB64); err != nil { + return nil, fmt.Errorf(`failed to get %q field`, SaltKey) + } + + // check if WithUseNumber is effective, because it will change the + // type of the underlying value (#1140) + var countFlt float64 + if json.UseNumber() { + var count json.Number + if err := h2.Get(CountKey, &count); err != nil { + return nil, fmt.Errorf(`failed to get %q field`, CountKey) + } + v, err := count.Float64() + if err != nil { + return nil, fmt.Errorf("failed to convert 'p2c' to float64: %w", err) + } + countFlt = v + } else { + var count float64 + if err := h2.Get(CountKey, &count); err != nil { + return nil, fmt.Errorf(`failed to get %q field`, CountKey) + } + countFlt = count + } + + muSettings.RLock() + maxCount := maxPBES2Count + muSettings.RUnlock() + if countFlt > float64(maxCount) { + return nil, fmt.Errorf("invalid 'p2c' value") + } + salt, err := base64.DecodeString(saltB64) + if err != nil { + return nil, fmt.Errorf(`failed to b64-decode 'salt': %w`, err) + } + dec.KeySalt(salt) + dec.KeyCount(int(countFlt)) + } + + plaintext, err := dec.Decrypt(recipient, msg.cipherText, msg) + if err != nil { + return nil, fmt.Errorf(`jwe.Decrypt: decryption failed: %w`, err) + } + + if v, ok := h2.Compression(); ok && v == jwa.Deflate() { + buf, err := uncompress(plaintext, dc.maxDecompressBufferSize) + if err != nil { + return nil, fmt.Errorf(`jwe.Derypt: failed to uncompress payload: %w`, err) + } + plaintext = buf + } + + if plaintext == nil { + return nil, fmt.Errorf(`failed to find matching recipient`) + } + + return plaintext, nil +} + +// encryptContext holds the state during 
JWE encryption, similar to JWS signContext +type encryptContext struct { + calg jwa.ContentEncryptionAlgorithm + compression jwa.CompressionAlgorithm + format int + builders []*recipientBuilder + protected Headers + legacyHeaderMerging bool +} + +var encryptContextPool = pool.New(allocEncryptContext, freeEncryptContext) + +func allocEncryptContext() *encryptContext { + return &encryptContext{ + calg: jwa.A256GCM(), + compression: jwa.NoCompress(), + format: fmtCompact, + } +} + +func freeEncryptContext(ec *encryptContext) *encryptContext { + ec.calg = jwa.A256GCM() + ec.compression = jwa.NoCompress() + ec.format = fmtCompact + ec.builders = ec.builders[:0] + ec.protected = nil + return ec +} + +func (ec *encryptContext) ProcessOptions(options []EncryptOption) error { + ec.legacyHeaderMerging = true + var mergeProtected bool + var useRawCEK bool + for _, option := range options { + switch option.Ident() { + case identKey{}: + var wk *withKey + if err := option.Value(&wk); err != nil { + return fmt.Errorf("jwe.encrypt: WithKey must be a *withKey: %w", err) + } + v, ok := wk.alg.(jwa.KeyEncryptionAlgorithm) + if !ok { + return fmt.Errorf("jwe.encrypt: WithKey() option must be specified using jwa.KeyEncryptionAlgorithm (got %T)", wk.alg) + } + if v == jwa.DIRECT() || v == jwa.ECDH_ES() { + useRawCEK = true + } + ec.builders = append(ec.builders, &recipientBuilder{ + alg: v, + key: wk.key, + headers: wk.headers, + }) + case identContentEncryptionAlgorithm{}: + var c jwa.ContentEncryptionAlgorithm + if err := option.Value(&c); err != nil { + return err + } + ec.calg = c + case identCompress{}: + var comp jwa.CompressionAlgorithm + if err := option.Value(&comp); err != nil { + return err + } + ec.compression = comp + case identMergeProtectedHeaders{}: + var mp bool + if err := option.Value(&mp); err != nil { + return err + } + mergeProtected = mp + case identProtectedHeaders{}: + var hdrs Headers + if err := option.Value(&hdrs); err != nil { + return err + } + if 
!mergeProtected || ec.protected == nil { + ec.protected = hdrs + } else { + merged, err := ec.protected.Merge(hdrs) + if err != nil { + return fmt.Errorf(`failed to merge headers: %w`, err) + } + ec.protected = merged + } + case identSerialization{}: + var fmtOpt int + if err := option.Value(&fmtOpt); err != nil { + return err + } + ec.format = fmtOpt + case identLegacyHeaderMerging{}: + var v bool + if err := option.Value(&v); err != nil { + return err + } + ec.legacyHeaderMerging = v + } + } + + // We need to have at least one builder + switch l := len(ec.builders); { + case l == 0: + return fmt.Errorf(`missing key encryption builders: use jwe.WithKey() to specify one`) + case l > 1: + if ec.format == fmtCompact { + return fmt.Errorf(`cannot use compact serialization when multiple recipients exist (check the number of WithKey() argument, or use WithJSON())`) + } + } + + if useRawCEK { + if len(ec.builders) != 1 { + return fmt.Errorf(`multiple recipients for ECDH-ES/DIRECT mode supported`) + } + } + + return nil +} + +var msgPool = pool.New(allocMessage, freeMessage) + +func allocMessage() *Message { + return &Message{ + recipients: make([]Recipient, 0, 1), + } +} + +func freeMessage(msg *Message) *Message { + msg.cipherText = nil + msg.initializationVector = nil + if hdr := msg.protectedHeaders; hdr != nil { + headerPool.Put(hdr) + } + msg.protectedHeaders = nil + msg.unprotectedHeaders = nil + msg.recipients = nil // reuse should be done elsewhere + msg.authenticatedData = nil + msg.tag = nil + msg.rawProtectedHeaders = nil + msg.storeProtectedHeaders = false + return msg +} + +var headerPool = pool.New(NewHeaders, freeHeaders) + +func freeHeaders(h Headers) Headers { + if c, ok := h.(interface{ clear() }); ok { + c.clear() + } + return h +} + +var recipientPool = pool.New(NewRecipient, freeRecipient) + +func freeRecipient(r Recipient) Recipient { + if h := r.Headers(); h != nil { + if c, ok := h.(interface{ clear() }); ok { + c.clear() + } + } + + if sr, ok := 
r.(*stdRecipient); ok { + sr.encryptedKey = nil + } + return r +} + +var recipientSlicePool = pool.NewSlicePool(allocRecipientSlice, freeRecipientSlice) + +func allocRecipientSlice() []Recipient { + return make([]Recipient, 0, 1) +} + +func freeRecipientSlice(rs []Recipient) []Recipient { + for _, r := range rs { + recipientPool.Put(r) + } + return rs[:0] +} + +func (ec *encryptContext) EncryptMessage(payload []byte, cek []byte) ([]byte, error) { + // Get protected headers from pool and copy contents from context + protected := headerPool.Get() + if userSupplied := ec.protected; userSupplied != nil { + ec.protected = nil // Clear from context + if err := userSupplied.Copy(protected); err != nil { + return nil, fmt.Errorf(`failed to copy protected headers: %w`, err) + } + } + + // There is exactly one content encrypter. + contentcrypt, err := content_crypt.NewGeneric(ec.calg) + if err != nil { + return nil, fmt.Errorf(`failed to create AES encrypter: %w`, err) + } + + // Generate CEK if not provided + if len(cek) <= 0 { + bk, err := keygen.Random(contentcrypt.KeySize()) + if err != nil { + return nil, fmt.Errorf(`failed to generate key: %w`, err) + } + cek = bk.Bytes() + } + + var useRawCEK bool + for _, builder := range ec.builders { + if builder.alg == jwa.DIRECT() || builder.alg == jwa.ECDH_ES() { + useRawCEK = true + break + } + } + + lbuilders := len(ec.builders) + recipients := recipientSlicePool.GetCapacity(lbuilders) + defer recipientSlicePool.Put(recipients) + + for i, builder := range ec.builders { + r := recipientPool.Get() + defer recipientPool.Put(r) + + // some builders require hint from the contentcrypt object + rawCEK, err := builder.Build(r, cek, ec.calg, contentcrypt) + if err != nil { + return nil, fmt.Errorf(`failed to create recipient #%d: %w`, i, err) + } + recipients = append(recipients, r) + + // Kinda feels weird, but if useRawCEK == true, we asserted earlier + // that len(builders) == 1, so this is OK + if useRawCEK { + cek = rawCEK + } + } 
+ + if err := protected.Set(ContentEncryptionKey, ec.calg); err != nil { + return nil, fmt.Errorf(`failed to set "enc" in protected header: %w`, err) + } + + if ec.compression != jwa.NoCompress() { + payload, err = compress(payload) + if err != nil { + return nil, fmt.Errorf(`failed to compress payload before encryption: %w`, err) + } + if err := protected.Set(CompressionKey, ec.compression); err != nil { + return nil, fmt.Errorf(`failed to set "zip" in protected header: %w`, err) + } + } + + // fmtCompact does not have per-recipient headers, nor a "header" field. + // In this mode, we're going to have to merge everything to the protected + // header. + if ec.format == fmtCompact { + // We have already established that the number of builders is 1 in + // ec.ProcessOptions(). But we're going to be pedantic + if lbuilders != 1 { + return nil, fmt.Errorf(`internal error: expected exactly one recipient builder (got %d)`, lbuilders) + } + + // when we're using compact format, we can safely merge per-recipient + // headers into the protected header, if any + h, err := protected.Merge(recipients[0].Headers()) + if err != nil { + return nil, fmt.Errorf(`failed to merge protected headers for compact serialization: %w`, err) + } + protected = h + // per-recipient headers, if any, will be ignored in compact format + } else { + // If it got here, it's JSON (could be pretty mode, too). + if lbuilders == 1 { + // If it got here, then we're doing flattened JSON serialization. + // In this mode, we should merge per-recipient headers into the protected header, + // but we also need to make sure that the "header" field is reset so that + // it does not contain the same fields as the protected header. + // + // However, old behavior was to merge per-recipient headers into the + // protected header when there was only one recipient, AND leave the + // original "header" field as is, so we need to support that for backwards compatibility. 
+ // + // The legacy merging only takes effect when there is exactly one recipient. + // + // This behavior can be disabled by passing jwe.WithLegacyHeaderMerging(false) + // If the user has explicitly asked for merging, do it + h, err := protected.Merge(recipients[0].Headers()) + if err != nil { + return nil, fmt.Errorf(`failed to merge protected headers for flattenend JSON format: %w`, err) + } + protected = h + + if !ec.legacyHeaderMerging { + // Clear per-recipient headers, since they have been merged. + // But we only do it when legacy merging is disabled. + // Note: we should probably introduce a Reset() method in v4 + if err := recipients[0].SetHeaders(NewHeaders()); err != nil { + return nil, fmt.Errorf(`failed to clear per-recipient headers after merging: %w`, err) + } + } + } + } + + aad, err := protected.Encode() + if err != nil { + return nil, fmt.Errorf(`failed to base64 encode protected headers: %w`, err) + } + + iv, ciphertext, tag, err := contentcrypt.Encrypt(cek, payload, aad) + if err != nil { + return nil, fmt.Errorf(`failed to encrypt payload: %w`, err) + } + + msg := msgPool.Get() + defer msgPool.Put(msg) + + if err := msg.Set(CipherTextKey, ciphertext); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, CipherTextKey, err) + } + if err := msg.Set(InitializationVectorKey, iv); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, InitializationVectorKey, err) + } + if err := msg.Set(ProtectedHeadersKey, protected); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, ProtectedHeadersKey, err) + } + if err := msg.Set(RecipientsKey, recipients); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, RecipientsKey, err) + } + if err := msg.Set(TagKey, tag); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, TagKey, err) + } + + switch ec.format { + case fmtCompact: + return Compact(msg) + case fmtJSON: + return json.Marshal(msg) + case fmtJSONPretty: + return json.MarshalIndent(msg, "", " ") + default: 
+ return nil, fmt.Errorf(`invalid serialization`) + } +} + +// Decrypt takes encrypted payload, and information required to decrypt the +// payload (e.g. the key encryption algorithm and the corresponding +// key to decrypt the JWE message) in its optional arguments. See +// the examples and list of options that return a DecryptOption for possible +// values. Upon successful decryptiond returns the decrypted payload. +// +// The JWE message can be either compact or full JSON format. +// +// When using `jwe.WithKeyEncryptionAlgorithm()`, you can pass a `jwa.KeyAlgorithm` +// for convenience: this is mainly to allow you to directly pass the result of `(jwk.Key).Algorithm()`. +// However, do note that while `(jwk.Key).Algorithm()` could very well contain key encryption +// algorithms, it could also contain other types of values, such as _signature algorithms_. +// In order for `jwe.Decrypt` to work properly, the `alg` parameter must be of type +// `jwa.KeyEncryptionAlgorithm` or otherwise it will cause an error. +// +// When using `jwe.WithKey()`, the value must be a private key. +// It can be either in its raw format (e.g. *rsa.PrivateKey) or a jwk.Key +// +// When the encrypted message is also compressed, the decompressed payload must be +// smaller than the size specified by the `jwe.WithMaxDecompressBufferSize` setting, +// which defaults to 10MB. If the decompressed payload is larger than this size, +// an error is returned. 
+// +// You can opt to change the MaxDecompressBufferSize setting globally, or on a +// per-call basis by passing the `jwe.WithMaxDecompressBufferSize` option to +// either `jwe.Settings()` or `jwe.Decrypt()`: +// +// jwe.Settings(jwe.WithMaxDecompressBufferSize(10*1024*1024)) // changes value globally +// jwe.Decrypt(..., jwe.WithMaxDecompressBufferSize(250*1024)) // changes just for this call +func Decrypt(buf []byte, options ...DecryptOption) ([]byte, error) { + dc := decryptContextPool.Get() + defer decryptContextPool.Put(dc) + + if err := dc.ProcessOptions(options); err != nil { + return nil, decryptError{fmt.Errorf(`jwe.Decrypt: failed to process options: %w`, err)} + } + + ret, err := dc.DecryptMessage(buf) + if err != nil { + return nil, decryptError{fmt.Errorf(`jwe.Decrypt: %w`, err)} + } + return ret, nil +} + +// Parse parses the JWE message into a Message object. The JWE message +// can be either compact or full JSON format. +// +// Parse() currently does not take any options, but the API accepts it +// in anticipation of future addition. +func Parse(buf []byte, _ ...ParseOption) (*Message, error) { + return parseJSONOrCompact(buf, false) +} + +// errors are wrapped within this function, because we call it directly +// from Decrypt as well. +func parseJSONOrCompact(buf []byte, storeProtectedHeaders bool) (*Message, error) { + buf = bytes.TrimSpace(buf) + if len(buf) == 0 { + return nil, parseError{fmt.Errorf(`jwe.Parse: empty buffer`)} + } + + var msg *Message + var err error + if buf[0] == tokens.OpenCurlyBracket { + msg, err = parseJSON(buf, storeProtectedHeaders) + } else { + msg, err = parseCompact(buf, storeProtectedHeaders) + } + + if err != nil { + return nil, parseError{fmt.Errorf(`jwe.Parse: %w`, err)} + } + return msg, nil +} + +// ParseString is the same as Parse, but takes a string. 
+func ParseString(s string) (*Message, error) { + msg, err := Parse([]byte(s)) + if err != nil { + return nil, parseError{fmt.Errorf(`jwe.ParseString: %w`, err)} + } + return msg, nil +} + +// ParseReader is the same as Parse, but takes an io.Reader. +func ParseReader(src io.Reader) (*Message, error) { + buf, err := io.ReadAll(src) + if err != nil { + return nil, parseError{fmt.Errorf(`jwe.ParseReader: failed to read from io.Reader: %w`, err)} + } + msg, err := Parse(buf) + if err != nil { + return nil, parseError{fmt.Errorf(`jwe.ParseReader: %w`, err)} + } + return msg, nil +} + +func parseJSON(buf []byte, storeProtectedHeaders bool) (*Message, error) { + m := NewMessage() + m.storeProtectedHeaders = storeProtectedHeaders + if err := json.Unmarshal(buf, &m); err != nil { + return nil, fmt.Errorf(`failed to parse JSON: %w`, err) + } + return m, nil +} + +func parseCompact(buf []byte, storeProtectedHeaders bool) (*Message, error) { + var parts [5][]byte + var ok bool + + for i := range 4 { + parts[i], buf, ok = bytes.Cut(buf, []byte{tokens.Period}) + if !ok { + return nil, fmt.Errorf(`compact JWE format must have five parts (%d)`, i+1) + } + } + // Validate that the last part does not contain more dots + if bytes.ContainsRune(buf, tokens.Period) { + return nil, errors.New(`compact JWE format must have five parts, not more`) + } + parts[4] = buf + + hdrbuf, err := base64.Decode(parts[0]) + if err != nil { + return nil, fmt.Errorf(`failed to parse first part of compact form: %w`, err) + } + + protected := NewHeaders() + if err := json.Unmarshal(hdrbuf, protected); err != nil { + return nil, fmt.Errorf(`failed to parse header JSON: %w`, err) + } + + ivbuf, err := base64.Decode(parts[2]) + if err != nil { + return nil, fmt.Errorf(`failed to base64 decode iv: %w`, err) + } + + ctbuf, err := base64.Decode(parts[3]) + if err != nil { + return nil, fmt.Errorf(`failed to base64 decode content: %w`, err) + } + + tagbuf, err := base64.Decode(parts[4]) + if err != nil { + 
return nil, fmt.Errorf(`failed to base64 decode tag: %w`, err) + } + + m := NewMessage() + if err := m.Set(CipherTextKey, ctbuf); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, CipherTextKey, err) + } + if err := m.Set(InitializationVectorKey, ivbuf); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, InitializationVectorKey, err) + } + if err := m.Set(ProtectedHeadersKey, protected); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, ProtectedHeadersKey, err) + } + + if err := m.makeDummyRecipient(string(parts[1]), protected); err != nil { + return nil, fmt.Errorf(`failed to setup recipient: %w`, err) + } + + if err := m.Set(TagKey, tagbuf); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, TagKey, err) + } + + if storeProtectedHeaders { + // This is later used for decryption. + m.rawProtectedHeaders = parts[0] + } + + return m, nil +} + +type CustomDecoder = json.CustomDecoder +type CustomDecodeFunc = json.CustomDecodeFunc + +// RegisterCustomField allows users to specify that a private field +// be decoded as an instance of the specified type. This option has +// a global effect. +// +// For example, suppose you have a custom field `x-birthday`, which +// you want to represent as a string formatted in RFC3339 in JSON, +// but want it back as `time.Time`. +// +// In such case you would register a custom field as follows +// +// jws.RegisterCustomField(`x-birthday`, time.Time{}) +// +// Then you can use a `time.Time` variable to extract the value +// of `x-birthday` field, instead of having to use `any` +// and later convert it to `time.Time` +// +// var bday time.Time +// _ = hdr.Get(`x-birthday`, &bday) +// +// If you need a more fine-tuned control over the decoding process, +// you can register a `CustomDecoder`. 
For example, below shows +// how to register a decoder that can parse RFC1123 format string: +// +// jwe.RegisterCustomField(`x-birthday`, jwe.CustomDecodeFunc(func(data []byte) (any, error) { +// return time.Parse(time.RFC1123, string(data)) +// })) +// +// Please note that use of custom fields can be problematic if you +// are using a library that does not implement MarshalJSON/UnmarshalJSON +// and you try to roundtrip from an object to JSON, and then back to an object. +// For example, in the above example, you can _parse_ time values formatted +// in the format specified in RFC822, but when you convert an object into +// JSON, it will be formatted in RFC3339, because that's what `time.Time` +// likes to do. To avoid this, it's always better to use a custom type +// that wraps your desired type (in this case `time.Time`) and implement +// MarshalJSON and UnmashalJSON. +func RegisterCustomField(name string, object any) { + registry.Register(name, object) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/BUILD.bazel new file mode 100644 index 0000000000..c410a05cdf --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/BUILD.bazel @@ -0,0 +1,43 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "jwebb", + srcs = [ + "content_cipher.go", + "key_decrypt_asymmetric.go", + "key_decrypt_symmetric.go", + "key_encrypt_asymmetric.go", + "key_encrypt_symmetric.go", + "key_encryption.go", + "keywrap.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwe/jwebb", + visibility = ["//jwe:__subpackages__"], + deps = [ + "//internal/keyconv", + "//internal/pool", + "//jwe/internal/cipher", + "//jwe/internal/concatkdf", + "//jwe/internal/content_crypt", + "//jwe/internal/keygen", + "//internal/tokens", + "@org_golang_x_crypto//pbkdf2", + ], +) + +go_test( + name = "jwebb_test", + srcs = [ + "decrypt_test.go", + "jwebb_test.go", + "keywrap_test.go", + ], + 
embed = [":jwebb"], + deps = [ + "//internal/jwxtest", + "//jwa", + "//jwe/internal/keygen", + "//internal/tokens", + "@com_github_stretchr_testify//require", + ], +) \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/content_cipher.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/content_cipher.go new file mode 100644 index 0000000000..9078789d8d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/content_cipher.go @@ -0,0 +1,34 @@ +package jwebb + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwe/internal/cipher" + "github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt" +) + +// ContentEncryptionIsSupported checks if the content encryption algorithm is supported +func ContentEncryptionIsSupported(alg string) bool { + switch alg { + case tokens.A128GCM, tokens.A192GCM, tokens.A256GCM, + tokens.A128CBC_HS256, tokens.A192CBC_HS384, tokens.A256CBC_HS512: + return true + default: + return false + } +} + +// CreateContentCipher creates a content encryption cipher for the given algorithm string +func CreateContentCipher(alg string) (content_crypt.Cipher, error) { + if !ContentEncryptionIsSupported(alg) { + return nil, fmt.Errorf(`invalid content cipher algorithm (%s)`, alg) + } + + cipher, err := cipher.NewAES(alg) + if err != nil { + return nil, fmt.Errorf(`failed to build content cipher for %s: %w`, alg, err) + } + + return cipher, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/jwebb.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/jwebb.go new file mode 100644 index 0000000000..3768acef8b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/jwebb.go @@ -0,0 +1,15 @@ +// Package jwebb provides the building blocks (hence the name "bb") for JWE operations. +// It should be thought of as a low-level API, almost akin to internal packages +// that should not be used directly by users of the jwx package. 
However, these exist +// to provide a more efficient way to perform JWE operations without the overhead of +// the higher-level jwe package to power-users who know what they are doing. +// +// This package is currently considered EXPERIMENTAL, and the API may change +// without notice. It is not recommended to use this package unless you are +// fully aware of the implications of using it. +// +// All bb packages in jwx follow the same design principles: +// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid. +// 2. All exported functions are stringly typed (i.e. they do not take any parameters unless they absolutely have to). +// 3. Does not rely on other public jwx packages (they are standalone, except for internal packages). +package jwebb diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_asymmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_asymmetric.go new file mode 100644 index 0000000000..ac07993176 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_asymmetric.go @@ -0,0 +1,177 @@ +package jwebb + +import ( + "crypto" + "crypto/aes" + "crypto/ecdh" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + + "github.com/lestrrat-go/jwx/v3/internal/keyconv" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf" + "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen" +) + +func contentEncryptionKeySize(ctalg string) (uint32, error) { + switch ctalg { + case tokens.A128GCM: + return tokens.KeySize16, nil + case tokens.A192GCM: + return tokens.KeySize24, nil + case tokens.A256GCM: + return tokens.KeySize32, nil + case tokens.A128CBC_HS256: + return tokens.KeySize32, nil + case tokens.A192CBC_HS384: + return tokens.KeySize48, nil + case tokens.A256CBC_HS512: + return tokens.KeySize64, nil + default: + return 0, 
fmt.Errorf(`unsupported content encryption algorithm %s`, ctalg) + } +} + +func KeyEncryptionECDHESKeySize(alg, ctalg string) (string, uint32, bool, error) { + switch alg { + case tokens.ECDH_ES: + keysize, err := contentEncryptionKeySize(ctalg) + if err != nil { + return "", 0, false, err + } + return ctalg, keysize, false, nil + case tokens.ECDH_ES_A128KW: + return alg, tokens.KeySize16, true, nil + case tokens.ECDH_ES_A192KW: + return alg, tokens.KeySize24, true, nil + case tokens.ECDH_ES_A256KW: + return alg, tokens.KeySize32, true, nil + default: + return "", 0, false, fmt.Errorf(`unsupported key encryption algorithm %s`, alg) + } +} + +func DeriveECDHES(alg string, apu, apv []byte, privkeyif, pubkeyif any, keysize uint32) ([]byte, error) { + pubinfo := make([]byte, 4) + binary.BigEndian.PutUint32(pubinfo, keysize*tokens.BitsPerByte) + + var privkey *ecdh.PrivateKey + var pubkey *ecdh.PublicKey + if err := keyconv.ECDHPrivateKey(&privkey, privkeyif); err != nil { + return nil, fmt.Errorf(`jwebb.DeriveECDHES: %w`, err) + } + if err := keyconv.ECDHPublicKey(&pubkey, pubkeyif); err != nil { + return nil, fmt.Errorf(`jwebb.DeriveECDHES: %w`, err) + } + + zBytes, err := privkey.ECDH(pubkey) + if err != nil { + return nil, fmt.Errorf(`jwebb.DeriveECDHES: unable to determine Z: %w`, err) + } + kdf := concatkdf.New(crypto.SHA256, []byte(alg), zBytes, apu, apv, pubinfo, []byte{}) + key := make([]byte, keysize) + if _, err := kdf.Read(key); err != nil { + return nil, fmt.Errorf(`jwebb.DeriveECDHES: failed to read kdf: %w`, err) + } + + return key, nil +} + +func KeyDecryptECDHESKeyWrap(_, enckey []byte, alg string, apu, apv []byte, privkey, pubkey any, keysize uint32) ([]byte, error) { + key, err := DeriveECDHES(alg, apu, apv, privkey, pubkey, keysize) + if err != nil { + return nil, fmt.Errorf(`failed to derive ECDHES encryption key: %w`, err) + } + + block, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher for ECDH-ES key 
wrap: %w`, err) + } + + return Unwrap(block, enckey) +} + +func KeyDecryptECDHES(_, _ []byte, alg string, apu, apv []byte, privkey, pubkey any, keysize uint32) ([]byte, error) { + key, err := DeriveECDHES(alg, apu, apv, privkey, pubkey, keysize) + if err != nil { + return nil, fmt.Errorf(`failed to derive ECDHES encryption key: %w`, err) + } + return key, nil +} + +// RSA key decryption functions + +func KeyDecryptRSA15(_, enckey []byte, privkeyif any, keysize int) ([]byte, error) { + var privkey *rsa.PrivateKey + if err := keyconv.RSAPrivateKey(&privkey, privkeyif); err != nil { + return nil, fmt.Errorf(`jwebb.KeyDecryptRSA15: %w`, err) + } + + // Perform some input validation. + expectedlen := privkey.PublicKey.N.BitLen() / tokens.BitsPerByte + if expectedlen != len(enckey) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, fmt.Errorf( + "input size for key decrypt is incorrect (expected %d, got %d)", + expectedlen, + len(enckey), + ) + } + + // Generate a random CEK of the required size + bk, err := keygen.Random(keysize * tokens.RSAKeyGenMultiplier) + if err != nil { + return nil, fmt.Errorf(`failed to generate key`) + } + cek := bk.Bytes() + + // Use a defer/recover pattern to handle potential panics from DecryptPKCS1v15SessionKey + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. 
+ // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. + _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, privkey, enckey, cek) + + return cek, nil +} + +func KeyDecryptRSAOAEP(_, enckey []byte, alg string, privkeyif any) ([]byte, error) { + var privkey *rsa.PrivateKey + if err := keyconv.RSAPrivateKey(&privkey, privkeyif); err != nil { + return nil, fmt.Errorf(`jwebb.KeyDecryptRSAOAEP: %w`, err) + } + + var hash hash.Hash + switch alg { + case tokens.RSA_OAEP: + hash = sha1.New() + case tokens.RSA_OAEP_256: + hash = sha256.New() + case tokens.RSA_OAEP_384: + hash = sha512.New384() + case tokens.RSA_OAEP_512: + hash = sha512.New() + default: + return nil, fmt.Errorf(`failed to generate key encrypter for RSA-OAEP: RSA_OAEP/RSA_OAEP_256/RSA_OAEP_384/RSA_OAEP_512 required`) + } + + return rsa.DecryptOAEP(hash, rand.Reader, privkey, enckey, []byte{}) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_symmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_symmetric.go new file mode 100644 index 0000000000..c09e30a34e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_decrypt_symmetric.go @@ -0,0 +1,91 @@ +package jwebb + +import ( + "crypto/aes" + cryptocipher "crypto/cipher" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + "golang.org/x/crypto/pbkdf2" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +// AES key wrap decryption functions + +// Use constants from tokens package +// No need to redefine them here + +func KeyDecryptAESKW(_, enckey []byte, _ string, sharedkey []byte) ([]byte, 
error) { + block, err := aes.NewCipher(sharedkey) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher from shared key: %w`, err) + } + + cek, err := Unwrap(block, enckey) + if err != nil { + return nil, fmt.Errorf(`failed to unwrap data: %w`, err) + } + return cek, nil +} + +func KeyDecryptDirect(_, _ []byte, _ string, cek []byte) ([]byte, error) { + return cek, nil +} + +func KeyDecryptPBES2(_, enckey []byte, alg string, password []byte, salt []byte, count int) ([]byte, error) { + var hashFunc func() hash.Hash + var keylen int + + switch alg { + case tokens.PBES2_HS256_A128KW: + hashFunc = sha256.New + keylen = tokens.KeySize16 + case tokens.PBES2_HS384_A192KW: + hashFunc = sha512.New384 + keylen = tokens.KeySize24 + case tokens.PBES2_HS512_A256KW: + hashFunc = sha512.New + keylen = tokens.KeySize32 + default: + return nil, fmt.Errorf(`unsupported PBES2 algorithm: %s`, alg) + } + + // Derive key using PBKDF2 + derivedKey := pbkdf2.Key(password, salt, count, keylen, hashFunc) + + // Use the derived key for AES key wrap + return KeyDecryptAESKW(nil, enckey, alg, derivedKey) +} + +func KeyDecryptAESGCMKW(recipientKey, _ []byte, _ string, sharedkey []byte, iv []byte, tag []byte) ([]byte, error) { + if len(iv) != tokens.GCMIVSize { + return nil, fmt.Errorf("GCM requires 96-bit iv, got %d", len(iv)*tokens.BitsPerByte) + } + if len(tag) != tokens.GCMTagSize { + return nil, fmt.Errorf("GCM requires 128-bit tag, got %d", len(tag)*tokens.BitsPerByte) + } + + block, err := aes.NewCipher(sharedkey) + if err != nil { + return nil, fmt.Errorf(`failed to create new AES cipher: %w`, err) + } + + aesgcm, err := cryptocipher.NewGCM(block) + if err != nil { + return nil, fmt.Errorf(`failed to create new GCM wrap: %w`, err) + } + + // Combine recipient key and tag for GCM decryption + ciphertext := recipientKey[:] + ciphertext = append(ciphertext, tag...) 
+ + jek, err := aesgcm.Open(nil, iv, ciphertext, nil) + if err != nil { + return nil, fmt.Errorf(`failed to decode key: %w`, err) + } + + return jek, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_asymmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_asymmetric.go new file mode 100644 index 0000000000..6f008173c8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_asymmetric.go @@ -0,0 +1,147 @@ +package jwebb + +import ( + "crypto/aes" + "crypto/ecdh" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen" +) + +// KeyEncryptRSA15 encrypts the CEK using RSA PKCS#1 v1.5 +func KeyEncryptRSA15(cek []byte, _ string, pubkey *rsa.PublicKey) (keygen.ByteSource, error) { + encrypted, err := rsa.EncryptPKCS1v15(rand.Reader, pubkey, cek) + if err != nil { + return nil, fmt.Errorf(`failed to encrypt using PKCS1v15: %w`, err) + } + return keygen.ByteKey(encrypted), nil +} + +// KeyEncryptRSAOAEP encrypts the CEK using RSA OAEP +func KeyEncryptRSAOAEP(cek []byte, alg string, pubkey *rsa.PublicKey) (keygen.ByteSource, error) { + var hash hash.Hash + switch alg { + case tokens.RSA_OAEP: + hash = sha1.New() + case tokens.RSA_OAEP_256: + hash = sha256.New() + case tokens.RSA_OAEP_384: + hash = sha512.New384() + case tokens.RSA_OAEP_512: + hash = sha512.New() + default: + return nil, fmt.Errorf(`failed to generate key encrypter for RSA-OAEP: RSA_OAEP/RSA_OAEP_256/RSA_OAEP_384/RSA_OAEP_512 required`) + } + + encrypted, err := rsa.EncryptOAEP(hash, rand.Reader, pubkey, cek, []byte{}) + if err != nil { + return nil, fmt.Errorf(`failed to OAEP encrypt: %w`, err) + } + return keygen.ByteKey(encrypted), nil +} + +// generateECDHESKeyECDSA generates the key material for ECDSA keys using ECDH-ES +func generateECDHESKeyECDSA(alg string, calg 
string, keysize uint32, pubkey *ecdsa.PublicKey, apu, apv []byte) (keygen.ByteWithECPublicKey, error) { + // Generate the key directly + kg, err := keygen.Ecdhes(alg, calg, int(keysize), pubkey, apu, apv) + if err != nil { + return keygen.ByteWithECPublicKey{}, fmt.Errorf(`failed to generate ECDSA key: %w`, err) + } + + bwpk, ok := kg.(keygen.ByteWithECPublicKey) + if !ok { + return keygen.ByteWithECPublicKey{}, fmt.Errorf(`key generator generated invalid key (expected ByteWithECPublicKey)`) + } + + return bwpk, nil +} + +// generateECDHESKeyX25519 generates the key material for X25519 keys using ECDH-ES +func generateECDHESKeyX25519(alg string, calg string, keysize uint32, pubkey *ecdh.PublicKey) (keygen.ByteWithECPublicKey, error) { + // Generate the key directly + kg, err := keygen.X25519(alg, calg, int(keysize), pubkey) + if err != nil { + return keygen.ByteWithECPublicKey{}, fmt.Errorf(`failed to generate X25519 key: %w`, err) + } + + bwpk, ok := kg.(keygen.ByteWithECPublicKey) + if !ok { + return keygen.ByteWithECPublicKey{}, fmt.Errorf(`key generator generated invalid key (expected ByteWithECPublicKey)`) + } + + return bwpk, nil +} + +// KeyEncryptECDHESKeyWrapECDSA encrypts the CEK using ECDH-ES with key wrapping for ECDSA keys +func KeyEncryptECDHESKeyWrapECDSA(cek []byte, alg string, apu, apv []byte, pubkey *ecdsa.PublicKey, keysize uint32, calg string) (keygen.ByteSource, error) { + bwpk, err := generateECDHESKeyECDSA(alg, calg, keysize, pubkey, apu, apv) + if err != nil { + return nil, err + } + + // For key wrapping algorithms, wrap the CEK with the generated key + block, err := aes.NewCipher(bwpk.Bytes()) + if err != nil { + return nil, fmt.Errorf(`failed to generate cipher from generated key: %w`, err) + } + + jek, err := Wrap(block, cek) + if err != nil { + return nil, fmt.Errorf(`failed to wrap data: %w`, err) + } + + bwpk.ByteKey = keygen.ByteKey(jek) + return bwpk, nil +} + +// KeyEncryptECDHESKeyWrapX25519 encrypts the CEK using ECDH-ES with key 
wrapping for X25519 keys +func KeyEncryptECDHESKeyWrapX25519(cek []byte, alg string, _ []byte, _ []byte, pubkey *ecdh.PublicKey, keysize uint32, calg string) (keygen.ByteSource, error) { + bwpk, err := generateECDHESKeyX25519(alg, calg, keysize, pubkey) + if err != nil { + return nil, err + } + + // For key wrapping algorithms, wrap the CEK with the generated key + block, err := aes.NewCipher(bwpk.Bytes()) + if err != nil { + return nil, fmt.Errorf(`failed to generate cipher from generated key: %w`, err) + } + + jek, err := Wrap(block, cek) + if err != nil { + return nil, fmt.Errorf(`failed to wrap data: %w`, err) + } + + bwpk.ByteKey = keygen.ByteKey(jek) + return bwpk, nil +} + +// KeyEncryptECDHESECDSA encrypts using ECDH-ES direct (no key wrapping) for ECDSA keys +func KeyEncryptECDHESECDSA(_ []byte, alg string, apu, apv []byte, pubkey *ecdsa.PublicKey, keysize uint32, calg string) (keygen.ByteSource, error) { + bwpk, err := generateECDHESKeyECDSA(alg, calg, keysize, pubkey, apu, apv) + if err != nil { + return nil, err + } + + // For direct ECDH-ES, return the generated key directly + return bwpk, nil +} + +// KeyEncryptECDHESX25519 encrypts using ECDH-ES direct (no key wrapping) for X25519 keys +func KeyEncryptECDHESX25519(_ []byte, alg string, _, _ []byte, pubkey *ecdh.PublicKey, keysize uint32, calg string) (keygen.ByteSource, error) { + bwpk, err := generateECDHESKeyX25519(alg, calg, keysize, pubkey) + if err != nil { + return nil, err + } + + // For direct ECDH-ES, return the generated key directly + return bwpk, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_symmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_symmetric.go new file mode 100644 index 0000000000..d489aaba28 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encrypt_symmetric.go @@ -0,0 +1,115 @@ +package jwebb + +import ( + "crypto/aes" + cryptocipher "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "crypto/sha512" 
+ "fmt" + "hash" + "io" + + "golang.org/x/crypto/pbkdf2" + + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwe/internal/keygen" +) + +// KeyEncryptAESKW encrypts the CEK using AES key wrap +func KeyEncryptAESKW(cek []byte, _ string, sharedkey []byte) (keygen.ByteSource, error) { + block, err := aes.NewCipher(sharedkey) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher from shared key: %w`, err) + } + + encrypted, err := Wrap(block, cek) + if err != nil { + return nil, fmt.Errorf(`failed to wrap data: %w`, err) + } + return keygen.ByteKey(encrypted), nil +} + +// KeyEncryptDirect returns the CEK directly for DIRECT algorithm +func KeyEncryptDirect(_ []byte, _ string, sharedkey []byte) (keygen.ByteSource, error) { + return keygen.ByteKey(sharedkey), nil +} + +// KeyEncryptPBES2 encrypts the CEK using PBES2 password-based encryption +func KeyEncryptPBES2(cek []byte, alg string, password []byte) (keygen.ByteSource, error) { + var hashFunc func() hash.Hash + var keylen int + + switch alg { + case tokens.PBES2_HS256_A128KW: + hashFunc = sha256.New + keylen = tokens.KeySize16 + case tokens.PBES2_HS384_A192KW: + hashFunc = sha512.New384 + keylen = tokens.KeySize24 + case tokens.PBES2_HS512_A256KW: + hashFunc = sha512.New + keylen = tokens.KeySize32 + default: + return nil, fmt.Errorf(`unsupported PBES2 algorithm: %s`, alg) + } + + count := tokens.PBES2DefaultIterations + salt := make([]byte, keylen) + _, err := io.ReadFull(rand.Reader, salt) + if err != nil { + return nil, fmt.Errorf(`failed to get random salt: %w`, err) + } + + fullsalt := []byte(alg) + fullsalt = append(fullsalt, byte(tokens.PBES2NullByteSeparator)) + fullsalt = append(fullsalt, salt...) 
+ + // Derive key using PBKDF2 + derivedKey := pbkdf2.Key(password, fullsalt, count, keylen, hashFunc) + + // Use the derived key for AES key wrap + block, err := aes.NewCipher(derivedKey) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher from derived key: %w`, err) + } + encrypted, err := Wrap(block, cek) + if err != nil { + return nil, fmt.Errorf(`failed to wrap data: %w`, err) + } + + return keygen.ByteWithSaltAndCount{ + ByteKey: encrypted, + Salt: salt, + Count: count, + }, nil +} + +// KeyEncryptAESGCMKW encrypts the CEK using AES GCM key wrap +func KeyEncryptAESGCMKW(cek []byte, _ string, sharedkey []byte) (keygen.ByteSource, error) { + block, err := aes.NewCipher(sharedkey) + if err != nil { + return nil, fmt.Errorf(`failed to create new AES cipher: %w`, err) + } + + aesgcm, err := cryptocipher.NewGCM(block) + if err != nil { + return nil, fmt.Errorf(`failed to create new GCM wrap: %w`, err) + } + + iv := make([]byte, aesgcm.NonceSize()) + _, err = io.ReadFull(rand.Reader, iv) + if err != nil { + return nil, fmt.Errorf(`failed to get random iv: %w`, err) + } + + encrypted := aesgcm.Seal(nil, iv, cek, nil) + tag := encrypted[len(encrypted)-aesgcm.Overhead():] + ciphertext := encrypted[:len(encrypted)-aesgcm.Overhead()] + + return keygen.ByteWithIVAndTag{ + ByteKey: ciphertext, + IV: iv, + Tag: tag, + }, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encryption.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encryption.go new file mode 100644 index 0000000000..ce39352fc5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/key_encryption.go @@ -0,0 +1,70 @@ +package jwebb + +import ( + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +// IsECDHES checks if the algorithm is an ECDH-ES based algorithm +func IsECDHES(alg string) bool { + switch alg { + case tokens.ECDH_ES, tokens.ECDH_ES_A128KW, tokens.ECDH_ES_A192KW, tokens.ECDH_ES_A256KW: + return true + default: + return false + } +} + +// 
IsRSA15 checks if the algorithm is RSA1_5 +func IsRSA15(alg string) bool { + return alg == tokens.RSA1_5 +} + +// IsRSAOAEP checks if the algorithm is an RSA-OAEP based algorithm +func IsRSAOAEP(alg string) bool { + switch alg { + case tokens.RSA_OAEP, tokens.RSA_OAEP_256, tokens.RSA_OAEP_384, tokens.RSA_OAEP_512: + return true + default: + return false + } +} + +// IsAESKW checks if the algorithm is an AES key wrap algorithm +func IsAESKW(alg string) bool { + switch alg { + case tokens.A128KW, tokens.A192KW, tokens.A256KW: + return true + default: + return false + } +} + +// IsAESGCMKW checks if the algorithm is an AES-GCM key wrap algorithm +func IsAESGCMKW(alg string) bool { + switch alg { + case tokens.A128GCMKW, tokens.A192GCMKW, tokens.A256GCMKW: + return true + default: + return false + } +} + +// IsPBES2 checks if the algorithm is a PBES2 based algorithm +func IsPBES2(alg string) bool { + switch alg { + case tokens.PBES2_HS256_A128KW, tokens.PBES2_HS384_A192KW, tokens.PBES2_HS512_A256KW: + return true + default: + return false + } +} + +// IsDirect checks if the algorithm is direct encryption +func IsDirect(alg string) bool { + return alg == tokens.DIRECT +} + +// IsSymmetric checks if the algorithm is a symmetric key encryption algorithm +func IsSymmetric(alg string) bool { + return IsAESKW(alg) || IsAESGCMKW(alg) || IsPBES2(alg) || IsDirect(alg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/keywrap.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/keywrap.go new file mode 100644 index 0000000000..0792d6cb8e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/jwebb/keywrap.go @@ -0,0 +1,110 @@ +package jwebb + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +var keywrapDefaultIV = []byte{0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6} + +func Wrap(kek cipher.Block, cek []byte) ([]byte, error) { + if 
len(cek)%tokens.KeywrapBlockSize != 0 { + return nil, fmt.Errorf(`keywrap input must be %d byte blocks`, tokens.KeywrapBlockSize) + } + + n := len(cek) / tokens.KeywrapChunkLen + r := make([][]byte, n) + + for i := range n { + r[i] = make([]byte, tokens.KeywrapChunkLen) + copy(r[i], cek[i*tokens.KeywrapChunkLen:]) + } + + buffer := pool.ByteSlice().GetCapacity(tokens.KeywrapChunkLen * 2) + defer pool.ByteSlice().Put(buffer) + // the byte slice has the capacity, but len is 0 + buffer = buffer[:tokens.KeywrapChunkLen*2] + + tBytes := pool.ByteSlice().GetCapacity(tokens.KeywrapChunkLen) + defer pool.ByteSlice().Put(tBytes) + // the byte slice has the capacity, but len is 0 + tBytes = tBytes[:tokens.KeywrapChunkLen] + + copy(buffer, keywrapDefaultIV) + + for t := range tokens.KeywrapRounds * n { + copy(buffer[tokens.KeywrapChunkLen:], r[t%n]) + + kek.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := range tokens.KeywrapChunkLen { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(r[t%n], buffer[tokens.KeywrapChunkLen:]) + } + + out := make([]byte, (n+1)*tokens.KeywrapChunkLen) + copy(out, buffer[:tokens.KeywrapChunkLen]) + for i := range r { + copy(out[(i+1)*tokens.KeywrapBlockSize:], r[i]) + } + + return out, nil +} + +func Unwrap(block cipher.Block, ciphertxt []byte) ([]byte, error) { + if len(ciphertxt)%tokens.KeywrapChunkLen != 0 { + return nil, fmt.Errorf(`keyunwrap input must be %d byte blocks`, tokens.KeywrapChunkLen) + } + + n := (len(ciphertxt) / tokens.KeywrapChunkLen) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, tokens.KeywrapChunkLen) + copy(r[i], ciphertxt[(i+1)*tokens.KeywrapChunkLen:]) + } + + buffer := pool.ByteSlice().GetCapacity(tokens.KeywrapChunkLen * 2) + defer pool.ByteSlice().Put(buffer) + // the byte slice has the capacity, but len is 0 + buffer = buffer[:tokens.KeywrapChunkLen*2] + + tBytes := pool.ByteSlice().GetCapacity(tokens.KeywrapChunkLen) + defer pool.ByteSlice().Put(tBytes) 
+ // the byte slice has the capacity, but len is 0 + tBytes = tBytes[:tokens.KeywrapChunkLen] + + copy(buffer[:tokens.KeywrapChunkLen], ciphertxt[:tokens.KeywrapChunkLen]) + + for t := tokens.KeywrapRounds*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := range tokens.KeywrapChunkLen { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(buffer[tokens.KeywrapChunkLen:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[tokens.KeywrapChunkLen:]) + } + + if subtle.ConstantTimeCompare(buffer[:tokens.KeywrapChunkLen], keywrapDefaultIV) == 0 { + return nil, fmt.Errorf(`key unwrap: failed to unwrap key`) + } + + out := make([]byte, n*tokens.KeywrapChunkLen) + for i := range r { + copy(out[i*tokens.KeywrapChunkLen:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/key_provider.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/key_provider.go new file mode 100644 index 0000000000..05adc04517 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/key_provider.go @@ -0,0 +1,163 @@ +package jwe + +import ( + "context" + "fmt" + "sync" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" +) + +// KeyProvider is responsible for providing key(s) to encrypt or decrypt a payload. +// Multiple `jwe.KeyProvider`s can be passed to `jwe.Encrypt()` or `jwe.Decrypt()` +// +// `jwe.Encrypt()` can only accept static key providers via `jwe.WithKey()`, +// while `jwe.Decrypt()` can accept `jwe.WithKey()`, `jwe.WithKeySet()`, +// and `jwe.WithKeyProvider()`. +// +// Understanding how this works is crucial to learn how this package works. +// Here we will use `jwe.Decrypt()` as an example to show how the `KeyProvider` +// works. +// +// `jwe.Encrypt()` is straightforward: the content encryption key is encrypted +// using the provided keys, and JWS recipient objects are created for each. 
+// +// `jwe.Decrypt()` is a bit more involved, because there are cases you +// will want to compute/deduce/guess the keys that you would like to +// use for decryption. +// +// The first thing that `jwe.Decrypt()` needs to do is to collect the +// KeyProviders from the option list that the user provided (presented in pseudocode): +// +// keyProviders := filterKeyProviders(options) +// +// Then, remember that a JWE message may contain multiple recipients in the +// message. For each recipient, we call on the KeyProviders to give us +// the key(s) to use on this CEK: +// +// for r in msg.Recipients { +// for kp in keyProviders { +// kp.FetchKeys(ctx, sink, r, msg) +// ... +// } +// } +// +// The `sink` argument passed to the KeyProvider is a temporary storage +// for the keys (either a jwk.Key or a "raw" key). The `KeyProvider` +// is responsible for sending keys into the `sink`. +// +// When called, the `KeyProvider` created by `jwe.WithKey()` sends the same key, +// `jwe.WithKeySet()` sends keys that matches a particular `kid` and `alg`, +// and finally `jwe.WithKeyProvider()` allows you to execute arbitrary +// logic to provide keys. If you are providing a custom `KeyProvider`, +// you should execute the necessary checks or retrieval of keys, and +// then send the key(s) to the sink: +// +// sink.Key(alg, key) +// +// These keys are then retrieved and tried for each recipient, until +// a match is found: +// +// keys := sink.Keys() +// for key in keys { +// if decryptJWEKey(recipient.EncryptedKey(), key) { +// return OK +// } +// } +type KeyProvider interface { + FetchKeys(context.Context, KeySink, Recipient, *Message) error +} + +// KeySink is a data storage where `jwe.KeyProvider` objects should +// send their keys to. 
+type KeySink interface { + Key(jwa.KeyEncryptionAlgorithm, any) +} + +type algKeyPair struct { + alg jwa.KeyAlgorithm + key any +} + +type algKeySink struct { + mu sync.Mutex + list []algKeyPair +} + +func (s *algKeySink) Key(alg jwa.KeyEncryptionAlgorithm, key any) { + s.mu.Lock() + s.list = append(s.list, algKeyPair{alg, key}) + s.mu.Unlock() +} + +type staticKeyProvider struct { + alg jwa.KeyEncryptionAlgorithm + key any +} + +func (kp *staticKeyProvider) FetchKeys(_ context.Context, sink KeySink, _ Recipient, _ *Message) error { + sink.Key(kp.alg, kp.key) + return nil +} + +type keySetProvider struct { + set jwk.Set + requireKid bool +} + +func (kp *keySetProvider) selectKey(sink KeySink, key jwk.Key, _ Recipient, _ *Message) error { + if usage, ok := key.KeyUsage(); ok { + if usage != "" && usage != jwk.ForEncryption.String() { + return nil + } + } + + if v, ok := key.Algorithm(); ok { + kalg, ok := jwa.LookupKeyEncryptionAlgorithm(v.String()) + if !ok { + return fmt.Errorf(`invalid key encryption algorithm %s`, v) + } + + sink.Key(kalg, key) + return nil + } + + return nil +} + +func (kp *keySetProvider) FetchKeys(_ context.Context, sink KeySink, r Recipient, msg *Message) error { + if kp.requireKid { + var key jwk.Key + + wantedKid, ok := r.Headers().KeyID() + if !ok || wantedKid == "" { + return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token but multiple keys available in key set`) + } + // Otherwise we better be able to look up the key, baby. + v, ok := kp.set.LookupKeyID(wantedKid) + if !ok { + return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid) + } + key = v + + return kp.selectKey(sink, key, r, msg) + } + + for i := range kp.set.Len() { + key, _ := kp.set.Key(i) + if err := kp.selectKey(sink, key, r, msg); err != nil { + continue + } + } + return nil +} + +// KeyProviderFunc is a type of KeyProvider that is implemented by +// a single function. 
You can use this to create ad-hoc `KeyProvider` +// instances. +type KeyProviderFunc func(context.Context, KeySink, Recipient, *Message) error + +func (kp KeyProviderFunc) FetchKeys(ctx context.Context, sink KeySink, r Recipient, msg *Message) error { + return kp(ctx, sink, r, msg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go new file mode 100644 index 0000000000..7aad833f26 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/message.go @@ -0,0 +1,560 @@ +package jwe + +import ( + "fmt" + "sort" + "strings" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +// NewRecipient creates a Recipient object +func NewRecipient() Recipient { + return &stdRecipient{ + headers: NewHeaders(), + } +} + +func (r *stdRecipient) SetHeaders(h Headers) error { + r.headers = h + return nil +} + +func (r *stdRecipient) SetEncryptedKey(v []byte) error { + r.encryptedKey = v + return nil +} + +func (r *stdRecipient) Headers() Headers { + return r.headers +} + +func (r *stdRecipient) EncryptedKey() []byte { + return r.encryptedKey +} + +type recipientMarshalProxy struct { + Headers Headers `json:"header"` + EncryptedKey string `json:"encrypted_key"` +} + +func (r *stdRecipient) UnmarshalJSON(buf []byte) error { + var proxy recipientMarshalProxy + proxy.Headers = NewHeaders() + if err := json.Unmarshal(buf, &proxy); err != nil { + return fmt.Errorf(`failed to unmarshal json into recipient: %w`, err) + } + + r.headers = proxy.Headers + decoded, err := base64.DecodeString(proxy.EncryptedKey) + if err != nil { + return fmt.Errorf(`failed to decode "encrypted_key": %w`, err) + } + r.encryptedKey = decoded + return nil +} + +func (r *stdRecipient) MarshalJSON() ([]byte, error) { + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + + 
buf.WriteString(`{"header":`) + hdrbuf, err := json.Marshal(r.headers) + if err != nil { + return nil, fmt.Errorf(`failed to marshal recipient header: %w`, err) + } + buf.Write(hdrbuf) + buf.WriteString(`,"encrypted_key":"`) + buf.WriteString(base64.EncodeToString(r.encryptedKey)) + buf.WriteString(`"}`) + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +// NewMessage creates a new message +func NewMessage() *Message { + return &Message{} +} + +func (m *Message) AuthenticatedData() []byte { + return m.authenticatedData +} + +func (m *Message) CipherText() []byte { + return m.cipherText +} + +func (m *Message) InitializationVector() []byte { + return m.initializationVector +} + +func (m *Message) Tag() []byte { + return m.tag +} + +func (m *Message) ProtectedHeaders() Headers { + return m.protectedHeaders +} + +func (m *Message) Recipients() []Recipient { + return m.recipients +} + +func (m *Message) UnprotectedHeaders() Headers { + return m.unprotectedHeaders +} + +const ( + AuthenticatedDataKey = "aad" + CipherTextKey = "ciphertext" + CountKey = "p2c" + InitializationVectorKey = "iv" + ProtectedHeadersKey = "protected" + RecipientsKey = "recipients" + SaltKey = "p2s" + TagKey = "tag" + UnprotectedHeadersKey = "unprotected" + HeadersKey = "header" + EncryptedKeyKey = "encrypted_key" +) + +func (m *Message) Set(k string, v any) error { + switch k { + case AuthenticatedDataKey: + buf, ok := v.([]byte) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, AuthenticatedDataKey) + } + m.authenticatedData = buf + case CipherTextKey: + buf, ok := v.([]byte) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, CipherTextKey) + } + m.cipherText = buf + case InitializationVectorKey: + buf, ok := v.([]byte) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, InitializationVectorKey) + } + m.initializationVector = buf + case ProtectedHeadersKey: + cv, ok := v.(Headers) + if !ok { + return fmt.Errorf(`invalid 
value %T for %s key`, v, ProtectedHeadersKey) + } + m.protectedHeaders = cv + case RecipientsKey: + cv, ok := v.([]Recipient) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, RecipientsKey) + } + m.recipients = cv + case TagKey: + buf, ok := v.([]byte) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, TagKey) + } + m.tag = buf + case UnprotectedHeadersKey: + cv, ok := v.(Headers) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, UnprotectedHeadersKey) + } + m.unprotectedHeaders = cv + default: + if m.unprotectedHeaders == nil { + m.unprotectedHeaders = NewHeaders() + } + return m.unprotectedHeaders.Set(k, v) + } + return nil +} + +type messageMarshalProxy struct { + AuthenticatedData string `json:"aad,omitempty"` + CipherText string `json:"ciphertext"` + InitializationVector string `json:"iv,omitempty"` + ProtectedHeaders json.RawMessage `json:"protected"` + Recipients []json.RawMessage `json:"recipients,omitempty"` + Tag string `json:"tag,omitempty"` + UnprotectedHeaders Headers `json:"unprotected,omitempty"` + + // For flattened structure. 
Headers is NOT a Headers type, + // so that we can detect its presence by checking proxy.Headers != nil + Headers json.RawMessage `json:"header,omitempty"` + EncryptedKey string `json:"encrypted_key,omitempty"` +} + +type jsonKV struct { + Key string + Value string +} + +func (m *Message) MarshalJSON() ([]byte, error) { + // This is slightly convoluted, but we need to encode the + // protected headers, so we do it by hand + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + enc := json.NewEncoder(buf) + + var fields []jsonKV + + if cipherText := m.CipherText(); len(cipherText) > 0 { + buf.Reset() + if err := enc.Encode(base64.EncodeToString(cipherText)); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, CipherTextKey, err) + } + fields = append(fields, jsonKV{ + Key: CipherTextKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + if iv := m.InitializationVector(); len(iv) > 0 { + buf.Reset() + if err := enc.Encode(base64.EncodeToString(iv)); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, InitializationVectorKey, err) + } + fields = append(fields, jsonKV{ + Key: InitializationVectorKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + var encodedProtectedHeaders []byte + if h := m.ProtectedHeaders(); h != nil { + v, err := h.Encode() + if err != nil { + return nil, fmt.Errorf(`failed to encode protected headers: %w`, err) + } + + encodedProtectedHeaders = v + if len(encodedProtectedHeaders) <= 2 { // '{}' + encodedProtectedHeaders = nil + } else { + fields = append(fields, jsonKV{ + Key: ProtectedHeadersKey, + Value: fmt.Sprintf("%q", encodedProtectedHeaders), + }) + } + } + + if aad := m.AuthenticatedData(); len(aad) > 0 { + aad = base64.Encode(aad) + if encodedProtectedHeaders != nil { + tmp := append(encodedProtectedHeaders, tokens.Period) + aad = append(tmp, aad...) 
+ } + + buf.Reset() + if err := enc.Encode(aad); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, AuthenticatedDataKey, err) + } + fields = append(fields, jsonKV{ + Key: AuthenticatedDataKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + if recipients := m.Recipients(); len(recipients) > 0 { + if len(recipients) == 1 { // Use flattened format + if hdrs := recipients[0].Headers(); hdrs != nil { + var skipHeaders bool + if zeroer, ok := hdrs.(isZeroer); ok { + if zeroer.isZero() { + skipHeaders = true + } + } + + if !skipHeaders { + buf.Reset() + if err := enc.Encode(hdrs); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, HeadersKey, err) + } + fields = append(fields, jsonKV{ + Key: HeadersKey, + Value: strings.TrimSpace(buf.String()), + }) + } + } + + if ek := recipients[0].EncryptedKey(); len(ek) > 0 { + buf.Reset() + if err := enc.Encode(base64.EncodeToString(ek)); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, EncryptedKeyKey, err) + } + fields = append(fields, jsonKV{ + Key: EncryptedKeyKey, + Value: strings.TrimSpace(buf.String()), + }) + } + } else { + buf.Reset() + if err := enc.Encode(recipients); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, RecipientsKey, err) + } + fields = append(fields, jsonKV{ + Key: RecipientsKey, + Value: strings.TrimSpace(buf.String()), + }) + } + } + + if tag := m.Tag(); len(tag) > 0 { + buf.Reset() + if err := enc.Encode(base64.EncodeToString(tag)); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, TagKey, err) + } + fields = append(fields, jsonKV{ + Key: TagKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + if h := m.UnprotectedHeaders(); h != nil { + unprotected, err := json.Marshal(h) + if err != nil { + return nil, fmt.Errorf(`failed to encode unprotected headers: %w`, err) + } + + if len(unprotected) > 2 { + fields = append(fields, jsonKV{ + Key: UnprotectedHeadersKey, + Value: fmt.Sprintf("%q", 
unprotected), + }) + } + } + + sort.Slice(fields, func(i, j int) bool { + return fields[i].Key < fields[j].Key + }) + buf.Reset() + fmt.Fprintf(buf, `{`) + for i, kv := range fields { + if i > 0 { + fmt.Fprintf(buf, `,`) + } + fmt.Fprintf(buf, `%q:%s`, kv.Key, kv.Value) + } + fmt.Fprintf(buf, `}`) + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (m *Message) UnmarshalJSON(buf []byte) error { + var proxy messageMarshalProxy + proxy.UnprotectedHeaders = NewHeaders() + + if err := json.Unmarshal(buf, &proxy); err != nil { + return fmt.Errorf(`failed to unmashal JSON into message: %w`, err) + } + + // Get the string value + var protectedHeadersStr string + if err := json.Unmarshal(proxy.ProtectedHeaders, &protectedHeadersStr); err != nil { + return fmt.Errorf(`failed to decode protected headers (1): %w`, err) + } + + // It's now in _quoted_ base64 string. Decode it + protectedHeadersRaw, err := base64.DecodeString(protectedHeadersStr) + if err != nil { + return fmt.Errorf(`failed to base64 decoded protected headers buffer: %w`, err) + } + + h := NewHeaders() + if err := json.Unmarshal(protectedHeadersRaw, h); err != nil { + return fmt.Errorf(`failed to decode protected headers (2): %w`, err) + } + + // if this were a flattened message, we would see a "header" and "ciphertext" + // field. TODO: do both of these conditions need to meet, or just one? + if proxy.Headers != nil || len(proxy.EncryptedKey) > 0 { + recipient := NewRecipient() + + // `"heders"` could be empty. 
If that's the case, just skip the + // following unmarshaling step + if proxy.Headers != nil { + hdrs := NewHeaders() + if err := json.Unmarshal(proxy.Headers, hdrs); err != nil { + return fmt.Errorf(`failed to decode headers field: %w`, err) + } + + if err := recipient.SetHeaders(hdrs); err != nil { + return fmt.Errorf(`failed to set new headers: %w`, err) + } + } + + if v := proxy.EncryptedKey; len(v) > 0 { + buf, err := base64.DecodeString(v) + if err != nil { + return fmt.Errorf(`failed to decode encrypted key: %w`, err) + } + if err := recipient.SetEncryptedKey(buf); err != nil { + return fmt.Errorf(`failed to set encrypted key: %w`, err) + } + } + + m.recipients = append(m.recipients, recipient) + } else { + for i, recipientbuf := range proxy.Recipients { + recipient := NewRecipient() + if err := json.Unmarshal(recipientbuf, recipient); err != nil { + return fmt.Errorf(`failed to decode recipient at index %d: %w`, i, err) + } + + m.recipients = append(m.recipients, recipient) + } + } + + if src := proxy.AuthenticatedData; len(src) > 0 { + v, err := base64.DecodeString(src) + if err != nil { + return fmt.Errorf(`failed to decode "aad": %w`, err) + } + m.authenticatedData = v + } + + if src := proxy.CipherText; len(src) > 0 { + v, err := base64.DecodeString(src) + if err != nil { + return fmt.Errorf(`failed to decode "ciphertext": %w`, err) + } + m.cipherText = v + } + + if src := proxy.InitializationVector; len(src) > 0 { + v, err := base64.DecodeString(src) + if err != nil { + return fmt.Errorf(`failed to decode "iv": %w`, err) + } + m.initializationVector = v + } + + if src := proxy.Tag; len(src) > 0 { + v, err := base64.DecodeString(src) + if err != nil { + return fmt.Errorf(`failed to decode "tag": %w`, err) + } + m.tag = v + } + + m.protectedHeaders = h + if m.storeProtectedHeaders { + // this is later used for decryption + m.rawProtectedHeaders = base64.Encode(protectedHeadersRaw) + } + + if iz, ok := proxy.UnprotectedHeaders.(isZeroer); ok { + if 
!iz.isZero() { + m.unprotectedHeaders = proxy.UnprotectedHeaders + } + } + + if len(m.recipients) == 0 { + if err := m.makeDummyRecipient(proxy.EncryptedKey, m.protectedHeaders); err != nil { + return fmt.Errorf(`failed to setup recipient: %w`, err) + } + } + + return nil +} + +func (m *Message) makeDummyRecipient(enckeybuf string, protected Headers) error { + // Recipients in this case should not contain the content encryption key, + // so move that out + hdrs, err := protected.Clone() + if err != nil { + return fmt.Errorf(`failed to clone headers: %w`, err) + } + + if err := hdrs.Remove(ContentEncryptionKey); err != nil { + return fmt.Errorf(`failed to remove %#v from public header: %w`, ContentEncryptionKey, err) + } + + enckey, err := base64.DecodeString(enckeybuf) + if err != nil { + return fmt.Errorf(`failed to decode encrypted key: %w`, err) + } + + if err := m.Set(RecipientsKey, []Recipient{ + &stdRecipient{ + headers: hdrs, + encryptedKey: enckey, + }, + }); err != nil { + return fmt.Errorf(`failed to set %s: %w`, RecipientsKey, err) + } + return nil +} + +// Compact generates a JWE message in compact serialization format from a +// `*jwe.Message` object. The object contain exactly one recipient, or +// an error is returned. 
+// +// This function currently does not take any options, but the function +// signature contains `options` for possible future expansion of the API +func Compact(m *Message, _ ...CompactOption) ([]byte, error) { + if len(m.recipients) != 1 { + return nil, fmt.Errorf(`wrong number of recipients for compact serialization`) + } + + recipient := m.recipients[0] + + // The protected header must be a merge between the message-wide + // protected header AND the recipient header + + // There's something wrong if m.protectedHeaders is nil, but + // it could happen + if m.protectedHeaders == nil { + return nil, fmt.Errorf(`invalid protected header`) + } + + hcopy, err := m.protectedHeaders.Clone() + if err != nil { + return nil, fmt.Errorf(`failed to copy protected header: %w`, err) + } + hcopy, err = hcopy.Merge(m.unprotectedHeaders) + if err != nil { + return nil, fmt.Errorf(`failed to merge unprotected header: %w`, err) + } + hcopy, err = hcopy.Merge(recipient.Headers()) + if err != nil { + return nil, fmt.Errorf(`failed to merge recipient header: %w`, err) + } + + protected, err := hcopy.Encode() + if err != nil { + return nil, fmt.Errorf(`failed to encode header: %w`, err) + } + + encryptedKey := base64.Encode(recipient.EncryptedKey()) + iv := base64.Encode(m.initializationVector) + cipher := base64.Encode(m.cipherText) + tag := base64.Encode(m.tag) + + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + + buf.Grow(len(protected) + len(encryptedKey) + len(iv) + len(cipher) + len(tag) + 4) + buf.Write(protected) + buf.WriteByte(tokens.Period) + buf.Write(encryptedKey) + buf.WriteByte(tokens.Period) + buf.Write(iv) + buf.WriteByte(tokens.Period) + buf.Write(cipher) + buf.WriteByte(tokens.Period) + buf.Write(tag) + + result := make([]byte, buf.Len()) + copy(result, buf.Bytes()) + return result, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go new file mode 100644 index 
0000000000..0437ea8733 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.go @@ -0,0 +1,109 @@ +package jwe + +import ( + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/option/v2" +) + +// WithProtectedHeaders is used to specify contents of the protected header. +// Some fields such as "enc" and "zip" will be overwritten when encryption is +// performed. +// +// There is no equivalent for unprotected headers in this implementation +func WithProtectedHeaders(h Headers) EncryptOption { + cloned, _ := h.Clone() + return &encryptOption{option.New(identProtectedHeaders{}, cloned)} +} + +type withKey struct { + alg jwa.KeyAlgorithm + key any + headers Headers +} + +type WithKeySuboption interface { + Option + withKeySuboption() +} + +type withKeySuboption struct { + Option +} + +func (*withKeySuboption) withKeySuboption() {} + +// WithPerRecipientHeaders is used to pass header values for each recipient. +// Note that these headers are by definition _unprotected_. +func WithPerRecipientHeaders(hdr Headers) WithKeySuboption { + return &withKeySuboption{option.New(identPerRecipientHeaders{}, hdr)} +} + +// WithKey is used to pass a static algorithm/key pair to either `jwe.Encrypt()` or `jwe.Decrypt()`. +// either a raw key or `jwk.Key` may be passed as `key`. +// +// The `alg` parameter is the identifier for the key encryption algorithm that should be used. +// It is of type `jwa.KeyAlgorithm` but in reality you can only pass `jwa.KeyEncryptionAlgorithm` +// types. It is this way so that the value in `(jwk.Key).Algorithm()` can be directly +// passed to the option. If you specify other algorithm types such as `jwa.SignatureAlgorithm`, +// then you will get an error when `jwe.Encrypt()` or `jwe.Decrypt()` is executed. +// +// Unlike `jwe.WithKeySet()`, the `kid` field does not need to match for the key +// to be tried. 
+func WithKey(alg jwa.KeyAlgorithm, key any, options ...WithKeySuboption) EncryptDecryptOption { + var hdr Headers + for _, option := range options { + switch option.Ident() { + case identPerRecipientHeaders{}: + if err := option.Value(&hdr); err != nil { + panic(`jwe.WithKey() requires Headers value for WithPerRecipientHeaders option`) + } + } + } + + return &encryptDecryptOption{option.New(identKey{}, &withKey{ + alg: alg, + key: key, + headers: hdr, + })} +} + +func WithKeySet(set jwk.Set, options ...WithKeySetSuboption) DecryptOption { + requireKid := true + for _, option := range options { + switch option.Ident() { + case identRequireKid{}: + if err := option.Value(&requireKid); err != nil { + panic(`jwe.WithKeySet() requires bool value for WithRequireKid option`) + } + } + } + + return WithKeyProvider(&keySetProvider{ + set: set, + requireKid: requireKid, + }) +} + +// WithJSON specifies that the result of `jwe.Encrypt()` is serialized in +// JSON format. +// +// If you pass multiple keys to `jwe.Encrypt()`, it will fail unless +// you also pass this option. 
+func WithJSON(options ...WithJSONSuboption) EncryptOption { + var pretty bool + for _, option := range options { + switch option.Ident() { + case identPretty{}: + if err := option.Value(&pretty); err != nil { + panic(`jwe.WithJSON() requires bool value for WithPretty option`) + } + } + } + + format := fmtJSON + if pretty { + format = fmtJSONPretty + } + return &encryptOption{option.New(identSerialization{}, format)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml new file mode 100644 index 0000000000..359d80944d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options.yaml @@ -0,0 +1,210 @@ +package_name: jwe +output: jwe/options_gen.go +interfaces: + - name: GlobalOption + comment: | + GlobalOption describes options that changes global settings for this package + - name: GlobalDecryptOption + comment: | + GlobalDecryptOption describes options that changes global settings and for each call of the `jwe.Decrypt` function + methods: + - globalOption + - decryptOption + - name: CompactOption + comment: | + CompactOption describes options that can be passed to `jwe.Compact` + - name: DecryptOption + comment: | + DecryptOption describes options that can be passed to `jwe.Decrypt` + - name: EncryptOption + comment: | + EncryptOption describes options that can be passed to `jwe.Encrypt` + - name: EncryptDecryptOption + methods: + - encryptOption + - decryptOption + comment: | + EncryptDecryptOption describes options that can be passed to either `jwe.Encrypt` or `jwe.Decrypt` + - name: WithJSONSuboption + concrete_type: withJSONSuboption + comment: | + JSONSuboption describes suboptions that can be passed to `jwe.WithJSON()` option + - name: WithKeySetSuboption + comment: | + WithKeySetSuboption is a suboption passed to the WithKeySet() option + - name: ParseOption + methods: + - readFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jwe.Parse` + - 
name: ReadFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jwe.ReadFile` +options: + - ident: Key + skip_option: true + - ident: Pretty + skip_option: true + - ident: ProtectedHeaders + skip_option: true + - ident: PerRecipientHeaders + skip_option: true + - ident: KeyProvider + interface: DecryptOption + argument_type: KeyProvider + - ident: Context + interface: DecryptOption + argument_type: context.Context + comment: | + WithContext specifies the context.Context object to use when decrypting a JWE message. + If not provided, context.Background() will be used. + - ident: Serialization + option_name: WithCompact + interface: EncryptOption + constant_value: fmtCompact + comment: | + WithCompact specifies that the result of `jwe.Encrypt()` is serialized in + compact format. + + By default `jwe.Encrypt()` will opt to use compact format, so you usually + do not need to specify this option other than to be explicit about it + - ident: Compress + interface: EncryptOption + argument_type: jwa.CompressionAlgorithm + comment: | + WithCompress specifies the compression algorithm to use when encrypting + a payload using `jwe.Encrypt` (Yes, we know it can only be "" or "DEF", + but the way the specification is written it could allow for more options, + and therefore this option takes an argument) + - ident: ContentEncryptionAlgorithm + interface: EncryptOption + option_name: WithContentEncryption + argument_type: jwa.ContentEncryptionAlgorithm + comment: | + WithContentEncryptionAlgorithm specifies the algorithm to encrypt the + JWE message content with. If not provided, `jwa.A256GCM` is used. + - ident: Message + interface: DecryptOption + argument_type: '*Message' + comment: | + WithMessage provides a message object to be populated by `jwe.Decrypt` + Using this option allows you to decrypt AND obtain the `jwe.Message` + in one go. 
+ - ident: RequireKid + interface: WithKeySetSuboption + argument_type: bool + comment: | + WithRequiredKid specifies whether the keys in the jwk.Set should + only be matched if the target JWE message's Key ID and the Key ID + in the given key matches. + - ident: Pretty + interface: WithJSONSuboption + argument_type: bool + comment: | + WithPretty specifies whether the JSON output should be formatted and + indented + - ident: MergeProtectedHeaders + interface: EncryptOption + argument_type: bool + comment: | + WithMergeProtectedHeaders specify that when given multiple headers + as options to `jwe.Encrypt`, these headers should be merged instead + of overwritten + - ident: FS + interface: ReadFileOption + argument_type: fs.FS + comment: | + WithFS specifies the source `fs.FS` object to read the file from. + - ident: KeyUsed + interface: DecryptOption + argument_type: 'any' + comment: | + WithKeyUsed allows you to specify the `jwe.Decrypt()` function to + return the key used for decryption. This may be useful when + you specify multiple key sources or if you pass a `jwk.Set` + and you want to know which key was successful at decrypting the + CEK. + + `v` must be a pointer to an empty `any`. Do not use + `jwk.Key` here unless you are 100% sure that all keys that you + have provided are instances of `jwk.Key` (remember that the + jwx API allows users to specify a raw key such as *rsa.PublicKey) + - ident: CEK + interface: DecryptOption + argument_type: '*[]byte' + comment: | + WithCEK allows users to specify a variable to store the CEK used in the + message upon successful decryption. The variable must be a pointer to + a byte slice, and it will only be populated if the decryption is successful. + + This option is currently considered EXPERIMENTAL, and is subject to + future changes across minor/micro versions. 
+ - ident: MaxPBES2Count + interface: GlobalOption + argument_type: int + comment: | + WithMaxPBES2Count specifies the maximum number of PBES2 iterations + to use when decrypting a message. If not specified, the default + value of 10,000 is used. + + This option has a global effect. + - ident: MaxDecompressBufferSize + interface: GlobalDecryptOption + argument_type: int64 + comment: | + WithMaxDecompressBufferSize specifies the maximum buffer size for used when + decompressing the payload of a JWE message. If a compressed JWE payload + exceeds this amount when decompressed, jwe.Decrypt will return an error. + The default value is 10MB. + + This option can be used for `jwe.Settings()`, which changes the behavior + globally, or for `jwe.Decrypt()`, which changes the behavior for that + specific call. + - ident: CBCBufferSize + interface: GlobalOption + argument_type: int64 + comment: | + WithCBCBufferSize specifies the maximum buffer size for internal + calculations, such as when AES-CBC is performed. The default value is 256MB. + If set to an invalid value, the default value is used. + In v2, this option was called MaxBufferSize. + + This option has a global effect. + - ident: LegacyHeaderMerging + interface: EncryptOption + argument_type: bool + option_name: WithLegacyHeaderMerging + comment: | + WithLegacyHeaderMerging specifies whether to perform legacy header merging + when encrypting a JWE message in JSON serialization, when there is a single recipient. + This behavior is enabled by default for backwards compatibility. + + When a JWE message is encrypted in JSON serialization, and there is only + one recipient, this library automatically serializes the message in + flattened JSON serialization format. In older versions of this library, + the protected headers and the per-recipient headers were merged together + before computing the AAD (Additional Authenticated Data), but the per-recipient + headers were kept as-is in the `header` field of the recipient object. 
+ + This behavior is not compliant with the JWE specification, which states that + the headers must be disjoint. + + Passing this option with a value of `false` disables this legacy behavior, + and while the per-recipient headers and protected headers are still merged + for the purpose of computing AAD, the per-recipient headers are cleared + after merging, so that the resulting JWE message is compliant with the + specification. + + This option has no effect when there are multiple recipients, or when + the serialization format is compact serialization. For multiple recipients + (i.e. full JSON serialization), the protected headers and per-recipient + headers are never merged, and it is the caller's responsibility to ensure + that the headers are disjoint. In compact serialization, there are no per-recipient + headers; in fact, the protected headers are the only headers that exist, + and therefore there is no possibility of header collision after merging + (note: while per-recipient headers do not make sense in compact serialization, + this library does not prevent you from setting them -- they are all just + merged into the protected headers). + + In future versions, the new behavior will be the default. New users are + encouraged to set this option to `false` now to avoid future issues. \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go new file mode 100644 index 0000000000..2d28eecb44 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwe/options_gen.go @@ -0,0 +1,392 @@ +// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT. 
+ +package jwe + +import ( + "context" + "io/fs" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/option/v2" +) + +type Option = option.Interface + +// CompactOption describes options that can be passed to `jwe.Compact` +type CompactOption interface { + Option + compactOption() +} + +type compactOption struct { + Option +} + +func (*compactOption) compactOption() {} + +// DecryptOption describes options that can be passed to `jwe.Decrypt` +type DecryptOption interface { + Option + decryptOption() +} + +type decryptOption struct { + Option +} + +func (*decryptOption) decryptOption() {} + +// EncryptDecryptOption describes options that can be passed to either `jwe.Encrypt` or `jwe.Decrypt` +type EncryptDecryptOption interface { + Option + encryptOption() + decryptOption() +} + +type encryptDecryptOption struct { + Option +} + +func (*encryptDecryptOption) encryptOption() {} + +func (*encryptDecryptOption) decryptOption() {} + +// EncryptOption describes options that can be passed to `jwe.Encrypt` +type EncryptOption interface { + Option + encryptOption() +} + +type encryptOption struct { + Option +} + +func (*encryptOption) encryptOption() {} + +// GlobalDecryptOption describes options that changes global settings and for each call of the `jwe.Decrypt` function +type GlobalDecryptOption interface { + Option + globalOption() + decryptOption() +} + +type globalDecryptOption struct { + Option +} + +func (*globalDecryptOption) globalOption() {} + +func (*globalDecryptOption) decryptOption() {} + +// GlobalOption describes options that changes global settings for this package +type GlobalOption interface { + Option + globalOption() +} + +type globalOption struct { + Option +} + +func (*globalOption) globalOption() {} + +// ReadFileOption is a type of `Option` that can be passed to `jwe.Parse` +type ParseOption interface { + Option + readFileOption() +} + +type parseOption struct { + Option +} + +func (*parseOption) readFileOption() {} + +// ReadFileOption 
is a type of `Option` that can be passed to `jwe.ReadFile` +type ReadFileOption interface { + Option + readFileOption() +} + +type readFileOption struct { + Option +} + +func (*readFileOption) readFileOption() {} + +// JSONSuboption describes suboptions that can be passed to `jwe.WithJSON()` option +type WithJSONSuboption interface { + Option + withJSONSuboption() +} + +type withJSONSuboption struct { + Option +} + +func (*withJSONSuboption) withJSONSuboption() {} + +// WithKeySetSuboption is a suboption passed to the WithKeySet() option +type WithKeySetSuboption interface { + Option + withKeySetSuboption() +} + +type withKeySetSuboption struct { + Option +} + +func (*withKeySetSuboption) withKeySetSuboption() {} + +type identCBCBufferSize struct{} +type identCEK struct{} +type identCompress struct{} +type identContentEncryptionAlgorithm struct{} +type identContext struct{} +type identFS struct{} +type identKey struct{} +type identKeyProvider struct{} +type identKeyUsed struct{} +type identLegacyHeaderMerging struct{} +type identMaxDecompressBufferSize struct{} +type identMaxPBES2Count struct{} +type identMergeProtectedHeaders struct{} +type identMessage struct{} +type identPerRecipientHeaders struct{} +type identPretty struct{} +type identProtectedHeaders struct{} +type identRequireKid struct{} +type identSerialization struct{} + +func (identCBCBufferSize) String() string { + return "WithCBCBufferSize" +} + +func (identCEK) String() string { + return "WithCEK" +} + +func (identCompress) String() string { + return "WithCompress" +} + +func (identContentEncryptionAlgorithm) String() string { + return "WithContentEncryption" +} + +func (identContext) String() string { + return "WithContext" +} + +func (identFS) String() string { + return "WithFS" +} + +func (identKey) String() string { + return "WithKey" +} + +func (identKeyProvider) String() string { + return "WithKeyProvider" +} + +func (identKeyUsed) String() string { + return "WithKeyUsed" +} + +func 
(identLegacyHeaderMerging) String() string { + return "WithLegacyHeaderMerging" +} + +func (identMaxDecompressBufferSize) String() string { + return "WithMaxDecompressBufferSize" +} + +func (identMaxPBES2Count) String() string { + return "WithMaxPBES2Count" +} + +func (identMergeProtectedHeaders) String() string { + return "WithMergeProtectedHeaders" +} + +func (identMessage) String() string { + return "WithMessage" +} + +func (identPerRecipientHeaders) String() string { + return "WithPerRecipientHeaders" +} + +func (identPretty) String() string { + return "WithPretty" +} + +func (identProtectedHeaders) String() string { + return "WithProtectedHeaders" +} + +func (identRequireKid) String() string { + return "WithRequireKid" +} + +func (identSerialization) String() string { + return "WithSerialization" +} + +// WithCBCBufferSize specifies the maximum buffer size for internal +// calculations, such as when AES-CBC is performed. The default value is 256MB. +// If set to an invalid value, the default value is used. +// In v2, this option was called MaxBufferSize. +// +// This option has a global effect. +func WithCBCBufferSize(v int64) GlobalOption { + return &globalOption{option.New(identCBCBufferSize{}, v)} +} + +// WithCEK allows users to specify a variable to store the CEK used in the +// message upon successful decryption. The variable must be a pointer to +// a byte slice, and it will only be populated if the decryption is successful. +// +// This option is currently considered EXPERIMENTAL, and is subject to +// future changes across minor/micro versions. 
+func WithCEK(v *[]byte) DecryptOption { + return &decryptOption{option.New(identCEK{}, v)} +} + +// WithCompress specifies the compression algorithm to use when encrypting +// a payload using `jwe.Encrypt` (Yes, we know it can only be "" or "DEF", +// but the way the specification is written it could allow for more options, +// and therefore this option takes an argument) +func WithCompress(v jwa.CompressionAlgorithm) EncryptOption { + return &encryptOption{option.New(identCompress{}, v)} +} + +// WithContentEncryptionAlgorithm specifies the algorithm to encrypt the +// JWE message content with. If not provided, `jwa.A256GCM` is used. +func WithContentEncryption(v jwa.ContentEncryptionAlgorithm) EncryptOption { + return &encryptOption{option.New(identContentEncryptionAlgorithm{}, v)} +} + +// WithContext specifies the context.Context object to use when decrypting a JWE message. +// If not provided, context.Background() will be used. +func WithContext(v context.Context) DecryptOption { + return &decryptOption{option.New(identContext{}, v)} +} + +// WithFS specifies the source `fs.FS` object to read the file from. +func WithFS(v fs.FS) ReadFileOption { + return &readFileOption{option.New(identFS{}, v)} +} + +func WithKeyProvider(v KeyProvider) DecryptOption { + return &decryptOption{option.New(identKeyProvider{}, v)} +} + +// WithKeyUsed allows you to specify the `jwe.Decrypt()` function to +// return the key used for decryption. This may be useful when +// you specify multiple key sources or if you pass a `jwk.Set` +// and you want to know which key was successful at decrypting the +// CEK. +// +// `v` must be a pointer to an empty `any`. 
Do not use +// `jwk.Key` here unless you are 100% sure that all keys that you +// have provided are instances of `jwk.Key` (remember that the +// jwx API allows users to specify a raw key such as *rsa.PublicKey) +func WithKeyUsed(v any) DecryptOption { + return &decryptOption{option.New(identKeyUsed{}, v)} +} + +// WithLegacyHeaderMerging specifies whether to perform legacy header merging +// when encrypting a JWE message in JSON serialization, when there is a single recipient. +// This behavior is enabled by default for backwards compatibility. +// +// When a JWE message is encrypted in JSON serialization, and there is only +// one recipient, this library automatically serializes the message in +// flattened JSON serialization format. In older versions of this library, +// the protected headers and the per-recipient headers were merged together +// before computing the AAD (Additional Authenticated Data), but the per-recipient +// headers were kept as-is in the `header` field of the recipient object. +// +// This behavior is not compliant with the JWE specification, which states that +// the headers must be disjoint. +// +// Passing this option with a value of `false` disables this legacy behavior, +// and while the per-recipient headers and protected headers are still merged +// for the purpose of computing AAD, the per-recipient headers are cleared +// after merging, so that the resulting JWE message is compliant with the +// specification. +// +// This option has no effect when there are multiple recipients, or when +// the serialization format is compact serialization. For multiple recipients +// (i.e. full JSON serialization), the protected headers and per-recipient +// headers are never merged, and it is the caller's responsibility to ensure +// that the headers are disjoint. 
In compact serialization, there are no per-recipient +// headers; in fact, the protected headers are the only headers that exist, +// and therefore there is no possibility of header collision after merging +// (note: while per-recipient headers do not make sense in compact serialization, +// this library does not prevent you from setting them -- they are all just +// merged into the protected headers). +// +// In future versions, the new behavior will be the default. New users are +// encouraged to set this option to `false` now to avoid future issues. +func WithLegacyHeaderMerging(v bool) EncryptOption { + return &encryptOption{option.New(identLegacyHeaderMerging{}, v)} +} + +// WithMaxDecompressBufferSize specifies the maximum buffer size for used when +// decompressing the payload of a JWE message. If a compressed JWE payload +// exceeds this amount when decompressed, jwe.Decrypt will return an error. +// The default value is 10MB. +// +// This option can be used for `jwe.Settings()`, which changes the behavior +// globally, or for `jwe.Decrypt()`, which changes the behavior for that +// specific call. +func WithMaxDecompressBufferSize(v int64) GlobalDecryptOption { + return &globalDecryptOption{option.New(identMaxDecompressBufferSize{}, v)} +} + +// WithMaxPBES2Count specifies the maximum number of PBES2 iterations +// to use when decrypting a message. If not specified, the default +// value of 10,000 is used. +// +// This option has a global effect. 
+func WithMaxPBES2Count(v int) GlobalOption { + return &globalOption{option.New(identMaxPBES2Count{}, v)} +} + +// WithMergeProtectedHeaders specify that when given multiple headers +// as options to `jwe.Encrypt`, these headers should be merged instead +// of overwritten +func WithMergeProtectedHeaders(v bool) EncryptOption { + return &encryptOption{option.New(identMergeProtectedHeaders{}, v)} +} + +// WithMessage provides a message object to be populated by `jwe.Decrypt` +// Using this option allows you to decrypt AND obtain the `jwe.Message` +// in one go. +func WithMessage(v *Message) DecryptOption { + return &decryptOption{option.New(identMessage{}, v)} +} + +// WithPretty specifies whether the JSON output should be formatted and +// indented +func WithPretty(v bool) WithJSONSuboption { + return &withJSONSuboption{option.New(identPretty{}, v)} +} + +// WithRequiredKid specifies whether the keys in the jwk.Set should +// only be matched if the target JWE message's Key ID and the Key ID +// in the given key matches. +func WithRequireKid(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identRequireKid{}, v)} +} + +// WithCompact specifies that the result of `jwe.Encrypt()` is serialized in +// compact format. 
+// +// By default `jwe.Encrypt()` will opt to use compact format, so you usually +// do not need to specify this option other than to be explicit about it +func WithCompact() EncryptOption { + return &encryptOption{option.New(identSerialization{}, fmtCompact)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwk/BUILD.bazel new file mode 100644 index 0000000000..8e82e1f009 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/BUILD.bazel @@ -0,0 +1,87 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "jwk", + srcs = [ + "cache.go", + "convert.go", + "ecdsa.go", + "ecdsa_gen.go", + "errors.go", + "fetch.go", + "filter.go", + "interface.go", + "interface_gen.go", + "io.go", + "jwk.go", + "key_ops.go", + "okp.go", + "okp_gen.go", + "options.go", + "options_gen.go", + "parser.go", + "rsa.go", + "rsa_gen.go", + "set.go", + "symmetric.go", + "symmetric_gen.go", + "usage.go", + "whitelist.go", + "x509.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwk", + visibility = ["//visibility:public"], + deps = [ + "//cert", + "//internal/base64", + "//internal/ecutil", + "//transform", + "//internal/json", + "//internal/pool", + "//internal/tokens", + "//jwa", + "//jwk/ecdsa", + "//jwk/jwkbb", + "@com_github_lestrrat_go_blackmagic//:blackmagic", + "@com_github_lestrrat_go_httprc_v3//:httprc", + "@com_github_lestrrat_go_option_v2//:option", + ], +) + +go_test( + name = "jwk_test", + srcs = [ + "filter_test.go", + "headers_test.go", + "jwk_internal_test.go", + "jwk_test.go", + "options_gen_test.go", + "refresh_test.go", + "set_test.go", + "x5c_test.go", + ], + data = glob(["testdata/**"]), + embed = [":jwk"], + deps = [ + "//cert", + "//internal/base64", + "//internal/jose", + "//internal/json", + "//internal/jwxtest", + "//internal/tokens", + "//jwa", + "//jwk/ecdsa", + "//jws", + "@com_github_lestrrat_go_blackmagic//:blackmagic", + 
"@com_github_lestrrat_go_httprc_v3//:httprc", + "@com_github_lestrrat_go_httprc_v3//tracesink", + "@com_github_stretchr_testify//assert", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":jwk", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jwk/README.md new file mode 100644 index 0000000000..741dd4647d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/README.md @@ -0,0 +1,215 @@ +# JWK [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v3/jwk.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwk) + +Package jwk implements JWK as described in [RFC7517](https://tools.ietf.org/html/rfc7517). +If you are looking to use JWT wit JWKs, look no further than [github.com/lestrrat-go/jwx](../jwt). + +* Parse and work with RSA/EC/Symmetric/OKP JWK types + * Convert to and from JSON + * Convert to and from raw key types (e.g. *rsa.PrivateKey) +* Ability to keep a JWKS fresh using *jwk.AutoRefresh + +## Supported key types: + +| kty | Curve | Go Key Type | +|:----|:------------------------|:----------------------------------------------| +| RSA | N/A | rsa.PrivateKey / rsa.PublicKey (2) | +| EC | P-256
P-384
P-521
secp256k1 (1) | ecdsa.PrivateKey / ecdsa.PublicKey (2) | +| oct | N/A | []byte | +| OKP | Ed25519 (1) | ed25519.PrivateKey / ed25519.PublicKey (2) | +| | X25519 (1) | (jwx/)x25519.PrivateKey / x25519.PublicKey (2)| + +* Note 1: Experimental +* Note 2: Either value or pointers accepted (e.g. rsa.PrivateKey or *rsa.PrivateKey) + +# Documentation + +Please read the [API reference](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwk), or +the how-to style documentation on how to use JWK can be found in the [docs directory](../docs/04-jwk.md). + +# Auto-Refresh a key during a long-running process + + +```go +package examples_test + +import ( + "context" + "fmt" + "time" + + "github.com/lestrrat-go/httprc/v3" + "github.com/lestrrat-go/jwx/v3/jwk" +) + +func Example_jwk_cache() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const googleCerts = `https://www.googleapis.com/oauth2/v3/certs` + + // First, set up the `jwk.Cache` object. You need to pass it a + // `context.Context` object to control the lifecycle of the background fetching goroutine. + c, err := jwk.NewCache(ctx, httprc.NewClient()) + if err != nil { + fmt.Printf("failed to create cache: %s\n", err) + return + } + + // Tell *jwk.Cache that we only want to refresh this JWKS periodically. + if err := c.Register(ctx, googleCerts); err != nil { + fmt.Printf("failed to register google JWKS: %s\n", err) + return + } + + // Pretend that this is your program's main loop +MAIN: + for { + select { + case <-ctx.Done(): + break MAIN + default: + } + keyset, err := c.Lookup(ctx, googleCerts) + if err != nil { + fmt.Printf("failed to fetch google JWKS: %s\n", err) + return + } + _ = keyset + // The returned `keyset` will always be "reasonably" new. + // + // By "reasonably" we mean that we cannot guarantee that the keys will be refreshed + // immediately after it has been rotated in the remote source. 
But it should be close\ + // enough, and should you need to forcefully refresh the token using the `(jwk.Cache).Refresh()` method. + // + // If refetching the keyset fails, a cached version will be returned from the previous + // successful sync + + // Do interesting stuff with the keyset... but here, we just + // sleep for a bit + time.Sleep(time.Second) + + // Because we're a dummy program, we just cancel the loop now. + // If this were a real program, you presumably loop forever + cancel() + } + // OUTPUT: +} +``` +source: [examples/jwk_cache_example_test.go](https://github.com/lestrrat-go/jwx/blob/v3/examples/jwk_cache_example_test.go) + + +Parse and use a JWK key: + + +```go +package examples_test + +import ( + "context" + "fmt" + "log" + + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/jwk" +) + +func Example_jwk_usage() { + // Use jwk.Cache if you intend to keep reuse the JWKS over and over + set, err := jwk.Fetch(context.Background(), "https://www.googleapis.com/oauth2/v3/certs") + if err != nil { + log.Printf("failed to parse JWK: %s", err) + return + } + + // Key sets can be serialized back to JSON + { + jsonbuf, err := json.Marshal(set) + if err != nil { + log.Printf("failed to marshal key set into JSON: %s", err) + return + } + log.Printf("%s", jsonbuf) + } + + for i := 0; i < set.Len(); i++ { + var rawkey any // This is where we would like to store the raw key, like *rsa.PrivateKey or *ecdsa.PrivateKey + key, ok := set.Key(i) // This retrieves the corresponding jwk.Key + if !ok { + log.Printf("failed to get key at index %d", i) + return + } + + // jws and jwe operations can be performed using jwk.Key, but you could also + // covert it to their "raw" forms, such as *rsa.PrivateKey or *ecdsa.PrivateKey + if err := jwk.Export(key, &rawkey); err != nil { + log.Printf("failed to create public key: %s", err) + return + } + _ = rawkey + + // You can create jwk.Key from a raw key, too + fromRawKey, err := jwk.Import(rawkey) + if 
err != nil { + log.Printf("failed to acquire raw key from jwk.Key: %s", err) + return + } + + // Keys can be serialized back to JSON + jsonbuf, err := json.Marshal(key) + if err != nil { + log.Printf("failed to marshal key into JSON: %s", err) + return + } + + fromJSONKey, err := jwk.Parse(jsonbuf) + if err != nil { + log.Printf("failed to parse json: %s", err) + return + } + _ = fromJSONKey + _ = fromRawKey + } + // OUTPUT: +} + +//nolint:govet +func Example_jwk_marshal_json() { + // JWKs that inherently involve randomness such as RSA and EC keys are + // not used in this example, because they may produce different results + // depending on the environment. + // + // (In fact, even if you use a static source of randomness, tests may fail + // because of internal changes in the Go runtime). + + raw := []byte("01234567890123456789012345678901234567890123456789ABCDEF") + + // This would create a symmetric key + key, err := jwk.Import(raw) + if err != nil { + fmt.Printf("failed to create symmetric key: %s\n", err) + return + } + if _, ok := key.(jwk.SymmetricKey); !ok { + fmt.Printf("expected jwk.SymmetricKey, got %T\n", key) + return + } + + key.Set(jwk.KeyIDKey, "mykey") + + buf, err := json.MarshalIndent(key, "", " ") + if err != nil { + fmt.Printf("failed to marshal key into JSON: %s\n", err) + return + } + fmt.Printf("%s\n", buf) + + // OUTPUT: + // { + // "k": "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODlBQkNERUY", + // "kid": "mykey", + // "kty": "oct" + // } +} +``` +source: [examples/jwk_example_test.go](https://github.com/lestrrat-go/jwx/blob/v3/examples/jwk_example_test.go) + diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go new file mode 100644 index 0000000000..6d5b00f056 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/cache.go @@ -0,0 +1,362 @@ +package jwk + +import ( + "context" + "fmt" + "io" + "net/http" + + "github.com/lestrrat-go/httprc/v3" +) + 
+type HTTPClient = httprc.HTTPClient +type ErrorSink = httprc.ErrorSink +type TraceSink = httprc.TraceSink + +// Cache is a container built on top of github.com/lestrrat-go/httprc/v3 +// that keeps track of Set object by their source URLs. +// The Set objects are stored in memory, and are refreshed automatically +// behind the scenes. +// +// Before retrieving the Set objects, the user must pre-register the +// URLs they intend to use by calling `Register()` +// +// c := jwk.NewCache(ctx, httprc.NewClient()) +// c.Register(ctx, url, options...) +// +// Once registered, you can call `Get()` to retrieve the Set object. +// +// All JWKS objects that are retrieved via this mechanism should be +// treated read-only, as they are shared among all consumers, as well +// as the `jwk.Cache` object. +// +// There are cases where `jwk.Cache` and `jwk.CachedSet` should and +// should not be used. +// +// First and foremost, do NOT use a cache for those JWKS objects that +// need constant checking. For example, unreliable or user-provided JWKS (i.e. those +// JWKS that are not from a well-known provider) should not be fetched +// through a `jwk.Cache` or `jwk.CachedSet`. +// +// For example, if you have a flaky JWKS server for development +// that can go down often, you should consider alternatives such as +// providing `http.Client` with a caching `http.RoundTripper` configured +// (see `jwk.WithHTTPClient`), setting up a reverse proxy, etc. +// These techniques allow you to set up a more robust way to both cache +// and report precise causes of the problems than using `jwk.Cache` or +// `jwk.CachedSet`. If you handle the caching at the HTTP level like this, +// you will be able to use a simple `jwk.Fetch` call and not worry about the cache. +// +// User-provided JWKS objects may also be problematic, as it may go down +// unexpectedly (and frequently!), and it will be hard to detect when +// the URLs or its contents are swapped. 
+// +// A good use-case for `jwk.Cache` and `jwk.CachedSet` are for "stable" +// JWKS objects. +// +// When we say "stable", we are thinking of JWKS that should mostly be +// ALWAYS available. A good example are those JWKS objects provided by +// major cloud providers such as Google Cloud, AWS, or Azure. +// Stable JWKS may still experience intermittent network connectivity problems, +// but you can expect that they will eventually recover in relatively +// short period of time. They rarely change URLs, and the contents are +// expected to be valid or otherwise it would cause havoc to those providers +// +// We also know that these stable JWKS objects are rotated periodically, +// which is a perfect use for `jwk.Cache` and `jwk.CachedSet`. The caches +// can be configured to periodically refresh the JWKS thereby keeping them +// fresh without extra intervention from the developer. +// +// Notice that for these recommended use-cases the requirement to check +// the validity or the availability of the JWKS objects are non-existent, +// as it is expected that they will be available and will be valid. The +// caching mechanism can hide intermittent connectivity problems as well +// as keep the objects mostly fresh. +type Cache struct { + ctrl httprc.Controller +} + +// Transformer is a specialized version of `httprc.Transformer` that implements +// conversion from a `http.Response` object to a `jwk.Set` object. Use this in +// conjection with `httprc.NewResource` to create a `httprc.Resource` object +// to auto-update `jwk.Set` objects. +type Transformer struct { + parseOptions []ParseOption +} + +func (t Transformer) Transform(_ context.Context, res *http.Response) (Set, error) { + buf, err := io.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf(`failed to read response body status: %w`, err) + } + + set, err := Parse(buf, t.parseOptions...) 
+ if err != nil { + return nil, fmt.Errorf(`failed to parse JWK set at %q: %w`, res.Request.URL.String(), err) + } + + return set, nil +} + +// NewCache creates a new `jwk.Cache` object. +// +// Under the hood, `jwk.Cache` uses `httprc.Client` manage the +// fetching and caching of JWKS objects, and thus spawns multiple goroutines +// per `jwk.Cache` object. +// +// The provided `httprc.Client` object must NOT be started prior to +// passing it to `jwk.NewCache`. The `jwk.Cache` object will start +// the `httprc.Client` object on its own. +func NewCache(ctx context.Context, client *httprc.Client) (*Cache, error) { + ctrl, err := client.Start(ctx) + if err != nil { + return nil, fmt.Errorf(`failed to start httprc.Client: %w`, err) + } + + return &Cache{ + ctrl: ctrl, + }, nil +} + +// Register registers a URL to be managed by the cache. URLs must +// be registered before issuing `Get` +// +// The `Register` method is a thin wrapper around `(httprc.Controller).Add` +func (c *Cache) Register(ctx context.Context, u string, options ...RegisterOption) error { + var parseOptions []ParseOption + var resourceOptions []httprc.NewResourceOption + waitReady := true + for _, option := range options { + switch option := option.(type) { + case ParseOption: + parseOptions = append(parseOptions, option) + case ResourceOption: + var v httprc.NewResourceOption + if err := option.Value(&v); err != nil { + return fmt.Errorf(`failed to retrieve NewResourceOption option value: %w`, err) + } + resourceOptions = append(resourceOptions, v) + default: + switch option.Ident() { + case identHTTPClient{}: + var cli HTTPClient + if err := option.Value(&cli); err != nil { + return fmt.Errorf(`failed to retrieve HTTPClient option value: %w`, err) + } + resourceOptions = append(resourceOptions, httprc.WithHTTPClient(cli)) + case identWaitReady{}: + if err := option.Value(&waitReady); err != nil { + return fmt.Errorf(`failed to retrieve WaitReady option value: %w`, err) + } + } + } + } + + r, err := 
httprc.NewResource[Set](u, &Transformer{ + parseOptions: parseOptions, + }, resourceOptions...) + if err != nil { + return fmt.Errorf(`failed to create httprc.Resource: %w`, err) + } + if err := c.ctrl.Add(ctx, r, httprc.WithWaitReady(waitReady)); err != nil { + return fmt.Errorf(`failed to add resource to httprc.Client: %w`, err) + } + + return nil +} + +// LookupResource returns the `httprc.Resource` object associated with the +// given URL `u`. If the URL has not been registered, an error is returned. +func (c *Cache) LookupResource(ctx context.Context, u string) (*httprc.ResourceBase[Set], error) { + r, err := c.ctrl.Lookup(ctx, u) + if err != nil { + return nil, fmt.Errorf(`failed to lookup resource %q: %w`, u, err) + } + //nolint:forcetypeassert + return r.(*httprc.ResourceBase[Set]), nil +} + +func (c *Cache) Lookup(ctx context.Context, u string) (Set, error) { + r, err := c.LookupResource(ctx, u) + if err != nil { + return nil, fmt.Errorf(`failed to lookup resource %q: %w`, u, err) + } + set := r.Resource() + if set == nil { + return nil, fmt.Errorf(`resource %q is not ready`, u) + } + return set, nil +} + +func (c *Cache) Ready(ctx context.Context, u string) bool { + r, err := c.LookupResource(ctx, u) + if err != nil { + return false + } + if err := r.Ready(ctx); err != nil { + return false + } + return true +} + +// Refresh is identical to Get(), except it always fetches the +// specified resource anew, and updates the cached content +// +// Please refer to the documentation for `(httprc.Cache).Refresh` for +// more details +func (c *Cache) Refresh(ctx context.Context, u string) (Set, error) { + if err := c.ctrl.Refresh(ctx, u); err != nil { + return nil, fmt.Errorf(`failed to refresh resource %q: %w`, u, err) + } + return c.Lookup(ctx, u) +} + +// IsRegistered returns true if the given URL `u` has already been registered +// in the cache. 
+func (c *Cache) IsRegistered(ctx context.Context, u string) bool { + _, err := c.LookupResource(ctx, u) + return err == nil +} + +// Unregister removes the given URL `u` from the cache. +func (c *Cache) Unregister(ctx context.Context, u string) error { + return c.ctrl.Remove(ctx, u) +} + +func (c *Cache) Shutdown(ctx context.Context) error { + return c.ctrl.ShutdownContext(ctx) +} + +// CachedSet is a thin shim over jwk.Cache that allows the user to cloak +// jwk.Cache as if it's a `jwk.Set`. Behind the scenes, the `jwk.Set` is +// retrieved from the `jwk.Cache` for every operation. +// +// Since `jwk.CachedSet` always deals with a cached version of the `jwk.Set`, +// all operations that mutate the object (such as AddKey(), RemoveKey(), et. al) +// are no-ops and return an error. +// +// Note that since this is a utility shim over `jwk.Cache`, you _will_ lose +// the ability to control the finer details (such as controlling how long to +// wait for in case of a fetch failure using `context.Context`) +// +// Make sure that you read the documentation for `jwk.Cache` as well. 
+type CachedSet interface { + Set + cached() (Set, error) // used as a marker +} + +type cachedSet struct { + r *httprc.ResourceBase[Set] +} + +func (c *Cache) CachedSet(u string) (CachedSet, error) { + r, err := c.LookupResource(context.Background(), u) + if err != nil { + return nil, fmt.Errorf(`failed to lookup resource %q: %w`, u, err) + } + return NewCachedSet(r), nil +} + +func NewCachedSet(r *httprc.ResourceBase[Set]) CachedSet { + return &cachedSet{ + r: r, + } +} + +func (cs *cachedSet) cached() (Set, error) { + if err := cs.r.Ready(context.Background()); err != nil { + return nil, fmt.Errorf(`failed to fetch resource: %w`, err) + } + return cs.r.Resource(), nil +} + +// AddKey is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*cachedSet) AddKey(_ Key) error { + return fmt.Errorf(`(jwk.Cachedset).AddKey: jwk.CachedSet is immutable`) +} + +// Clear is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*cachedSet) Clear() error { + return fmt.Errorf(`(jwk.cachedSet).Clear: jwk.CachedSet is immutable`) +} + +// Set is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*cachedSet) Set(_ string, _ any) error { + return fmt.Errorf(`(jwk.cachedSet).Set: jwk.CachedSet is immutable`) +} + +// Remove is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*cachedSet) Remove(_ string) error { + // TODO: Remove() should be renamed to Remove(string) error + return fmt.Errorf(`(jwk.cachedSet).Remove: jwk.CachedSet is immutable`) +} + +// RemoveKey is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*cachedSet) RemoveKey(_ Key) error { + return fmt.Errorf(`(jwk.cachedSet).RemoveKey: jwk.CachedSet is immutable`) +} + +func (cs *cachedSet) Clone() (Set, error) { + set, err := cs.cached() + if err != nil { + return nil, fmt.Errorf(`failed to get cached jwk.Set: %w`, err) + } + + return set.Clone() +} + +// Get returns the 
value of non-Key field stored in the jwk.Set +func (cs *cachedSet) Get(name string, dst any) error { + set, err := cs.cached() + if err != nil { + return err + } + + return set.Get(name, dst) +} + +// Key returns the Key at the specified index +func (cs *cachedSet) Key(idx int) (Key, bool) { + set, err := cs.cached() + if err != nil { + return nil, false + } + + return set.Key(idx) +} + +func (cs *cachedSet) Index(key Key) int { + set, err := cs.cached() + if err != nil { + return -1 + } + + return set.Index(key) +} + +func (cs *cachedSet) Keys() []string { + set, err := cs.cached() + if err != nil { + return nil + } + + return set.Keys() +} + +func (cs *cachedSet) Len() int { + set, err := cs.cached() + if err != nil { + return -1 + } + + return set.Len() +} + +func (cs *cachedSet) LookupKeyID(kid string) (Key, bool) { + set, err := cs.cached() + if err != nil { + return nil, false + } + + return set.LookupKeyID(kid) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/convert.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/convert.go new file mode 100644 index 0000000000..057f4b02a0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/convert.go @@ -0,0 +1,399 @@ +package jwk + +import ( + "crypto/ecdh" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "errors" + "fmt" + "math/big" + "reflect" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/internal/ecutil" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +// # Converting between Raw Keys and `jwk.Key`s +// +// A converter that converts from a raw key to a `jwk.Key` is called a KeyImporter. +// A converter that converts from a `jwk.Key` to a raw key is called a KeyExporter. + +var keyImporters = make(map[reflect.Type]KeyImporter) +var keyExporters = make(map[jwa.KeyType][]KeyExporter) + +var muKeyImporters sync.RWMutex +var muKeyExporters sync.RWMutex + +// RegisterKeyImporter registers a KeyImporter for the given raw key. 
When `jwk.Import()` is called, +// the library will look up the appropriate KeyImporter for the given raw key type (via `reflect`) +// and execute the KeyImporters in succession until either one of them succeeds, or all of them fail. +func RegisterKeyImporter(from any, conv KeyImporter) { + muKeyImporters.Lock() + defer muKeyImporters.Unlock() + keyImporters[reflect.TypeOf(from)] = conv +} + +// RegisterKeyExporter registers a KeyExporter for the given key type. When `key.Raw()` is called, +// the library will look up the appropriate KeyExporter for the given key type and execute the +// KeyExporters in succession until either one of them succeeds, or all of them fail. +func RegisterKeyExporter(kty jwa.KeyType, conv KeyExporter) { + muKeyExporters.Lock() + defer muKeyExporters.Unlock() + convs, ok := keyExporters[kty] + if !ok { + convs = []KeyExporter{conv} + } else { + convs = append([]KeyExporter{conv}, convs...) + } + keyExporters[kty] = convs +} + +// KeyImporter is used to convert from a raw key to a `jwk.Key`. mneumonic: from the PoV of the `jwk.Key`, +// we're _importing_ a raw key. +type KeyImporter interface { + // Import takes the raw key to be converted, and returns a `jwk.Key` or an error if the conversion fails. + Import(any) (Key, error) +} + +// KeyImportFunc is a convenience type to implement KeyImporter as a function. +type KeyImportFunc func(any) (Key, error) + +func (f KeyImportFunc) Import(raw any) (Key, error) { + return f(raw) +} + +// KeyExporter is used to convert from a `jwk.Key` to a raw key. mneumonic: from the PoV of the `jwk.Key`, +// we're _exporting_ it to a raw key. +type KeyExporter interface { + // Export takes the `jwk.Key` to be converted, and a hint (the raw key to be converted to). + // The hint is the object that the user requested the result to be assigned to. + // The method should return the converted raw key, or an error if the conversion fails. + // + // Third party modules MUST NOT modifiy the hint object. 
+ // + // When the user calls `key.Export(dst)`, the `dst` object is a _pointer_ to the + // object that the user wants the result to be assigned to, but the converter + // receives the _value_ that this pointer points to, to make it easier to + // detect the type of the result. + // + // Note that the second argument may be an `any` (which means that the + // user has delegated the type detection to the converter). + // + // Export must NOT modify the hint object, and should return jwk.ContinueError + // if the hint object is not compatible with the converter. + Export(Key, any) (any, error) +} + +// KeyExportFunc is a convenience type to implement KeyExporter as a function. +type KeyExportFunc func(Key, any) (any, error) + +func (f KeyExportFunc) Export(key Key, hint any) (any, error) { + return f(key, hint) +} + +func init() { + { + f := KeyImportFunc(rsaPrivateKeyToJWK) + k := rsa.PrivateKey{} + RegisterKeyImporter(k, f) + RegisterKeyImporter(&k, f) + } + { + f := KeyImportFunc(rsaPublicKeyToJWK) + k := rsa.PublicKey{} + RegisterKeyImporter(k, f) + RegisterKeyImporter(&k, f) + } + { + f := KeyImportFunc(ecdsaPrivateKeyToJWK) + k := ecdsa.PrivateKey{} + RegisterKeyImporter(k, f) + RegisterKeyImporter(&k, f) + } + { + f := KeyImportFunc(ecdsaPublicKeyToJWK) + k := ecdsa.PublicKey{} + RegisterKeyImporter(k, f) + RegisterKeyImporter(&k, f) + } + { + f := KeyImportFunc(okpPrivateKeyToJWK) + for _, k := range []any{ed25519.PrivateKey(nil)} { + RegisterKeyImporter(k, f) + } + } + { + f := KeyImportFunc(ecdhPrivateKeyToJWK) + for _, k := range []any{ecdh.PrivateKey{}, &ecdh.PrivateKey{}} { + RegisterKeyImporter(k, f) + } + } + { + f := KeyImportFunc(okpPublicKeyToJWK) + for _, k := range []any{ed25519.PublicKey(nil)} { + RegisterKeyImporter(k, f) + } + } + { + f := KeyImportFunc(ecdhPublicKeyToJWK) + for _, k := range []any{ecdh.PublicKey{}, &ecdh.PublicKey{}} { + RegisterKeyImporter(k, f) + } + } + RegisterKeyImporter([]byte(nil), KeyImportFunc(bytesToKey)) +} + +func 
ecdhPrivateKeyToJWK(src any) (Key, error) { + var raw *ecdh.PrivateKey + switch src := src.(type) { + case *ecdh.PrivateKey: + raw = src + case ecdh.PrivateKey: + raw = &src + default: + return nil, fmt.Errorf(`cannot convert key type '%T' to ECDH jwk.Key`, src) + } + + switch raw.Curve() { + case ecdh.X25519(): + return okpPrivateKeyToJWK(raw) + case ecdh.P256(): + return ecdhPrivateKeyToECJWK(raw, elliptic.P256()) + case ecdh.P384(): + return ecdhPrivateKeyToECJWK(raw, elliptic.P384()) + case ecdh.P521(): + return ecdhPrivateKeyToECJWK(raw, elliptic.P521()) + default: + return nil, fmt.Errorf(`unsupported curve %s`, raw.Curve()) + } +} + +func ecdhPrivateKeyToECJWK(raw *ecdh.PrivateKey, crv elliptic.Curve) (Key, error) { + pub := raw.PublicKey() + rawpub := pub.Bytes() + + size := ecutil.CalculateKeySize(crv) + var x, y, d big.Int + x.SetBytes(rawpub[1 : 1+size]) + y.SetBytes(rawpub[1+size:]) + d.SetBytes(raw.Bytes()) + + var ecdsaPriv ecdsa.PrivateKey + ecdsaPriv.Curve = crv + ecdsaPriv.D = &d + ecdsaPriv.X = &x + ecdsaPriv.Y = &y + return ecdsaPrivateKeyToJWK(&ecdsaPriv) +} + +func ecdhPublicKeyToJWK(src any) (Key, error) { + var raw *ecdh.PublicKey + switch src := src.(type) { + case *ecdh.PublicKey: + raw = src + case ecdh.PublicKey: + raw = &src + default: + return nil, fmt.Errorf(`cannot convert key type '%T' to ECDH jwk.Key`, src) + } + + switch raw.Curve() { + case ecdh.X25519(): + return okpPublicKeyToJWK(raw) + case ecdh.P256(): + return ecdhPublicKeyToECJWK(raw, elliptic.P256()) + case ecdh.P384(): + return ecdhPublicKeyToECJWK(raw, elliptic.P384()) + case ecdh.P521(): + return ecdhPublicKeyToECJWK(raw, elliptic.P521()) + default: + return nil, fmt.Errorf(`unsupported curve %s`, raw.Curve()) + } +} + +func ecdhPublicKeyToECJWK(raw *ecdh.PublicKey, crv elliptic.Curve) (Key, error) { + rawbytes := raw.Bytes() + size := ecutil.CalculateKeySize(crv) + var x, y big.Int + + x.SetBytes(rawbytes[1 : 1+size]) + y.SetBytes(rawbytes[1+size:]) + var ecdsaPriv 
ecdsa.PublicKey + ecdsaPriv.Curve = crv + ecdsaPriv.X = &x + ecdsaPriv.Y = &y + return ecdsaPublicKeyToJWK(&ecdsaPriv) +} + +// These may seem a bit repetitive and redandunt, but the problem is that +// each key type has its own Import method -- for example, Import(*ecdsa.PrivateKey) +// vs Import(*rsa.PrivateKey), and therefore they can't just be bundled into +// a single function. +func rsaPrivateKeyToJWK(src any) (Key, error) { + var raw *rsa.PrivateKey + switch src := src.(type) { + case *rsa.PrivateKey: + raw = src + case rsa.PrivateKey: + raw = &src + default: + return nil, fmt.Errorf(`cannot convert key type '%T' to RSA jwk.Key`, src) + } + k := newRSAPrivateKey() + if err := k.Import(raw); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err) + } + return k, nil +} + +func rsaPublicKeyToJWK(src any) (Key, error) { + var raw *rsa.PublicKey + switch src := src.(type) { + case *rsa.PublicKey: + raw = src + case rsa.PublicKey: + raw = &src + default: + return nil, fmt.Errorf(`cannot convert key type '%T' to RSA jwk.Key`, src) + } + k := newRSAPublicKey() + if err := k.Import(raw); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err) + } + return k, nil +} + +func ecdsaPrivateKeyToJWK(src any) (Key, error) { + var raw *ecdsa.PrivateKey + switch src := src.(type) { + case *ecdsa.PrivateKey: + raw = src + case ecdsa.PrivateKey: + raw = &src + default: + return nil, fmt.Errorf(`cannot convert key type '%T' to ECDSA jwk.Key`, src) + } + k := newECDSAPrivateKey() + if err := k.Import(raw); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err) + } + return k, nil +} + +func ecdsaPublicKeyToJWK(src any) (Key, error) { + var raw *ecdsa.PublicKey + switch src := src.(type) { + case *ecdsa.PublicKey: + raw = src + case ecdsa.PublicKey: + raw = &src + default: + return nil, fmt.Errorf(`cannot convert key type '%T' to ECDSA jwk.Key`, src) + } + k := 
newECDSAPublicKey() + if err := k.Import(raw); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err) + } + return k, nil +} + +func okpPrivateKeyToJWK(src any) (Key, error) { + var raw any + switch src.(type) { + case ed25519.PrivateKey, *ecdh.PrivateKey: + raw = src + case ecdh.PrivateKey: + raw = &src + default: + return nil, fmt.Errorf(`cannot convert key type '%T' to OKP jwk.Key`, src) + } + k := newOKPPrivateKey() + if err := k.Import(raw); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err) + } + return k, nil +} + +func okpPublicKeyToJWK(src any) (Key, error) { + var raw any + switch src.(type) { + case ed25519.PublicKey, *ecdh.PublicKey: + raw = src + case ecdh.PublicKey: + raw = &src + default: + return nil, fmt.Errorf(`jwk: convert raw to OKP jwk.Key: cannot convert key type '%T' to OKP jwk.Key`, src) + } + k := newOKPPublicKey() + if err := k.Import(raw); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err) + } + return k, nil +} + +func bytesToKey(src any) (Key, error) { + var raw []byte + switch src := src.(type) { + case []byte: + raw = src + default: + return nil, fmt.Errorf(`cannot convert key type '%T' to symmetric jwk.Key`, src) + } + + k := newSymmetricKey() + if err := k.Import(raw); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, raw, err) + } + return k, nil +} + +// Export converts a `jwk.Key` to a Export key. The dst argument must be a pointer to the +// object that the user wants the result to be assigned to. +// +// Normally you would pass a pointer to the zero value of the raw key type +// such as &(*rsa.PrivateKey) or &(*ecdsa.PublicKey), which gets assigned +// the converted key. 
+// +// If you do not know the exact type of a jwk.Key before attempting +// to obtain the raw key, you can simply pass a pointer to an +// empty interface as the second argument +// +// If you already know the exact type, it is recommended that you +// pass a pointer to the zero value of the actual key type for efficiency. +// +// Be careful when/if you are using a third party key type that implements +// the `jwk.Key` interface, as the first argument. This function tries hard +// to Do The Right Thing, but it is not guaranteed to work in all cases, +// especially when the object implements the `jwk.Key` interface via +// embedding. +func Export(key Key, dst any) error { + // dst better be a pointer + rv := reflect.ValueOf(dst) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf(`jwk.Export: destination object must be a pointer`) + } + muKeyExporters.RLock() + exporters, ok := keyExporters[key.KeyType()] + muKeyExporters.RUnlock() + if !ok { + return fmt.Errorf(`jwk.Export: no exporters registered for key type '%T'`, key) + } + for _, conv := range exporters { + v, err := conv.Export(key, dst) + if err != nil { + if errors.Is(err, ContinueError()) { + continue + } + return fmt.Errorf(`jwk.Export: failed to export jwk.Key to raw format: %w`, err) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`jwk.Export: failed to assign key: %w`, err) + } + return nil + } + return fmt.Errorf(`jwk.Export: no suitable exporter found for key type '%T'`, key) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/doc.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/doc.go new file mode 100644 index 0000000000..7df707521b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/doc.go @@ -0,0 +1,294 @@ +// Package jwk implements JWK as described in https://tools.ietf.org/html/rfc7517 +// +// This package implements jwk.Key to represent a single JWK, and jwk.Set to represent +// a set of JWKs. 
+// +// The `jwk.Key` type is an interface, which hides the underlying implementation for +// each key type. Each key type can further be converted to interfaces for known +// types, such as `jwk.ECDSAPrivateKey`, `jwk.RSAPublicKey`, etc. This may not necessarily +// work for third party key types (see section on "Registering a key type" below). +// +// Users can create a JWK in two ways. One is to unmarshal a JSON representation of a +// key. The second one is to use `jwk.Import()` to import a raw key and convert it to +// a jwk.Key. +// +// # Simple Usage +// +// You can parse a JWK from a JSON payload: +// +// jwk.ParseKey([]byte(`{"kty":"EC",...}`)) +// +// You can go back and forth between raw key types and JWKs: +// +// jwkKey, _ := jwk.Import(rsaPrivateKey) +// var rawKey *rsa.PRrivateKey +// jwkKey.Raw(&rawKey) +// +// You can use them to sign/verify/encrypt/decrypt: +// +// jws.Sign([]byte(`...`), jws.WithKey(jwa.RS256, jwkKey)) +// jwe.Encrypt([]byte(`...`), jwe.WithKey(jwa.RSA_OAEP, jwkKey)) +// +// See examples/jwk_parse_example_test.go and other files in the exmaples/ directory for more. +// +// # Advanced Usage: Registering a custom key type and conversion routines +// +// Caveat Emptor: Functionality around registering keys +// (KeyProbe/KeyParser/KeyImporter/KeyExporter) should be considered experimental. +// While we expect that the functionality itself will remain, the API may +// change in backward incompatible ways, even during minor version +// releases. +// +// ## tl;dr +// +// * KeyProbe: Used for parsing JWKs in JSON format. Probes hint fields to be used for later parsing by KeyParser +// * KeyParser: Used for parsing JWKs in JSON format. Parses the JSON payload into a jwk.Key using the KeyProbe as hint +// * KeyImporter: Used for converting raw key into jwk.Key. +// * KeyExporter: Used for converting jwk.Key into raw key. 
+// +// ## Overview +// +// You can add the ability to use a JWK type that this library does not +// implement out of the box. You can do this by registering your own +// KeyParser, KeyImporter, and KeyExporter instances. +// +// func init() { +// jwk.RegiserProbeField(reflect.StructField{Name: "SomeHint", Type: reflect.TypeOf(""), Tag: `json:"some_hint"`}) +// jwk.RegisterKeyParser(&MyKeyParser{}) +// jwk.RegisterKeyImporter(&MyKeyImporter{}) +// jwk.RegisterKeyExporter(&MyKeyExporter{}) +// } +// +// The KeyParser is used to parse JSON payloads and conver them into a jwk.Key. +// The KeyImporter is used to convert a raw key (e.g. *rsa.PrivateKey, *ecdsa.PrivateKey, etc) into a jwk.Key. +// The KeyExporter is used to convert a jwk.Key into a raw key. +// +// Although we believe the mechanism has been streamline quite a lot, it is also true +// that the entire process of parsing and converting keys are much more convoluted than you might +// think. Please know before hand that if you intend to add support for a new key type, +// it _WILL_ require you to learn this module pretty much in-and-out. +// +// Read on for more explanation. +// +// ## Registering a KeyParser +// +// In order to understand how parsing works, we need to explain how the `jwk.ParseKey()` works. +// +// The first thing that occurs when parsing a key is a partial +// unmarshaling of the payload into a hint / probe object. +// +// Because the `json.Unmarshal` works by calling the `UnmarshalJSON` +// method on a concrete object, we need to create a concrete object first. +// In order/ to create the appropriate Go object, we need to know which concrete +// object to create from the JSON payload, meaning we need to peek into the +// payload and figure out what type of key it is. +// +// In order to do this, we effectively need to parse the JSON payload twice. +// First, we "probe" the payload to figure out what kind of key it is, then +// we parse it again to create the actual key object. 
+// +// For probing, we create a new "probe" object (KeyProbe, which is not +// directly available to end users) to populate the object with hints from the payload. +// For example, a JWK representing an RSA key would look like: +// +// { "kty": "RSA", "n": ..., "e": ..., ... } +// +// The default KeyProbe is constructed to unmarshal "kty" and "d" fields, +// because that is enough information to determine what kind of key to +// construct. +// +// For example, if the payload contains "kty" field with the value "RSA", +// we know that it's an RSA key. If it contains "EC", we know that it's +// an EC key. Furthermore, if the payload contains some value in the "d" field, we can +// also tell that this is a private key, as only private keys need +// this field. +// +// For most cases, the default KeyProbe implementation should be sufficient. +// However, there may be cases in the future where there are new key types +// that require further information. Perhaps you are embedding another hint +// in your JWK to further specify what kind of key it is. In that case, you +// would need to probe more. +// +// Normally you can only change how an object is unmarshaled by specifying +// JSON tags when defining a struct, but we use `reflect` package capabilities +// to create an object dynamically, which is shared among all parsing operations. +// +// To add a new field to be probed, you need to register a new `reflect.StructField` +// object that has all of the information. For example, the code below would +// register a field named "MyHint" that is of type string, and has a JSON tag +// of "my_hint". 
+// +// jwk.RegisterProbeField(reflect.StructField{Name: "MyHint", Type: reflect.TypeOf(""), Tag: `json:"my_hint"`}) +// +// The value of this field can be retrieved by calling `Get()` method on the +// KeyProbe object (from the `KeyParser`'s `ParseKey()` method discussed later) +// +// var myhint string +// _ = probe.Get("MyHint", &myhint) +// +// var kty string +// _ = probe.Get("Kty", &kty) +// +// This mechanism allows you to be flexible when trying to determine the key type +// to instantiate. +// +// ## Parse via the KeyParser +// +// When `jwk.Parse` / `jwk.ParseKey` is called, the library will first probe +// the payload as discussed above. +// +// Once the probe is done, the library will iterate over the registered parsers +// and attempt to parse the key by calling their `ParseKey()` methods. +// +// The parsers will be called in reverse order that they were registered. +// This means that it will try all parsers that were registered by third +// parties, and once those are exhausted, the default parser will be used. +// +// Each parser's `ParseKey()“ method will receive three arguments: the probe object, a +// KeyUnmarshaler, and the raw payload. The probe object can be used +// as a hint to determine what kind of key to instantiate. An example +// pseudocode may look like this: +// +// var kty string +// _ = probe.Get("Kty", &kty) +// switch kty { +// case "RSA": +// // create an RSA key +// case "EC": +// // create an EC key +// ... +// } +// +// The `KeyUnmarshaler` is a thin wrapper around `json.Unmarshal`. It works almost +// identical to `json.Unmarshal`, but it allows us to add extra magic that is +// specific to this library (which users do not need to be aware of) before calling +// the actual `json.Unmarshal`. Please use the `KeyUnmarshaler` to unmarshal JWKs instead of `json.Unmarshal`. 
+// +// Putting it all together, the boiler plate for registering a new parser may look like this: +// +// func init() { +// jwk.RegisterFieldProbe(reflect.StructField{Name: "MyHint", Type: reflect.TypeOf(""), Tag: `json:"my_hint"`}) +// jwk.RegisterParser(&MyKeyParser{}) +// } +// +// type MyKeyParser struct { ... } +// func(*MyKeyParser) ParseKey(rawProbe *KeyProbe, unmarshaler KeyUnmarshaler, data []byte) (jwk.Key, error) { +// // Create concrete type +// var hint string +// if err := probe.Get("MyHint", &hint); err != nil { +// // if it doesn't have the `my_hint` field, it probably means +// // it's not for us, so we return ContinueParseError so that +// // the next parser can pick it up +// return nil, jwk.ContinueParseError() +// } +// +// // Use hint to determine concrete key type +// var key jwk.Key +// switch hint { +// case ...: +// key = = myNewAwesomeJWK() +// ... +// } +// +// return unmarshaler.Unmarshal(data, key) +// } +// +// ## Registering KeyImporter/KeyExporter +// +// If you are going to do anything with the key that was parsed by your KeyParser, +// you will need to tell the library how to convert back and forth between +// raw keys and JWKs. Conversion from raw keys to jwk.Keys are done by KeyImporters, +// and conversion from jwk.Keys to raw keys are done by KeyExporters. +// +// ## Using jwk.Import() using KeyImporter +// +// Each KeyImporter is hooked to run against a specific raw key type. +// +// When `jwk.Import()` is called, the library will iterate over all registered +// KeyImporters for the specified raw key type, and attempt to convert the raw +// key to a JWK by calling the `Import()` method on each KeyImporter. +// +// The KeyImporter's `Import()` method will receive the raw key to be converted, +// and should return a JWK or an error if the conversion fails, or the return +// `jwk.ContinueError()` if the specified raw key cannot be handled by ths/ KeyImporter. 
+// +// Once a KeyImporter is available, you will be able to pass the raw key to `jwk.Import()`. +// The following example shows how you might register a KeyImporter for a hypotheical +// mypkg.SuperSecretKey: +// +// jwk.RegisterKeyImporter(&mypkg.SuperSecretKey{}, jwk.KeyImportFunc(imnportSuperSecretKey)) +// +// func importSuperSecretKey(key any) (jwk.Key, error) { +// mykey, ok := key.(*mypkg.SuperSecretKey) +// if !ok { +// // You must return jwk.ContinueError here, or otherwise +// // processing will stop with an error +// return nil, fmt.Errorf("invalid key type %T for importer: %w", key, jwk.ContinueError()) +// } +// +// return mypkg.SuperSecretJWK{ .... }, nil // You could reuse existing JWK types if you can +// } +// +// ## Registering a KeyExporter +// +// KeyExporters are the opposite of KeyImporters: they convert a JWK to a raw key when `key.Raw(...)` is +// called. If you intend to use `key.Raw(...)` for a JWK created using one of your KeyImporters, +// you will also +// +// KeyExporters are registered by key type. For example, if you want to register a KeyExporter for +// RSA keys, you would do: +// +// jwk.RegisterKeyExporter(jwa.RSA, jwk.KeyExportFunc(exportRSAKey)) +// +// For a given JWK, it will be passed a "destination" object to store the exported raw key. For example, +// an RSA-based private JWK can be exported to a `*rsa.PrivateKey` or to a `*any`, but not +// to a `*ecdsa.PrivateKey`: +// +// var dst *rsa.PrivateKey +// key.Raw(&dst) // OK +// +// var dst any +// key.Raw(&dst) // OK +// +// var dst *ecdsa.PrivateKey +// key.Raw(&dst) // Error, if key is an RSA key +// +// You will need to handle this distinction yourself in your KeyImporter. For example, certain +// elliptic curve keys can be expressed in JWK in the same format, minus the "kty". In that case +// you will need to check for the type of the destination object and return an error if it is +// not compatible with your key. 
+// +// var raw mypkg.PrivateKey // assume a hypothetical private key type using a different curve than standard ones lie P-256 +// key, _ := jwk.Import(raw) +// // key could be jwk.ECDSAPrivateKey, with different curve than P-256 +// +// var dst *ecdsa.PrivateKey +// key.Raw(&dst) // your KeyImporter will be called with *ecdsa.PrivateKey, which is not compatible with your key +// +// To implement this your code should look like the following: +// +// jwk.RegisterKeyExporter(jwk.EC, jwk.KeyExportFunc(exportMyKey)) +// +// func exportMyKey(key jwk.Key, hint any) (any, error) { +// // check if the type of object in hint is compatible with your key +// switch hint.(type) { +// case *mypkg.PrivateKey, *any: +// // OK, we can proceed +// default: +// // Not compatible, return jwk.ContinueError +// return nil, jwk.ContinueError() +// } +// +// // key is a jwk.ECDSAPrivateKey or jwk.ECDSAPublicKey +// switch key := key.(type) { +// case jwk.ECDSAPrivateKey: +// // convert key to mypkg.PrivateKey +// case jwk.ECDSAPublicKey: +// // convert key to mypkg.PublicKey +// default: +// // Not compatible, return jwk.ContinueError +// return nil, jwk.ContinueError() +// } +// return ..., nil +// } +package jwk diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go new file mode 100644 index 0000000000..8f76d0508e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa.go @@ -0,0 +1,402 @@ +package jwk + +import ( + "crypto" + "crypto/ecdh" + "crypto/ecdsa" + "crypto/elliptic" + "fmt" + "math/big" + "reflect" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/ecutil" + "github.com/lestrrat-go/jwx/v3/jwa" + ourecdsa "github.com/lestrrat-go/jwx/v3/jwk/ecdsa" +) + +func init() { + ourecdsa.RegisterCurve(jwa.P256(), elliptic.P256()) + ourecdsa.RegisterCurve(jwa.P384(), elliptic.P384()) + ourecdsa.RegisterCurve(jwa.P521(), elliptic.P521()) + + RegisterKeyExporter(jwa.EC(), 
KeyExportFunc(ecdsaJWKToRaw)) +} + +func (k *ecdsaPublicKey) Import(rawKey *ecdsa.PublicKey) error { + k.mu.Lock() + defer k.mu.Unlock() + + if rawKey.X == nil { + return fmt.Errorf(`invalid ecdsa.PublicKey`) + } + + if rawKey.Y == nil { + return fmt.Errorf(`invalid ecdsa.PublicKey`) + } + + xbuf := ecutil.AllocECPointBuffer(rawKey.X, rawKey.Curve) + ybuf := ecutil.AllocECPointBuffer(rawKey.Y, rawKey.Curve) + defer ecutil.ReleaseECPointBuffer(xbuf) + defer ecutil.ReleaseECPointBuffer(ybuf) + + k.x = make([]byte, len(xbuf)) + copy(k.x, xbuf) + k.y = make([]byte, len(ybuf)) + copy(k.y, ybuf) + + alg, err := ourecdsa.AlgorithmFromCurve(rawKey.Curve) + if err != nil { + return fmt.Errorf(`jwk: failed to get algorithm for converting ECDSA public key to JWK: %w`, err) + } + k.crv = &alg + + return nil +} + +func (k *ecdsaPrivateKey) Import(rawKey *ecdsa.PrivateKey) error { + k.mu.Lock() + defer k.mu.Unlock() + + if rawKey.PublicKey.X == nil { + return fmt.Errorf(`invalid ecdsa.PrivateKey`) + } + if rawKey.PublicKey.Y == nil { + return fmt.Errorf(`invalid ecdsa.PrivateKey`) + } + if rawKey.D == nil { + return fmt.Errorf(`invalid ecdsa.PrivateKey`) + } + + xbuf := ecutil.AllocECPointBuffer(rawKey.PublicKey.X, rawKey.Curve) + ybuf := ecutil.AllocECPointBuffer(rawKey.PublicKey.Y, rawKey.Curve) + dbuf := ecutil.AllocECPointBuffer(rawKey.D, rawKey.Curve) + defer ecutil.ReleaseECPointBuffer(xbuf) + defer ecutil.ReleaseECPointBuffer(ybuf) + defer ecutil.ReleaseECPointBuffer(dbuf) + + k.x = make([]byte, len(xbuf)) + copy(k.x, xbuf) + k.y = make([]byte, len(ybuf)) + copy(k.y, ybuf) + k.d = make([]byte, len(dbuf)) + copy(k.d, dbuf) + + alg, err := ourecdsa.AlgorithmFromCurve(rawKey.Curve) + if err != nil { + return fmt.Errorf(`jwk: failed to get algorithm for converting ECDSA private key to JWK: %w`, err) + } + k.crv = &alg + + return nil +} + +func buildECDSAPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf, ybuf []byte) (*ecdsa.PublicKey, error) { + crv, err := 
ourecdsa.CurveFromAlgorithm(alg) + if err != nil { + return nil, fmt.Errorf(`jwk: failed to get algorithm for ECDSA public key: %w`, err) + } + + var x, y big.Int + x.SetBytes(xbuf) + y.SetBytes(ybuf) + + return &ecdsa.PublicKey{Curve: crv, X: &x, Y: &y}, nil +} + +func buildECDHPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf, ybuf []byte) (*ecdh.PublicKey, error) { + var ecdhcrv ecdh.Curve + switch alg { + case jwa.X25519(): + ecdhcrv = ecdh.X25519() + case jwa.P256(): + ecdhcrv = ecdh.P256() + case jwa.P384(): + ecdhcrv = ecdh.P384() + case jwa.P521(): + ecdhcrv = ecdh.P521() + default: + return nil, fmt.Errorf(`jwk: unsupported ECDH curve %s`, alg) + } + + return ecdhcrv.NewPublicKey(append([]byte{0x04}, append(xbuf, ybuf...)...)) +} + +func buildECDHPrivateKey(alg jwa.EllipticCurveAlgorithm, dbuf []byte) (*ecdh.PrivateKey, error) { + var ecdhcrv ecdh.Curve + switch alg { + case jwa.X25519(): + ecdhcrv = ecdh.X25519() + case jwa.P256(): + ecdhcrv = ecdh.P256() + case jwa.P384(): + ecdhcrv = ecdh.P384() + case jwa.P521(): + ecdhcrv = ecdh.P521() + default: + return nil, fmt.Errorf(`jwk: unsupported ECDH curve %s`, alg) + } + + return ecdhcrv.NewPrivateKey(dbuf) +} + +var ecdsaConvertibleTypes = []reflect.Type{ + reflect.TypeFor[ECDSAPrivateKey](), + reflect.TypeFor[ECDSAPublicKey](), +} + +func ecdsaJWKToRaw(keyif Key, hint any) (any, error) { + var isECDH bool + + extracted, err := extractEmbeddedKey(keyif, ecdsaConvertibleTypes) + if err != nil { + return nil, fmt.Errorf(`jwk: failed to extract embedded key: %w`, err) + } + + switch k := extracted.(type) { + case ECDSAPrivateKey: + switch hint.(type) { + case ecdsa.PrivateKey, *ecdsa.PrivateKey: + case ecdh.PrivateKey, *ecdh.PrivateKey: + isECDH = true + default: + rv := reflect.ValueOf(hint) + //nolint:revive + if rv.Kind() == reflect.Ptr && rv.Elem().Kind() == reflect.Interface { + // pointer to an interface value, presumably they want us to dynamically + // create an object of the right type + } else { + 
return nil, fmt.Errorf(`invalid destination object type %T: %w`, hint, ContinueError()) + } + } + + locker, ok := k.(rlocker) + if ok { + locker.rlock() + defer locker.runlock() + } + + crv, ok := k.Crv() + if !ok { + return nil, fmt.Errorf(`missing "crv" field`) + } + + if isECDH { + d, ok := k.D() + if !ok { + return nil, fmt.Errorf(`missing "d" field`) + } + return buildECDHPrivateKey(crv, d) + } + + x, ok := k.X() + if !ok { + return nil, fmt.Errorf(`missing "x" field`) + } + y, ok := k.Y() + if !ok { + return nil, fmt.Errorf(`missing "y" field`) + } + pubk, err := buildECDSAPublicKey(crv, x, y) + if err != nil { + return nil, fmt.Errorf(`failed to build public key: %w`, err) + } + + var key ecdsa.PrivateKey + var d big.Int + + origD, ok := k.D() + if !ok { + return nil, fmt.Errorf(`missing "d" field`) + } + + d.SetBytes(origD) + key.D = &d + key.PublicKey = *pubk + + return &key, nil + case ECDSAPublicKey: + switch hint.(type) { + case ecdsa.PublicKey, *ecdsa.PublicKey: + case ecdh.PublicKey, *ecdh.PublicKey: + isECDH = true + default: + rv := reflect.ValueOf(hint) + //nolint:revive + if rv.Kind() == reflect.Ptr && rv.Elem().Kind() == reflect.Interface { + // pointer to an interface value, presumably they want us to dynamically + // create an object of the right type + } else { + return nil, fmt.Errorf(`invalid destination object type %T: %w`, hint, ContinueError()) + } + } + + locker, ok := k.(rlocker) + if ok { + locker.rlock() + defer locker.runlock() + } + + crv, ok := k.Crv() + if !ok { + return nil, fmt.Errorf(`missing "crv" field`) + } + + x, ok := k.X() + if !ok { + return nil, fmt.Errorf(`missing "x" field`) + } + + y, ok := k.Y() + if !ok { + return nil, fmt.Errorf(`missing "y" field`) + } + if isECDH { + return buildECDHPublicKey(crv, x, y) + } + return buildECDSAPublicKey(crv, x, y) + default: + return nil, ContinueError() + } +} + +func makeECDSAPublicKey(src Key) (Key, error) { + newKey := newECDSAPublicKey() + + // Iterate and copy everything 
except for the bits that should not be in the public key + for _, k := range src.Keys() { + switch k { + case ECDSADKey: + continue + default: + var v any + if err := src.Get(k, &v); err != nil { + return nil, fmt.Errorf(`ecdsa: makeECDSAPublicKey: failed to get field %q: %w`, k, err) + } + + if err := newKey.Set(k, v); err != nil { + return nil, fmt.Errorf(`ecdsa: makeECDSAPublicKey: failed to set field %q: %w`, k, err) + } + } + } + + return newKey, nil +} + +func (k *ecdsaPrivateKey) PublicKey() (Key, error) { + return makeECDSAPublicKey(k) +} + +func (k *ecdsaPublicKey) PublicKey() (Key, error) { + return makeECDSAPublicKey(k) +} + +func ecdsaThumbprint(hash crypto.Hash, crv, x, y string) []byte { + h := hash.New() + fmt.Fprint(h, `{"crv":"`) + fmt.Fprint(h, crv) + fmt.Fprint(h, `","kty":"EC","x":"`) + fmt.Fprint(h, x) + fmt.Fprint(h, `","y":"`) + fmt.Fprint(h, y) + fmt.Fprint(h, `"}`) + return h.Sum(nil) +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 +func (k ecdsaPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + var key ecdsa.PublicKey + if err := Export(&k, &key); err != nil { + return nil, fmt.Errorf(`failed to export ecdsa.PublicKey for thumbprint generation: %w`, err) + } + + xbuf := ecutil.AllocECPointBuffer(key.X, key.Curve) + ybuf := ecutil.AllocECPointBuffer(key.Y, key.Curve) + defer ecutil.ReleaseECPointBuffer(xbuf) + defer ecutil.ReleaseECPointBuffer(ybuf) + + return ecdsaThumbprint( + hash, + key.Curve.Params().Name, + base64.EncodeToString(xbuf), + base64.EncodeToString(ybuf), + ), nil +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 +func (k ecdsaPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + var key ecdsa.PrivateKey + if err := Export(&k, &key); err != nil { + return nil, fmt.Errorf(`failed to export ecdsa.PrivateKey 
for thumbprint generation: %w`, err) + } + + xbuf := ecutil.AllocECPointBuffer(key.X, key.Curve) + ybuf := ecutil.AllocECPointBuffer(key.Y, key.Curve) + defer ecutil.ReleaseECPointBuffer(xbuf) + defer ecutil.ReleaseECPointBuffer(ybuf) + + return ecdsaThumbprint( + hash, + key.Curve.Params().Name, + base64.EncodeToString(xbuf), + base64.EncodeToString(ybuf), + ), nil +} + +func ecdsaValidateKey(k interface { + Crv() (jwa.EllipticCurveAlgorithm, bool) + X() ([]byte, bool) + Y() ([]byte, bool) +}, checkPrivate bool) error { + crvtyp, ok := k.Crv() + if !ok { + return fmt.Errorf(`missing "crv" field`) + } + + crv, err := ourecdsa.CurveFromAlgorithm(crvtyp) + if err != nil { + return fmt.Errorf(`invalid curve algorithm %q: %w`, crvtyp, err) + } + + keySize := ecutil.CalculateKeySize(crv) + if x, ok := k.X(); !ok || len(x) != keySize { + return fmt.Errorf(`invalid "x" length (%d) for curve %q`, len(x), crv.Params().Name) + } + + if y, ok := k.Y(); !ok || len(y) != keySize { + return fmt.Errorf(`invalid "y" length (%d) for curve %q`, len(y), crv.Params().Name) + } + + if checkPrivate { + if priv, ok := k.(keyWithD); ok { + if d, ok := priv.D(); !ok || len(d) != keySize { + return fmt.Errorf(`invalid "d" length (%d) for curve %q`, len(d), crv.Params().Name) + } + } else { + return fmt.Errorf(`missing "d" value`) + } + } + return nil +} + +func (k *ecdsaPrivateKey) Validate() error { + if err := ecdsaValidateKey(k, true); err != nil { + return NewKeyValidationError(fmt.Errorf(`jwk.ECDSAPrivateKey: %w`, err)) + } + return nil +} + +func (k *ecdsaPublicKey) Validate() error { + if err := ecdsaValidateKey(k, false); err != nil { + return NewKeyValidationError(fmt.Errorf(`jwk.ECDSAPublicKey: %w`, err)) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/BUILD.bazel new file mode 100644 index 0000000000..bf058aa649 --- /dev/null +++ 
b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/BUILD.bazel @@ -0,0 +1,15 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "ecdsa", + srcs = ["ecdsa.go"], + importpath = "github.com/lestrrat-go/jwx/v3/jwk/ecdsa", + visibility = ["//visibility:public"], + deps = ["//jwa"], +) + +alias( + name = "go_default_library", + actual = ":ecdsa", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/ecdsa.go new file mode 100644 index 0000000000..3392483218 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa/ecdsa.go @@ -0,0 +1,76 @@ +package ecdsa + +import ( + "crypto/elliptic" + "fmt" + "sync" + + "github.com/lestrrat-go/jwx/v3/jwa" +) + +var muCurves sync.RWMutex +var algToCurveMap map[jwa.EllipticCurveAlgorithm]elliptic.Curve +var curveToAlgMap map[elliptic.Curve]jwa.EllipticCurveAlgorithm +var algList []jwa.EllipticCurveAlgorithm + +func init() { + muCurves.Lock() + algToCurveMap = make(map[jwa.EllipticCurveAlgorithm]elliptic.Curve) + curveToAlgMap = make(map[elliptic.Curve]jwa.EllipticCurveAlgorithm) + muCurves.Unlock() +} + +// RegisterCurve registers a jwa.EllipticCurveAlgorithm constant and its +// corresponding elliptic.Curve object. Users do not need to call this unless +// they are registering a new ECDSA key type +func RegisterCurve(alg jwa.EllipticCurveAlgorithm, crv elliptic.Curve) { + muCurves.Lock() + defer muCurves.Unlock() + + algToCurveMap[alg] = crv + curveToAlgMap[crv] = alg + rebuildCurves() +} + +func rebuildCurves() { + l := len(algToCurveMap) + if cap(algList) < l { + algList = make([]jwa.EllipticCurveAlgorithm, 0, l) + } else { + algList = algList[:0] + } + + for alg := range algToCurveMap { + algList = append(algList, alg) + } +} + +// Algorithms returns the list of registered jwa.EllipticCurveAlgorithms +// that ca be used for ECDSA keys. 
+func Algorithms() []jwa.EllipticCurveAlgorithm { + muCurves.RLock() + defer muCurves.RUnlock() + + return algList +} + +func AlgorithmFromCurve(crv elliptic.Curve) (jwa.EllipticCurveAlgorithm, error) { + alg, ok := curveToAlgMap[crv] + if !ok { + return jwa.InvalidEllipticCurve(), fmt.Errorf(`unknown elliptic curve: %q`, crv) + } + return alg, nil +} + +func CurveFromAlgorithm(alg jwa.EllipticCurveAlgorithm) (elliptic.Curve, error) { + crv, ok := algToCurveMap[alg] + if !ok { + return nil, fmt.Errorf(`unknown elliptic curve algorithm: %q`, alg) + } + return crv, nil +} + +func IsCurveAvailable(alg jwa.EllipticCurveAlgorithm) bool { + _, ok := algToCurveMap[alg] + return ok +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa_gen.go new file mode 100644 index 0000000000..39536de3d8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/ecdsa_gen.go @@ -0,0 +1,1432 @@ +// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT. 
+ +package jwk + +import ( + "bytes" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/cert" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +const ( + ECDSACrvKey = "crv" + ECDSADKey = "d" + ECDSAXKey = "x" + ECDSAYKey = "y" +) + +type ECDSAPublicKey interface { + Key + Crv() (jwa.EllipticCurveAlgorithm, bool) + X() ([]byte, bool) + Y() ([]byte, bool) +} + +type ecdsaPublicKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + crv *jwa.EllipticCurveAlgorithm + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + x []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + y []byte + privateParams map[string]any + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ ECDSAPublicKey = &ecdsaPublicKey{} +var _ Key = &ecdsaPublicKey{} + +func newECDSAPublicKey() *ecdsaPublicKey { + return &ecdsaPublicKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]any), + } +} + +func (h ecdsaPublicKey) KeyType() jwa.KeyType { + return jwa.EC() +} + +func (h ecdsaPublicKey) rlock() { + h.mu.RLock() +} + +func (h ecdsaPublicKey) runlock() { + h.mu.RUnlock() +} + +func (h ecdsaPublicKey) IsPrivate() bool { + return false +} + +func (h *ecdsaPublicKey) Algorithm() (jwa.KeyAlgorithm, bool) { + if h.algorithm != nil { + return *(h.algorithm), true + } + return nil, false +} + 
+func (h *ecdsaPublicKey) Crv() (jwa.EllipticCurveAlgorithm, bool) { + if h.crv != nil { + return *(h.crv), true + } + return jwa.InvalidEllipticCurve(), false +} + +func (h *ecdsaPublicKey) KeyID() (string, bool) { + if h.keyID != nil { + return *(h.keyID), true + } + return "", false +} + +func (h *ecdsaPublicKey) KeyOps() (KeyOperationList, bool) { + if h.keyOps != nil { + return *(h.keyOps), true + } + return nil, false +} + +func (h *ecdsaPublicKey) KeyUsage() (string, bool) { + if h.keyUsage != nil { + return *(h.keyUsage), true + } + return "", false +} + +func (h *ecdsaPublicKey) X() ([]byte, bool) { + if h.x != nil { + return h.x, true + } + return nil, false +} + +func (h *ecdsaPublicKey) X509CertChain() (*cert.Chain, bool) { + return h.x509CertChain, true +} + +func (h *ecdsaPublicKey) X509CertThumbprint() (string, bool) { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint), true + } + return "", false +} + +func (h *ecdsaPublicKey) X509CertThumbprintS256() (string, bool) { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256), true + } + return "", false +} + +func (h *ecdsaPublicKey) X509URL() (string, bool) { + if h.x509URL != nil { + return *(h.x509URL), true + } + return "", false +} + +func (h *ecdsaPublicKey) Y() ([]byte, bool) { + if h.y != nil { + return h.y, true + } + return nil, false +} + +func (h *ecdsaPublicKey) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return true + case AlgorithmKey: + return h.algorithm != nil + case ECDSACrvKey: + return h.crv != nil + case KeyIDKey: + return h.keyID != nil + case KeyOpsKey: + return h.keyOps != nil + case KeyUsageKey: + return h.keyUsage != nil + case ECDSAXKey: + return h.x != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return 
h.x509URL != nil + case ECDSAYKey: + return h.y != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *ecdsaPublicKey) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil { + return fmt.Errorf(`ecdsaPublicKey.Get: failed to assign value for field %q to destination object: %w`, name, err) + } + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case ECDSACrvKey: + if h.crv == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.crv)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyIDKey: + if h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyOpsKey: + if h.keyOps == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyUsageKey: + if h.keyUsage == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case ECDSAXKey: + if h.x == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case 
X509CertChainKey: + if h.x509CertChain == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case ECDSAYKey: + if h.y == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.y); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *ecdsaPublicKey) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *ecdsaPublicKey) setNoLock(name string, value any) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, 
jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm: + tmp, err := jwa.KeyAlgorithmFrom(v) + if err != nil { + return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err) + } + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value) + } + return nil + case ECDSACrvKey: + if v, ok := value.(jwa.EllipticCurveAlgorithm); ok { + h.crv = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSACrvKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case ECDSAXKey: + if v, ok := value.([]byte); ok { + h.x = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSAXKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + 
return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + case ECDSAYKey: + if v, ok := value.([]byte); ok { + h.y = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSAYKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *ecdsaPublicKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case ECDSACrvKey: + k.crv = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case ECDSAXKey: + k.x = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + case ECDSAYKey: + k.y = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *ecdsaPublicKey) Clone() (Key, error) { + key, err := cloneKey(k) + if err != nil { + return nil, fmt.Errorf(`ecdsaPublicKey.Clone: %w`, err) + } + return key, nil +} + +func (k *ecdsaPublicKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *ecdsaPublicKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *ecdsaPublicKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.crv = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.x = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + h.y = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either 
tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. + if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.EC().String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg, err := jwa.KeyAlgorithmFrom(s) + if err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &alg + case ECDSACrvKey: + var decoded jwa.EllipticCurveAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSACrvKey, err) + } + h.crv = &decoded + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case ECDSAXKey: + if err := json.AssignNextBytesToken(&h.x, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAXKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain 
= &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + case ECDSAYKey: + if err := json.AssignNextBytesToken(&h.y, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAYKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.crv == nil { + return fmt.Errorf(`required field crv is missing`) + } + if h.x == nil { + return fmt.Errorf(`required field x is missing`) + } + if h.y == nil { + return fmt.Errorf(`required field y is missing`) + } + return nil +} + +func (h ecdsaPublicKey) MarshalJSON() ([]byte, error) { + data := make(map[string]any) + fields := make([]string, 0, 11) + data[KeyTypeKey] = jwa.EC() + fields = append(fields, KeyTypeKey) + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + fields = append(fields, AlgorithmKey) + } + if h.crv != nil { + data[ECDSACrvKey] = *(h.crv) + fields = append(fields, ECDSACrvKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + fields = append(fields, KeyIDKey) + } + if h.keyOps != nil { + data[KeyOpsKey] = *(h.keyOps) + fields = 
append(fields, KeyOpsKey) + } + if h.keyUsage != nil { + data[KeyUsageKey] = *(h.keyUsage) + fields = append(fields, KeyUsageKey) + } + if h.x != nil { + data[ECDSAXKey] = h.x + fields = append(fields, ECDSAXKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + fields = append(fields, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + fields = append(fields, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + fields = append(fields, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + fields = append(fields, X509URLKey) + } + if h.y != nil { + data[ECDSAYKey] = h.y + fields = append(fields, ECDSAYKey) + } + for k, v := range h.privateParams { + data[k] = v + fields = append(fields, k) + } + + sort.Strings(fields) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *ecdsaPublicKey) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 11+len(h.privateParams)) + keys = append(keys, KeyTypeKey) + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.crv != nil { + keys = append(keys, ECDSACrvKey) + } + if 
h.keyID != nil { + keys = append(keys, KeyIDKey) + } + if h.keyOps != nil { + keys = append(keys, KeyOpsKey) + } + if h.keyUsage != nil { + keys = append(keys, KeyUsageKey) + } + if h.x != nil { + keys = append(keys, ECDSAXKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + if h.y != nil { + keys = append(keys, ECDSAYKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +type ECDSAPrivateKey interface { + Key + Crv() (jwa.EllipticCurveAlgorithm, bool) + D() ([]byte, bool) + X() ([]byte, bool) + Y() ([]byte, bool) +} + +type ecdsaPrivateKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + crv *jwa.EllipticCurveAlgorithm + d []byte + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + x []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + y []byte + privateParams map[string]any + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ ECDSAPrivateKey = &ecdsaPrivateKey{} +var _ Key = &ecdsaPrivateKey{} + +func newECDSAPrivateKey() *ecdsaPrivateKey { + return &ecdsaPrivateKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]any), + } +} + +func (h ecdsaPrivateKey) KeyType() jwa.KeyType { + return jwa.EC() +} + +func (h ecdsaPrivateKey) rlock() { + h.mu.RLock() +} + +func (h 
ecdsaPrivateKey) runlock() { + h.mu.RUnlock() +} + +func (h ecdsaPrivateKey) IsPrivate() bool { + return true +} + +func (h *ecdsaPrivateKey) Algorithm() (jwa.KeyAlgorithm, bool) { + if h.algorithm != nil { + return *(h.algorithm), true + } + return nil, false +} + +func (h *ecdsaPrivateKey) Crv() (jwa.EllipticCurveAlgorithm, bool) { + if h.crv != nil { + return *(h.crv), true + } + return jwa.InvalidEllipticCurve(), false +} + +func (h *ecdsaPrivateKey) D() ([]byte, bool) { + if h.d != nil { + return h.d, true + } + return nil, false +} + +func (h *ecdsaPrivateKey) KeyID() (string, bool) { + if h.keyID != nil { + return *(h.keyID), true + } + return "", false +} + +func (h *ecdsaPrivateKey) KeyOps() (KeyOperationList, bool) { + if h.keyOps != nil { + return *(h.keyOps), true + } + return nil, false +} + +func (h *ecdsaPrivateKey) KeyUsage() (string, bool) { + if h.keyUsage != nil { + return *(h.keyUsage), true + } + return "", false +} + +func (h *ecdsaPrivateKey) X() ([]byte, bool) { + if h.x != nil { + return h.x, true + } + return nil, false +} + +func (h *ecdsaPrivateKey) X509CertChain() (*cert.Chain, bool) { + return h.x509CertChain, true +} + +func (h *ecdsaPrivateKey) X509CertThumbprint() (string, bool) { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint), true + } + return "", false +} + +func (h *ecdsaPrivateKey) X509CertThumbprintS256() (string, bool) { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256), true + } + return "", false +} + +func (h *ecdsaPrivateKey) X509URL() (string, bool) { + if h.x509URL != nil { + return *(h.x509URL), true + } + return "", false +} + +func (h *ecdsaPrivateKey) Y() ([]byte, bool) { + if h.y != nil { + return h.y, true + } + return nil, false +} + +func (h *ecdsaPrivateKey) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return true + case AlgorithmKey: + return h.algorithm != nil + case ECDSACrvKey: + return h.crv != nil + case 
ECDSADKey: + return h.d != nil + case KeyIDKey: + return h.keyID != nil + case KeyOpsKey: + return h.keyOps != nil + case KeyUsageKey: + return h.keyUsage != nil + case ECDSAXKey: + return h.x != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return h.x509URL != nil + case ECDSAYKey: + return h.y != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *ecdsaPrivateKey) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil { + return fmt.Errorf(`ecdsaPrivateKey.Get: failed to assign value for field %q to destination object: %w`, name, err) + } + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case ECDSACrvKey: + if h.crv == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.crv)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case ECDSADKey: + if h.d == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.d); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyIDKey: + if h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyOpsKey: + if h.keyOps == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err 
:= blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyUsageKey: + if h.keyUsage == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case ECDSAXKey: + if h.x == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertChainKey: + if h.x509CertChain == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case ECDSAYKey: + if h.y == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.y); err != nil { + return fmt.Errorf(`failed to assign value 
for field %q: %w`, name, err) + } + return nil + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *ecdsaPrivateKey) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *ecdsaPrivateKey) setNoLock(name string, value any) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm: + tmp, err := jwa.KeyAlgorithmFrom(v) + if err != nil { + return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err) + } + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value) + } + return nil + case ECDSACrvKey: + if v, ok := value.(jwa.EllipticCurveAlgorithm); ok { + h.crv = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSACrvKey, value) + case ECDSADKey: + if v, ok := value.([]byte); ok { + h.d = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSADKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case 
ECDSAXKey: + if v, ok := value.([]byte); ok { + h.x = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSAXKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + case ECDSAYKey: + if v, ok := value.([]byte); ok { + h.y = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSAYKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *ecdsaPrivateKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case ECDSACrvKey: + k.crv = nil + case ECDSADKey: + k.d = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case ECDSAXKey: + k.x = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + case ECDSAYKey: + k.y = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *ecdsaPrivateKey) Clone() (Key, error) { + key, err := cloneKey(k) + if err != nil { + return nil, fmt.Errorf(`ecdsaPrivateKey.Clone: %w`, err) + } + return key, nil +} + 
+func (k *ecdsaPrivateKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *ecdsaPrivateKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *ecdsaPrivateKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.crv = nil + h.d = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.x = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + h.y = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. + if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.EC().String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg, err := jwa.KeyAlgorithmFrom(s) + if err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &alg + case ECDSACrvKey: + var decoded jwa.EllipticCurveAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSACrvKey, err) + } + h.crv = &decoded + case ECDSADKey: + if err := json.AssignNextBytesToken(&h.d, 
dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSADKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case ECDSAXKey: + if err := json.AssignNextBytesToken(&h.x, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAXKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + case ECDSAYKey: + if err := json.AssignNextBytesToken(&h.y, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAYKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := 
registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.crv == nil { + return fmt.Errorf(`required field crv is missing`) + } + if h.d == nil { + return fmt.Errorf(`required field d is missing`) + } + if h.x == nil { + return fmt.Errorf(`required field x is missing`) + } + if h.y == nil { + return fmt.Errorf(`required field y is missing`) + } + return nil +} + +func (h ecdsaPrivateKey) MarshalJSON() ([]byte, error) { + data := make(map[string]any) + fields := make([]string, 0, 12) + data[KeyTypeKey] = jwa.EC() + fields = append(fields, KeyTypeKey) + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + fields = append(fields, AlgorithmKey) + } + if h.crv != nil { + data[ECDSACrvKey] = *(h.crv) + fields = append(fields, ECDSACrvKey) + } + if h.d != nil { + data[ECDSADKey] = h.d + fields = append(fields, ECDSADKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + fields = append(fields, KeyIDKey) + } + if h.keyOps != nil { + data[KeyOpsKey] = *(h.keyOps) + fields = append(fields, KeyOpsKey) + } + if h.keyUsage != nil { + data[KeyUsageKey] = *(h.keyUsage) + fields = append(fields, KeyUsageKey) + } + if h.x != nil { + data[ECDSAXKey] = h.x + fields = append(fields, ECDSAXKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + fields = append(fields, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + fields = append(fields, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + fields = append(fields, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + fields = append(fields, X509URLKey) + } + if h.y != nil { + data[ECDSAYKey] = h.y + fields = append(fields, ECDSAYKey) + } + for k, 
v := range h.privateParams { + data[k] = v + fields = append(fields, k) + } + + sort.Strings(fields) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *ecdsaPrivateKey) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 12+len(h.privateParams)) + keys = append(keys, KeyTypeKey) + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.crv != nil { + keys = append(keys, ECDSACrvKey) + } + if h.d != nil { + keys = append(keys, ECDSADKey) + } + if h.keyID != nil { + keys = append(keys, KeyIDKey) + } + if h.keyOps != nil { + keys = append(keys, KeyOpsKey) + } + if h.keyUsage != nil { + keys = append(keys, KeyUsageKey) + } + if h.x != nil { + keys = append(keys, ECDSAXKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + if h.y != nil { + keys = append(keys, ECDSAYKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +var ecdsaStandardFields KeyFilter + +func init() { + ecdsaStandardFields = 
NewFieldNameFilter(KeyTypeKey, KeyUsageKey, KeyOpsKey, AlgorithmKey, KeyIDKey, X509URLKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, ECDSACrvKey, ECDSAXKey, ECDSAYKey, ECDSADKey) +} + +// ECDSAStandardFieldsFilter returns a KeyFilter that filters out standard ECDSA fields. +func ECDSAStandardFieldsFilter() KeyFilter { + return ecdsaStandardFields +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/errors.go new file mode 100644 index 0000000000..af7e00d952 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/errors.go @@ -0,0 +1,79 @@ +package jwk + +import ( + "errors" + "fmt" +) + +var cpe = &continueError{} + +// ContinueError returns an opaque error that can be returned +// when a `KeyParser`, `KeyImporter`, or `KeyExporter` cannot handle the given payload, +// but would like the process to continue with the next handler. +func ContinueError() error { + return cpe +} + +type continueError struct{} + +func (e *continueError) Error() string { + return "continue parsing" +} + +type importError struct { + error +} + +func (e importError) Unwrap() error { + return e.error +} + +func (importError) Is(err error) bool { + _, ok := err.(importError) + return ok +} + +func importerr(f string, args ...any) error { + return importError{fmt.Errorf(`jwk.Import: `+f, args...)} +} + +var errDefaultImportError = importError{errors.New(`import error`)} + +func ImportError() error { + return errDefaultImportError +} + +type parseError struct { + error +} + +func (e parseError) Unwrap() error { + return e.error +} + +func (parseError) Is(err error) bool { + _, ok := err.(parseError) + return ok +} + +func bparseerr(prefix string, f string, args ...any) error { + return parseError{fmt.Errorf(prefix+`: `+f, args...)} +} + +func parseerr(f string, args ...any) error { + return bparseerr(`jwk.Parse`, f, args...) 
+} + +func rparseerr(f string, args ...any) error { + return bparseerr(`jwk.ParseReader`, f, args...) +} + +func sparseerr(f string, args ...any) error { + return bparseerr(`jwk.ParseString`, f, args...) +} + +var errDefaultParseError = parseError{errors.New(`parse error`)} + +func ParseError() error { + return errDefaultParseError +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go new file mode 100644 index 0000000000..293988db4b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/es256k.go @@ -0,0 +1,13 @@ +//go:build jwx_es256k + +package jwk + +import ( + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/lestrrat-go/jwx/v3/jwa" + ourecdsa "github.com/lestrrat-go/jwx/v3/jwk/ecdsa" +) + +func init() { + ourecdsa.RegisterCurve(jwa.Secp256k1(), secp256k1.S256()) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go new file mode 100644 index 0000000000..2c80a369dc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/fetch.go @@ -0,0 +1,117 @@ +package jwk + +import ( + "context" + "fmt" + "io" + "net/http" +) + +// Fetcher is an interface that represents an object that fetches a JWKS. +// Currently this is only used in the `jws.WithVerifyAuto` option. +// +// Particularly, do not confuse this as the backend to `jwk.Fetch()` function. +// If you need to control how `jwk.Fetch()` implements HTTP requests look into +// providing a custom `http.Client` object via `jwk.WithHTTPClient` option +type Fetcher interface { + Fetch(context.Context, string, ...FetchOption) (Set, error) +} + +// FetchFunc describes a type of Fetcher that is represented as a function. +// +// You can use this to wrap functions (e.g. `jwk.Fetch“) as a Fetcher object. 
+type FetchFunc func(context.Context, string, ...FetchOption) (Set, error) + +func (ff FetchFunc) Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) { + return ff(ctx, u, options...) +} + +// CachedFetcher wraps `jwk.Cache` so that it can be used as a `jwk.Fetcher`. +// +// One notable diffence from a general use fetcher is that `jwk.CachedFetcher` +// can only be used with JWKS URLs that have been registered with the cache. +// Please read the documentation fo `(jwk.CachedFetcher).Fetch` for more details. +// +// This object is intended to be used with `jws.WithVerifyAuto` option, specifically +// for a scenario where there is a very small number of JWKS URLs that are trusted +// and used to verify JWS messages. It is NOT meant to be used as a general purpose +// caching fetcher object. +type CachedFetcher struct { + cache *Cache +} + +// NewCachedFetcher creates a new `jwk.CachedFetcher` object. +func NewCachedFetcher(cache *Cache) *CachedFetcher { + return &CachedFetcher{cache} +} + +// Fetch fetches a JWKS from the cache. If the JWKS URL has not been registered with +// the cache, an error is returned. +func (f *CachedFetcher) Fetch(ctx context.Context, u string, _ ...FetchOption) (Set, error) { + if !f.cache.IsRegistered(ctx, u) { + return nil, fmt.Errorf(`jwk.CachedFetcher: url %q has not been registered`, u) + } + return f.cache.Lookup(ctx, u) +} + +// Fetch fetches a JWK resource specified by a URL. The url must be +// pointing to a resource that is supported by `net/http`. +// +// This function is just a wrapper around `net/http` and `jwk.Parse`. +// There is nothing special here, so you are safe to use your own +// mechanism to fetch the JWKS. 
+// +// If you are using the same `jwk.Set` for long periods of time during +// the lifecycle of your program, and would like to periodically refresh the +// contents of the object with the data at the remote resource, +// consider using `jwk.Cache`, which automatically refreshes +// jwk.Set objects asynchronously. +func Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) { + var parseOptions []ParseOption + //nolint:revive // I want to keep the type of `wl` as `Whitelist` instead of `InsecureWhitelist` + var wl Whitelist = InsecureWhitelist{} + var client HTTPClient = http.DefaultClient + for _, option := range options { + if parseOpt, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, parseOpt) + continue + } + + switch option.Ident() { + case identHTTPClient{}: + if err := option.Value(&client); err != nil { + return nil, fmt.Errorf(`failed to retrieve HTTPClient option value: %w`, err) + } + case identFetchWhitelist{}: + if err := option.Value(&wl); err != nil { + return nil, fmt.Errorf(`failed to retrieve fetch whitelist option value: %w`, err) + } + } + } + + if !wl.IsAllowed(u) { + return nil, fmt.Errorf(`jwk.Fetch: url %q has been rejected by whitelist`, u) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) + if err != nil { + return nil, fmt.Errorf(`jwk.Fetch: failed to create new request: %w`, err) + } + + res, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf(`jwk.Fetch: request failed: %w`, err) + } + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf(`jwk.Fetch: request returned status %d, expected 200`, res.StatusCode) + } + + buf, err := io.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf(`jwk.Fetch: failed to read response body for %q: %w`, u, err) + } + + return Parse(buf, parseOptions...) 
+} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/filter.go new file mode 100644 index 0000000000..e73b0757da --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/filter.go @@ -0,0 +1,28 @@ +package jwk + +import ( + "github.com/lestrrat-go/jwx/v3/transform" +) + +// KeyFilter is an interface that allows users to filter JWK key fields. +// It provides two methods: Filter and Reject; Filter returns a new key with only +// the fields that match the filter criteria, while Reject returns a new key with +// only the fields that DO NOT match the filter. +// +// EXPERIMENTAL: This API is experimental and its interface and behavior is +// subject to change in future releases. This API is not subject to semver +// compatibility guarantees. +type KeyFilter interface { + Filter(key Key) (Key, error) + Reject(key Key) (Key, error) +} + +// NewFieldNameFilter creates a new FieldNameFilter with the specified field names. +// +// Note that because some JWK fields are associated with the type instead of +// stored as data, this filter will not be able to remove them. An example would +// be the `kty` field: it's associated with the underlying JWK key type, and will +// always be present even if you attempt to remove it. +func NewFieldNameFilter(names ...string) KeyFilter { + return transform.NewNameBasedFilter[Key](names...) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go new file mode 100644 index 0000000000..c5a22a43fb --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface.go @@ -0,0 +1,148 @@ +package jwk + +import ( + "sync" + + "github.com/lestrrat-go/jwx/v3/internal/json" +) + +// AsymmetricKey describes a Key that represents a key in an asymmetric key pair, +// which in turn can be either a private or a public key. This interface +// allows those keys to be queried if they are one or the other. 
+type AsymmetricKey interface { + IsPrivate() bool +} + +// KeyUsageType is used to denote what this key should be used for +type KeyUsageType string + +const ( + // ForSignature is the value used in the headers to indicate that + // this key should be used for signatures + ForSignature KeyUsageType = "sig" + // ForEncryption is the value used in the headers to indicate that + // this key should be used for encrypting + ForEncryption KeyUsageType = "enc" +) + +type KeyOperation string +type KeyOperationList []KeyOperation + +const ( + KeyOpSign KeyOperation = "sign" // (compute digital signature or MAC) + KeyOpVerify KeyOperation = "verify" // (verify digital signature or MAC) + KeyOpEncrypt KeyOperation = "encrypt" // (encrypt content) + KeyOpDecrypt KeyOperation = "decrypt" // (decrypt content and validate decryption, if applicable) + KeyOpWrapKey KeyOperation = "wrapKey" // (encrypt key) + KeyOpUnwrapKey KeyOperation = "unwrapKey" // (decrypt key and validate decryption, if applicable) + KeyOpDeriveKey KeyOperation = "deriveKey" // (derive key) + KeyOpDeriveBits KeyOperation = "deriveBits" // (derive bits not to be used as a key) +) + +// Set represents JWKS object, a collection of jwk.Key objects. +// +// Sets can be safely converted to and from JSON using the standard +// `"encoding/json".Marshal` and `"encoding/json".Unmarshal`. However, +// if you do not know if the payload contains a single JWK or a JWK set, +// consider using `jwk.Parse()` to always get a `jwk.Set` out of it. +// +// Since v1.2.12, JWK sets with private parameters can be parsed as well. +// Such private parameters can be accessed via the `Field()` method. +// If a resource contains a single JWK instead of a JWK set, private parameters +// are stored in _both_ the resulting `jwk.Set` object and the `jwk.Key` object . +// +//nolint:interfacebloat +type Set interface { + // AddKey adds the specified key. If the key already exists in the set, + // an error is returned. 
+ AddKey(Key) error + + // Clear resets the list of keys associated with this set, emptying the + // internal list of `jwk.Key`s, as well as clearing any other non-key + // fields + Clear() error + + // Get returns the key at index `idx`. If the index is out of range, + // then the second return value is false. + Key(int) (Key, bool) + + // Get returns the value of a private field in the key set. + // + // For the purposes of a key set, any field other than the "keys" field is + // considered to be a private field. In other words, you cannot use this + // method to directly access the list of keys in the set + Get(string, any) error + + // Set sets the value of a single field. + // + // This method, which takes an `any`, exists because + // these objects can contain extra _arbitrary_ fields that users can + // specify, and there is no way of knowing what type they could be. + Set(string, any) error + + // Remove removes the specified non-key field from the set. + // Keys may not be removed using this method. See RemoveKey for + // removing keys. + Remove(string) error + + // Index returns the index where the given key exists, -1 otherwise + Index(Key) int + + // Len returns the number of keys in the set + Len() int + + // LookupKeyID returns the first key matching the given key id. + // + // The second return value is false if there are no keys matching the key id. + // The set *may* contain multiple keys with the same key id. If you + // need all of them, Len() and Key(int) + // + // This method is meant to be used to lookup a key with a unique ID. + // Bacauseof this, you cannot use this method to lookup keys with an empty key ID + // (i.e. `kid` is not specified, or is an empty string). + LookupKeyID(string) (Key, bool) + + // RemoveKey removes the key from the set. + // RemoveKey returns an error when the specified key does not exist + // in set. + RemoveKey(Key) error + + // Keys returns the list of keys present in the Set, except for `keys`. + // e.g. 
if you had `{"keys": ["a", "b"], "c": .., "d": ...}`, this method would + // return `["c", "d"]`. Note that the order of the keys is not guaranteed. + // + // TODO: name is confusing between this and Key() + Keys() []string + + // Clone create a new set with identical keys. Keys themselves are not cloned. + Clone() (Set, error) +} + +type set struct { + keys []Key + mu sync.RWMutex + dc DecodeCtx + privateParams map[string]any +} + +type PublicKeyer interface { + // PublicKey creates the corresponding PublicKey type for this object. + // All fields are copied onto the new public key, except for those that are not allowed. + // Returned value must not be the receiver itself. + PublicKey() (Key, error) +} + +type DecodeCtx interface { + json.DecodeCtx + IgnoreParseError() bool +} +type KeyWithDecodeCtx interface { + SetDecodeCtx(DecodeCtx) + DecodeCtx() DecodeCtx +} + +// Used internally: It's used to lock a key +type rlocker interface { + rlock() + runlock() +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface_gen.go new file mode 100644 index 0000000000..4f23d96cb0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/interface_gen.go @@ -0,0 +1,109 @@ +// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT. + +package jwk + +import ( + "crypto" + + "github.com/lestrrat-go/jwx/v3/cert" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +const ( + KeyTypeKey = "kty" + KeyUsageKey = "use" + KeyOpsKey = "key_ops" + AlgorithmKey = "alg" + KeyIDKey = "kid" + X509URLKey = "x5u" + X509CertChainKey = "x5c" + X509CertThumbprintKey = "x5t" + X509CertThumbprintS256Key = "x5t#S256" +) + +// Key defines the minimal interface for each of the +// key types. 
Their use and implementation differ significantly +// between each key type, so you should use type assertions +// to perform more specific tasks with each key +type Key interface { + + // Has returns true if the specified field has a value, even if + // the value is empty-ish (e.g. 0, false, "") as long as it has been + // explicitly set. + Has(string) bool + + // Get is used to extract the value of any field, including non-standard fields, out of the key. + // + // The first argument is the name of the field. The second argument is a pointer + // to a variable that will receive the value of the field. The method returns + // an error if the field does not exist, or if the value cannot be assigned to + // the destination variable. Note that a field is considered to "exist" even if + // the value is empty-ish (e.g. 0, false, ""), as long as it is explicitly set. + Get(string, any) error + + // Set sets the value of a single field. Note that certain fields, + // notably "kty", cannot be altered, but will not return an error + // + // This method, which takes an `any`, exists because + // these objects can contain extra _arbitrary_ fields that users can + // specify, and there is no way of knowing what type they could be + Set(string, any) error + + // Remove removes the field associated with the specified key. + // There is no way to remove the `kty` (key type). You will ALWAYS be left with one field in a jwk.Key. + Remove(string) error + // Validate performs _minimal_ checks if the data stored in the key are valid. + // By minimal, we mean that it does not check if the key is valid for use in + // cryptographic operations. For example, it does not check if an RSA key's + // `e` field is a valid exponent, or if the `n` field is a valid modulus. + // Instead, it checks for things such as the _presence_ of some required fields, + // or if certain keys' values are of particular length. 
+ // + // Note that depending on th underlying key type, use of this method requires + // that multiple fields in the key are properly populated. For example, an EC + // key's "x", "y" fields cannot be validated unless the "crv" field is populated first. + // + // Validate is never called by `UnmarshalJSON()` or `Set`. It must explicitly be + // called by the user + Validate() error + + // Thumbprint returns the JWK thumbprint using the indicated + // hashing algorithm, according to RFC 7638 + Thumbprint(crypto.Hash) ([]byte, error) + + // Keys returns a list of the keys contained in this jwk.Key. + Keys() []string + + // Clone creates a new instance of the same type + Clone() (Key, error) + + // PublicKey creates the corresponding PublicKey type for this object. + // All fields are copied onto the new public key, except for those that are not allowed. + // + // If the key is already a public key, it returns a new copy minus the disallowed fields as above. + PublicKey() (Key, error) + + // KeyType returns the `kty` of a JWK + KeyType() jwa.KeyType + // KeyUsage returns `use` of a JWK + KeyUsage() (string, bool) + // KeyOps returns `key_ops` of a JWK + KeyOps() (KeyOperationList, bool) + // Algorithm returns `alg` of a JWK + + // Algorithm returns the value of the `alg` field. + // + // This field may contain either `jwk.SignatureAlgorithm`, `jwk.KeyEncryptionAlgorithm`, or `jwk.ContentEncryptionAlgorithm`. + // This is why there exists a `jwa.KeyAlgorithm` type that encompasses both types. 
+ Algorithm() (jwa.KeyAlgorithm, bool) + // KeyID returns `kid` of a JWK + KeyID() (string, bool) + // X509URL returns `x5u` of a JWK + X509URL() (string, bool) + // X509CertChain returns `x5c` of a JWK + X509CertChain() (*cert.Chain, bool) + // X509CertThumbprint returns `x5t` of a JWK + X509CertThumbprint() (string, bool) + // X509CertThumbprintS256 returns `x5t#S256` of a JWK + X509CertThumbprintS256() (string, bool) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/io.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/io.go new file mode 100644 index 0000000000..29b30274cc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/io.go @@ -0,0 +1,42 @@ +// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT. + +package jwk + +import ( + "fmt" + "io/fs" + "os" +) + +type sysFS struct{} + +func (sysFS) Open(path string) (fs.File, error) { + return os.Open(path) +} + +func ReadFile(path string, options ...ReadFileOption) (Set, error) { + var parseOptions []ParseOption + for _, option := range options { + if po, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, po) + } + } + + var srcFS fs.FS = sysFS{} + for _, option := range options { + switch option.Ident() { + case identFS{}: + if err := option.Value(&srcFS); err != nil { + return nil, fmt.Errorf("failed to set fs.FS: %w", err) + } + } + } + + f, err := srcFS.Open(path) + if err != nil { + return nil, err + } + + defer f.Close() + return ParseReader(f, parseOptions...) 
+} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go new file mode 100644 index 0000000000..22d4950d8f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwk.go @@ -0,0 +1,710 @@ +//go:generate ../tools/cmd/genjwk.sh + +package jwk + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "slices" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" +) + +var registry = json.NewRegistry() + +func bigIntToBytes(n *big.Int) ([]byte, error) { + if n == nil { + return nil, fmt.Errorf(`invalid *big.Int value`) + } + return n.Bytes(), nil +} + +func init() { + if err := RegisterProbeField(reflect.StructField{ + Name: "Kty", + Type: reflect.TypeFor[string](), + Tag: `json:"kty"`, + }); err != nil { + panic(fmt.Errorf("failed to register mandatory probe for 'kty' field: %w", err)) + } + if err := RegisterProbeField(reflect.StructField{ + Name: "D", + Type: reflect.TypeFor[json.RawMessage](), + Tag: `json:"d,omitempty"`, + }); err != nil { + panic(fmt.Errorf("failed to register mandatory probe for 'kty' field: %w", err)) + } +} + +// Import creates a jwk.Key from the given key (RSA/ECDSA/symmetric keys). 
+// +// The constructor auto-detects the type of key to be instantiated +// based on the input type: +// +// - "crypto/rsa".PrivateKey and "crypto/rsa".PublicKey creates an RSA based key +// - "crypto/ecdsa".PrivateKey and "crypto/ecdsa".PublicKey creates an EC based key +// - "crypto/ed25519".PrivateKey and "crypto/ed25519".PublicKey creates an OKP based key +// - "crypto/ecdh".PrivateKey and "crypto/ecdh".PublicKey creates an OKP based key +// - []byte creates a symmetric key +func Import(raw any) (Key, error) { + if raw == nil { + return nil, importerr(`a non-nil key is required`) + } + + muKeyImporters.RLock() + conv, ok := keyImporters[reflect.TypeOf(raw)] + muKeyImporters.RUnlock() + if !ok { + return nil, importerr(`failed to convert %T to jwk.Key: no converters were able to convert`, raw) + } + + return conv.Import(raw) +} + +// PublicSetOf returns a new jwk.Set consisting of +// public keys of the keys contained in the set. +// +// This is useful when you are generating a set of private keys, and +// you want to generate the corresponding public versions for the +// users to verify with. +// +// Be aware that all fields will be copied onto the new public key. It is the caller's +// responsibility to remove any fields, if necessary. +func PublicSetOf(v Set) (Set, error) { + newSet := NewSet() + + n := v.Len() + for i := range n { + k, ok := v.Key(i) + if !ok { + return nil, fmt.Errorf(`key not found`) + } + pubKey, err := PublicKeyOf(k) + if err != nil { + return nil, fmt.Errorf(`failed to get public key of %T: %w`, k, err) + } + if err := newSet.AddKey(pubKey); err != nil { + return nil, fmt.Errorf(`failed to add key to public key set: %w`, err) + } + } + + return newSet, nil +} + +// PublicKeyOf returns the corresponding public version of the jwk.Key. +// If `v` is a SymmetricKey, then the same value is returned. +// If `v` is already a public key, the key itself is returned. 
+// +// If `v` is a private key type that has a `PublicKey()` method, be aware +// that all fields will be copied onto the new public key. It is the caller's +// responsibility to remove any fields, if necessary +// +// If `v` is a raw key, the key is first converted to a `jwk.Key` +func PublicKeyOf(v any) (Key, error) { + // This should catch all jwk.Key instances + if pk, ok := v.(PublicKeyer); ok { + return pk.PublicKey() + } + + jk, err := Import(v) + if err != nil { + return nil, fmt.Errorf(`jwk.PublicKeyOf: failed to convert key into JWK: %w`, err) + } + + return jk.PublicKey() +} + +// PublicRawKeyOf returns the corresponding public key of the given +// value `v` (e.g. given *rsa.PrivateKey, *rsa.PublicKey is returned) +// If `v` is already a public key, the key itself is returned. +// +// The returned value will always be a pointer to the public key, +// except when a []byte (e.g. symmetric key, ed25519 key) is passed to `v`. +// In this case, the same []byte value is returned. +// +// This function must go through converting the object once to a jwk.Key, +// then back to a raw key, so it's not exactly efficient. +func PublicRawKeyOf(v any) (any, error) { + pk, ok := v.(PublicKeyer) + if !ok { + k, err := Import(v) + if err != nil { + return nil, fmt.Errorf(`jwk.PublicRawKeyOf: failed to convert key to jwk.Key: %w`, err) + } + + pk, ok = k.(PublicKeyer) + if !ok { + return nil, fmt.Errorf(`jwk.PublicRawKeyOf: failed to convert key to jwk.PublicKeyer: %w`, err) + } + } + + pubk, err := pk.PublicKey() + if err != nil { + return nil, fmt.Errorf(`jwk.PublicRawKeyOf: failed to obtain public key from %T: %w`, v, err) + } + + var raw any + if err := Export(pubk, &raw); err != nil { + return nil, fmt.Errorf(`jwk.PublicRawKeyOf: failed to obtain raw key from %T: %w`, pubk, err) + } + return raw, nil +} + +// ParseRawKey is a combination of ParseKey and Raw. It parses a single JWK key, +// and assigns the "raw" key to the given parameter. 
The key must either be +// a pointer to an empty interface, or a pointer to the actual raw key type +// such as *rsa.PrivateKey, *ecdsa.PublicKey, *[]byte, etc. +func ParseRawKey(data []byte, rawkey any) error { + key, err := ParseKey(data) + if err != nil { + return fmt.Errorf(`failed to parse key: %w`, err) + } + + if err := Export(key, rawkey); err != nil { + return fmt.Errorf(`failed to assign to raw key variable: %w`, err) + } + + return nil +} + +type setDecodeCtx struct { + json.DecodeCtx + + ignoreParseError bool +} + +func (ctx *setDecodeCtx) IgnoreParseError() bool { + return ctx.ignoreParseError +} + +// ParseKey parses a single key JWK. Unlike `jwk.Parse` this method will +// report failure if you attempt to pass a JWK set. Only use this function +// when you know that the data is a single JWK. +// +// Given a WithPEM(true) option, this function assumes that the given input +// is PEM encoded ASN.1 DER format key. +// +// Note that a successful parsing of any type of key does NOT necessarily +// guarantee a valid key. For example, no checks against expiration dates +// are performed for certificate expiration, no checks against missing +// parameters are performed, etc. 
+func ParseKey(data []byte, options ...ParseOption) (Key, error) { + var parsePEM bool + var localReg *json.Registry + var pemDecoder PEMDecoder + for _, option := range options { + switch option.Ident() { + case identPEM{}: + if err := option.Value(&parsePEM); err != nil { + return nil, fmt.Errorf(`failed to retrieve PEM option value: %w`, err) + } + case identPEMDecoder{}: + if err := option.Value(&pemDecoder); err != nil { + return nil, fmt.Errorf(`failed to retrieve PEMDecoder option value: %w`, err) + } + case identLocalRegistry{}: + if err := option.Value(&localReg); err != nil { + return nil, fmt.Errorf(`failed to retrieve local registry option value: %w`, err) + } + case identTypedField{}: + var pair typedFieldPair // temporary var needed for typed field + if err := option.Value(&pair); err != nil { + return nil, fmt.Errorf(`failed to retrieve typed field option value: %w`, err) + } + if localReg == nil { + localReg = json.NewRegistry() + } + localReg.Register(pair.Name, pair.Value) + case identIgnoreParseError{}: + return nil, fmt.Errorf(`jwk.WithIgnoreParseError() cannot be used for ParseKey()`) + } + } + + if parsePEM { + var raw any + + // PEMDecoder should probably be deprecated, because of being a misnomer. + if pemDecoder != nil { + if err := decodeX509WithPEMDEcoder(&raw, data, pemDecoder); err != nil { + return nil, fmt.Errorf(`failed to decode PEM encoded key: %w`, err) + } + } else { + // This version takes into account the various X509 decoders that are + // pre-registered. 
+ if err := decodeX509(&raw, data); err != nil { + return nil, fmt.Errorf(`failed to decode X.509 encoded key: %w`, err) + } + } + return Import(raw) + } + + probe, err := keyProbe.Probe(data) + if err != nil { + return nil, fmt.Errorf(`jwk.Parse: failed to probe data: %w`, err) + } + + unmarshaler := keyUnmarshaler{localReg: localReg} + + muKeyParser.RLock() + parsers := make([]KeyParser, len(keyParsers)) + copy(parsers, keyParsers) + muKeyParser.RUnlock() + + for i := len(parsers) - 1; i >= 0; i-- { + parser := parsers[i] + key, err := parser.ParseKey(probe, &unmarshaler, data) + if err == nil { + return key, nil + } + + if errors.Is(err, ContinueError()) { + continue + } + + return nil, err + } + return nil, fmt.Errorf(`jwk.Parse: no parser was able to parse the key`) +} + +// Parse parses JWK from the incoming []byte. +// +// For JWK sets, this is a convenience function. You could just as well +// call `json.Unmarshal` against an empty set created by `jwk.NewSet()` +// to parse a JSON buffer into a `jwk.Set`. +// +// This function exists because many times the user does not know before hand +// if a JWK(s) resource at a remote location contains a single JWK key or +// a JWK set, and `jwk.Parse()` can handle either case, returning a JWK Set +// even if the data only contains a single JWK key +// +// If you are looking for more information on how JWKs are parsed, or if +// you know for sure that you have a single key, please see the documentation +// for `jwk.ParseKey()`. 
+func Parse(src []byte, options ...ParseOption) (Set, error) { + var parsePEM bool + var parseX509 bool + var localReg *json.Registry + var ignoreParseError bool + var pemDecoder PEMDecoder + for _, option := range options { + switch option.Ident() { + case identPEM{}: + if err := option.Value(&parsePEM); err != nil { + return nil, parseerr(`failed to retrieve PEM option value: %w`, err) + } + case identX509{}: + if err := option.Value(&parseX509); err != nil { + return nil, parseerr(`failed to retrieve X509 option value: %w`, err) + } + case identPEMDecoder{}: + if err := option.Value(&pemDecoder); err != nil { + return nil, parseerr(`failed to retrieve PEMDecoder option value: %w`, err) + } + case identIgnoreParseError{}: + if err := option.Value(&ignoreParseError); err != nil { + return nil, parseerr(`failed to retrieve IgnoreParseError option value: %w`, err) + } + case identTypedField{}: + var pair typedFieldPair // temporary var needed for typed field + if err := option.Value(&pair); err != nil { + return nil, parseerr(`failed to retrieve typed field option value: %w`, err) + } + if localReg == nil { + localReg = json.NewRegistry() + } + localReg.Register(pair.Name, pair.Value) + } + } + + s := NewSet() + + if parsePEM || parseX509 { + if pemDecoder == nil { + pemDecoder = NewPEMDecoder() + } + src = bytes.TrimSpace(src) + for len(src) > 0 { + raw, rest, err := pemDecoder.Decode(src) + if err != nil { + return nil, parseerr(`failed to parse PEM encoded key: %w`, err) + } + key, err := Import(raw) + if err != nil { + return nil, parseerr(`failed to create jwk.Key from %T: %w`, raw, err) + } + if err := s.AddKey(key); err != nil { + return nil, parseerr(`failed to add jwk.Key to set: %w`, err) + } + src = bytes.TrimSpace(rest) + } + return s, nil + } + + if localReg != nil || ignoreParseError { + dcKs, ok := s.(KeyWithDecodeCtx) + if !ok { + return nil, parseerr(`typed field was requested, but the key set (%T) does not support DecodeCtx`, s) + } + dc := 
&setDecodeCtx{ + DecodeCtx: json.NewDecodeCtx(localReg), + ignoreParseError: ignoreParseError, + } + dcKs.SetDecodeCtx(dc) + defer func() { dcKs.SetDecodeCtx(nil) }() + } + + if err := json.Unmarshal(src, s); err != nil { + return nil, parseerr(`failed to unmarshal JWK set: %w`, err) + } + + return s, nil +} + +// ParseReader parses a JWK set from the incoming byte buffer. +func ParseReader(src io.Reader, options ...ParseOption) (Set, error) { + // meh, there's no way to tell if a stream has "ended" a single + // JWKs except when we encounter an EOF, so just... ReadAll + buf, err := io.ReadAll(src) + if err != nil { + return nil, rparseerr(`failed to read from io.Reader: %w`, err) + } + + set, err := Parse(buf, options...) + if err != nil { + return nil, rparseerr(`failed to parse reader: %w`, err) + } + return set, nil +} + +// ParseString parses a JWK set from the incoming string. +func ParseString(s string, options ...ParseOption) (Set, error) { + set, err := Parse([]byte(s), options...) + if err != nil { + return nil, sparseerr(`failed to parse string: %w`, err) + } + return set, nil +} + +// AssignKeyID is a convenience function to automatically assign the "kid" +// section of the key, if it already doesn't have one. 
It uses Key.Thumbprint +// method with crypto.SHA256 as the default hashing algorithm +func AssignKeyID(key Key, options ...AssignKeyIDOption) error { + if key.Has(KeyIDKey) { + return nil + } + + hash := crypto.SHA256 + for _, option := range options { + switch option.Ident() { + case identThumbprintHash{}: + if err := option.Value(&hash); err != nil { + return fmt.Errorf(`failed to retrieve thumbprint hash option value: %w`, err) + } + } + } + + h, err := key.Thumbprint(hash) + if err != nil { + return fmt.Errorf(`failed to generate thumbprint: %w`, err) + } + + if err := key.Set(KeyIDKey, base64.EncodeToString(h)); err != nil { + return fmt.Errorf(`failed to set "kid": %w`, err) + } + + return nil +} + +// NOTE: may need to remove this to allow pluggale key types +func cloneKey(src Key) (Key, error) { + var dst Key + switch src.(type) { + case RSAPrivateKey: + dst = newRSAPrivateKey() + case RSAPublicKey: + dst = newRSAPublicKey() + case ECDSAPrivateKey: + dst = newECDSAPrivateKey() + case ECDSAPublicKey: + dst = newECDSAPublicKey() + case OKPPrivateKey: + dst = newOKPPrivateKey() + case OKPPublicKey: + dst = newOKPPublicKey() + case SymmetricKey: + dst = newSymmetricKey() + default: + return nil, fmt.Errorf(`jwk.cloneKey: unknown key type %T`, src) + } + + for _, k := range src.Keys() { + // It's absolutely + var v any + if err := src.Get(k, &v); err != nil { + return nil, fmt.Errorf(`jwk.cloneKey: failed to get %q: %w`, k, err) + } + if err := dst.Set(k, v); err != nil { + return nil, fmt.Errorf(`jwk.cloneKey: failed to set %q: %w`, k, err) + } + } + return dst, nil +} + +// Pem serializes the given jwk.Key in PEM encoded ASN.1 DER format, +// using either PKCS8 for private keys and PKIX for public keys. +// If you need to encode using PKCS1 or SEC1, you must do it yourself. +// +// # Argument must be of type jwk.Key or jwk.Set +// +// Currently only EC (including Ed25519) and RSA keys (and jwk.Set +// comprised of these key types) are supported. 
+func Pem(v any) ([]byte, error) { + var set Set + switch v := v.(type) { + case Key: + set = NewSet() + if err := set.AddKey(v); err != nil { + return nil, fmt.Errorf(`failed to add key to set: %w`, err) + } + case Set: + set = v + default: + return nil, fmt.Errorf(`argument to Pem must be either jwk.Key or jwk.Set: %T`, v) + } + + var ret []byte + for i := range set.Len() { + key, _ := set.Key(i) + typ, buf, err := asnEncode(key) + if err != nil { + return nil, fmt.Errorf(`failed to encode content for key #%d: %w`, i, err) + } + + var block pem.Block + block.Type = typ + block.Bytes = buf + ret = append(ret, pem.EncodeToMemory(&block)...) + } + return ret, nil +} + +func asnEncode(key Key) (string, []byte, error) { + switch key := key.(type) { + case ECDSAPrivateKey: + var rawkey ecdsa.PrivateKey + if err := Export(key, &rawkey); err != nil { + return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err) + } + buf, err := x509.MarshalECPrivateKey(&rawkey) + if err != nil { + return "", nil, fmt.Errorf(`failed to marshal PKCS8: %w`, err) + } + return pmECPrivateKey, buf, nil + case RSAPrivateKey, OKPPrivateKey: + var rawkey any + if err := Export(key, &rawkey); err != nil { + return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err) + } + buf, err := x509.MarshalPKCS8PrivateKey(rawkey) + if err != nil { + return "", nil, fmt.Errorf(`failed to marshal PKCS8: %w`, err) + } + return pmPrivateKey, buf, nil + case RSAPublicKey, ECDSAPublicKey, OKPPublicKey: + var rawkey any + if err := Export(key, &rawkey); err != nil { + return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err) + } + buf, err := x509.MarshalPKIXPublicKey(rawkey) + if err != nil { + return "", nil, fmt.Errorf(`failed to marshal PKIX: %w`, err) + } + return pmPublicKey, buf, nil + default: + return "", nil, fmt.Errorf(`unsupported key type %T`, key) + } +} + +type CustomDecoder = json.CustomDecoder +type CustomDecodeFunc = json.CustomDecodeFunc + +// 
RegisterCustomField allows users to specify that a private field +// be decoded as an instance of the specified type. This option has +// a global effect. +// +// For example, suppose you have a custom field `x-birthday`, which +// you want to represent as a string formatted in RFC3339 in JSON, +// but want it back as `time.Time`. +// +// In such case you would register a custom field as follows +// +// jwk.RegisterCustomField(`x-birthday`, time.Time{}) +// +// Then you can use a `time.Time` variable to extract the value +// of `x-birthday` field, instead of having to use `any` +// and later convert it to `time.Time` +// +// var bday time.Time +// _ = key.Get(`x-birthday`, &bday) +// +// If you need a more fine-tuned control over the decoding process, +// you can register a `CustomDecoder`. For example, below shows +// how to register a decoder that can parse RFC1123 format string: +// +// jwk.RegisterCustomField(`x-birthday`, jwk.CustomDecodeFunc(func(data []byte) (any, error) { +// return time.Parse(time.RFC1123, string(data)) +// })) +// +// Please note that use of custom fields can be problematic if you +// are using a library that does not implement MarshalJSON/UnmarshalJSON +// and you try to roundtrip from an object to JSON, and then back to an object. +// For example, in the above example, you can _parse_ time values formatted +// in the format specified in RFC822, but when you convert an object into +// JSON, it will be formatted in RFC3339, because that's what `time.Time` +// likes to do. To avoid this, it's always better to use a custom type +// that wraps your desired type (in this case `time.Time`) and implement +// MarshalJSON and UnmashalJSON. +func RegisterCustomField(name string, object any) { + registry.Register(name, object) +} + +// Equal compares two keys and returns true if they are equal. The comparison +// is solely done against the thumbprints of k1 and k2. 
It is possible for keys +// that have, for example, different key IDs, key usage, etc, to be considered equal. +func Equal(k1, k2 Key) bool { + h := crypto.SHA256 + tp1, err := k1.Thumbprint(h) + if err != nil { + return false // can't report error + } + tp2, err := k2.Thumbprint(h) + if err != nil { + return false // can't report error + } + + return bytes.Equal(tp1, tp2) +} + +// IsPrivateKey returns true if the supplied key is a private key of an +// asymmetric key pair. The argument `k` must implement the `AsymmetricKey` +// interface. +// +// An error is returned if the supplied key is not an `AsymmetricKey`. +func IsPrivateKey(k Key) (bool, error) { + asymmetric, ok := k.(AsymmetricKey) + if ok { + return asymmetric.IsPrivate(), nil + } + return false, fmt.Errorf("jwk.IsPrivateKey: %T is not an asymmetric key", k) +} + +type keyValidationError struct { + err error +} + +func (e *keyValidationError) Error() string { + return fmt.Sprintf(`key validation failed: %s`, e.err) +} + +func (e *keyValidationError) Unwrap() error { + return e.err +} + +func (e *keyValidationError) Is(target error) bool { + _, ok := target.(*keyValidationError) + return ok +} + +// NewKeyValidationError wraps the given error with an error that denotes +// `key.Validate()` has failed. This error type should ONLY be used as +// return value from the `Validate()` method. +func NewKeyValidationError(err error) error { + return &keyValidationError{err: err} +} + +func IsKeyValidationError(err error) bool { + var kve keyValidationError + return errors.Is(err, &kve) +} + +// Configure is used to configure global behavior of the jwk package. 
+func Configure(options ...GlobalOption) { + var strictKeyUsagePtr *bool + for _, option := range options { + switch option.Ident() { + case identStrictKeyUsage{}: + var v bool + if err := option.Value(&v); err != nil { + continue + } + strictKeyUsagePtr = &v + } + } + + if strictKeyUsagePtr != nil { + strictKeyUsage.Store(*strictKeyUsagePtr) + } +} + +// These are used when validating keys. +type keyWithD interface { + D() ([]byte, bool) +} + +var _ keyWithD = &okpPrivateKey{} + +func extractEmbeddedKey(keyif Key, concretTypes []reflect.Type) (Key, error) { + rv := reflect.ValueOf(keyif) + + // If the value can be converted to one of the concrete types, then we're done + if slices.ContainsFunc(concretTypes, func(t reflect.Type) bool { + return rv.Type().ConvertibleTo(t) + }) { + return keyif, nil + } + + // When a struct implements the Key interface via embedding, you unfortunately + // cannot use a type switch to determine the concrete type, because + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return nil, fmt.Errorf(`invalid key value (0): %w`, ContinueError()) + } + rv = rv.Elem() + } + + if rv.Kind() != reflect.Struct { + return nil, fmt.Errorf(`invalid key value type %T (1): %w`, keyif, ContinueError()) + } + if rv.NumField() == 0 { + return nil, fmt.Errorf(`invalid key value type %T (2): %w`, keyif, ContinueError()) + } + // Iterate through the fields of the struct to find the first field that + // implements the Key interface + rt := rv.Type() + for i := range rv.NumField() { + field := rv.Field(i) + ft := rt.Field(i) + if !ft.Anonymous { + // We can only salvage this object if the object implements jwk.Key + // via embedding, so we skip fields that are not anonymous + continue + } + + if field.CanInterface() { + if k, ok := field.Interface().(Key); ok { + return extractEmbeddedKey(k, concretTypes) + } + } + } + + return nil, fmt.Errorf(`invalid key value type %T (3): %w`, keyif, ContinueError()) +} diff --git 
a/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/BUILD.bazel new file mode 100644 index 0000000000..68a4ccdc19 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/BUILD.bazel @@ -0,0 +1,17 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "jwkbb", + srcs = ["x509.go"], + importpath = "github.com/lestrrat-go/jwx/v3/jwk/jwkbb", + visibility = ["//visibility:public"], + deps = [ + "@com_github_lestrrat_go_blackmagic//:blackmagic", + ], +) + +alias( + name = "go_default_library", + actual = ":jwkbb", + visibility = ["//visibility:public"], +) \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/x509.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/x509.go new file mode 100644 index 0000000000..3c827cfa6f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/jwkbb/x509.go @@ -0,0 +1,111 @@ +package jwkbb + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + + "github.com/lestrrat-go/blackmagic" +) + +const ( + PrivateKeyBlockType = `PRIVATE KEY` + PublicKeyBlockType = `PUBLIC KEY` + ECPrivateKeyBlockType = `EC PRIVATE KEY` + RSAPublicKeyBlockType = `RSA PUBLIC KEY` + RSAPrivateKeyBlockType = `RSA PRIVATE KEY` + CertificateBlockType = `CERTIFICATE` +) + +// EncodeX509 encodes the given value into ASN.1 DER format, and returns +// the encoded bytes. The value must be one of the following types: +// *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey, +// *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey. +// +// Users can pass a pre-allocated byte slice (but make sure its length is +// changed so that the encoded buffer is appended to the correct location) +// as `dst` to avoid allocations. 
+func EncodeX509(dst []byte, v any) ([]byte, error) { + var block pem.Block + // Try to convert it into a certificate + switch v := v.(type) { + case *rsa.PrivateKey: + block.Type = RSAPrivateKeyBlockType + block.Bytes = x509.MarshalPKCS1PrivateKey(v) + case *ecdsa.PrivateKey: + marshaled, err := x509.MarshalECPrivateKey(v) + if err != nil { + return nil, err + } + block.Type = ECPrivateKeyBlockType + block.Bytes = marshaled + case ed25519.PrivateKey: + marshaled, err := x509.MarshalPKCS8PrivateKey(v) + if err != nil { + return nil, err + } + block.Type = PrivateKeyBlockType + block.Bytes = marshaled + case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey: + marshaled, err := x509.MarshalPKIXPublicKey(v) + if err != nil { + return nil, err + } + block.Type = PublicKeyBlockType + block.Bytes = marshaled + default: + return nil, fmt.Errorf(`unsupported type %T for ASN.1 DER encoding`, v) + } + + encoded := pem.EncodeToMemory(&block) + dst = append(dst, encoded...) + return dst, nil +} + +func DecodeX509(dst any, block *pem.Block) error { + switch block.Type { + // Handle the semi-obvious cases + case RSAPrivateKeyBlockType: + key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return fmt.Errorf(`failed to parse PKCS1 private key: %w`, err) + } + return blackmagic.AssignIfCompatible(dst, key) + case RSAPublicKeyBlockType: + key, err := x509.ParsePKCS1PublicKey(block.Bytes) + if err != nil { + return fmt.Errorf(`failed to parse PKCS1 public key: %w`, err) + } + return blackmagic.AssignIfCompatible(dst, key) + case ECPrivateKeyBlockType: + key, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return fmt.Errorf(`failed to parse EC private key: %w`, err) + } + return blackmagic.AssignIfCompatible(dst, key) + case PublicKeyBlockType: + // XXX *could* return dsa.PublicKey + key, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return fmt.Errorf(`failed to parse PKIX public key: %w`, err) + } + return 
blackmagic.AssignIfCompatible(dst, key) + case PrivateKeyBlockType: + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return fmt.Errorf(`failed to parse PKCS8 private key: %w`, err) + } + return blackmagic.AssignIfCompatible(dst, key) + case CertificateBlockType: + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return fmt.Errorf(`failed to parse certificate: %w`, err) + } + return blackmagic.AssignIfCompatible(dst, cert.PublicKey) + default: + return fmt.Errorf(`invalid PEM block type %s`, block.Type) + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/key_ops.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/key_ops.go new file mode 100644 index 0000000000..b8c229b3af --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/key_ops.go @@ -0,0 +1,58 @@ +package jwk + +import "fmt" + +func (ops *KeyOperationList) Get() KeyOperationList { + if ops == nil { + return nil + } + return *ops +} + +func (ops *KeyOperationList) Accept(v any) error { + switch x := v.(type) { + case string: + return ops.Accept([]string{x}) + case []any: + l := make([]string, len(x)) + for i, e := range x { + if es, ok := e.(string); ok { + l[i] = es + } else { + return fmt.Errorf(`invalid list element type: expected string, got %T`, v) + } + } + return ops.Accept(l) + case []string: + list := make(KeyOperationList, len(x)) + for i, e := range x { + switch e := KeyOperation(e); e { + case KeyOpSign, KeyOpVerify, KeyOpEncrypt, KeyOpDecrypt, KeyOpWrapKey, KeyOpUnwrapKey, KeyOpDeriveKey, KeyOpDeriveBits: + list[i] = e + default: + return fmt.Errorf(`invalid keyoperation %v`, e) + } + } + + *ops = list + return nil + case []KeyOperation: + list := make(KeyOperationList, len(x)) + for i, e := range x { + switch e { + case KeyOpSign, KeyOpVerify, KeyOpEncrypt, KeyOpDecrypt, KeyOpWrapKey, KeyOpUnwrapKey, KeyOpDeriveKey, KeyOpDeriveBits: + list[i] = e + default: + return fmt.Errorf(`invalid keyoperation %v`, e) + } + } + + *ops = list + return nil + 
case KeyOperationList: + *ops = x + return nil + default: + return fmt.Errorf(`invalid value %T`, v) + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go new file mode 100644 index 0000000000..7cbf66c2d8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp.go @@ -0,0 +1,321 @@ +package jwk + +import ( + "bytes" + "crypto" + "crypto/ecdh" + "crypto/ed25519" + "fmt" + "reflect" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +func init() { + RegisterKeyExporter(jwa.OKP(), KeyExportFunc(okpJWKToRaw)) +} + +// Mental note: +// +// Curve25519 refers to a particular curve, and is represented in its Montgomery form. +// +// Ed25519 refers to the biratinally equivalent curve of Curve25519, except it's in Edwards form. +// Ed25519 is the name of the curve and the also the signature scheme using that curve. +// The full name of the scheme is Edwards Curve Digital Signature Algorithm, and thus it is +// also referred to as EdDSA. +// +// X25519 refers to the Diffie-Hellman key exchange protocol that uses Cruve25519. +// Because this is an elliptic curve based Diffie Hellman protocol, it is also referred to +// as ECDH. +// +// OKP keys are used to represent private/public pairs of thse elliptic curve +// keys. But note that the name just means Octet Key Pair. 
+ +func (k *okpPublicKey) Import(rawKeyIf any) error { + k.mu.Lock() + defer k.mu.Unlock() + + var crv jwa.EllipticCurveAlgorithm + switch rawKey := rawKeyIf.(type) { + case ed25519.PublicKey: + k.x = rawKey + crv = jwa.Ed25519() + k.crv = &crv + case *ecdh.PublicKey: + k.x = rawKey.Bytes() + crv = jwa.X25519() + k.crv = &crv + default: + return fmt.Errorf(`unknown key type %T`, rawKeyIf) + } + + return nil +} + +func (k *okpPrivateKey) Import(rawKeyIf any) error { + k.mu.Lock() + defer k.mu.Unlock() + + var crv jwa.EllipticCurveAlgorithm + switch rawKey := rawKeyIf.(type) { + case ed25519.PrivateKey: + k.d = rawKey.Seed() + k.x = rawKey.Public().(ed25519.PublicKey) //nolint:forcetypeassert + crv = jwa.Ed25519() + k.crv = &crv + case *ecdh.PrivateKey: + // k.d = rawKey.Seed() + k.d = rawKey.Bytes() + k.x = rawKey.PublicKey().Bytes() + crv = jwa.X25519() + k.crv = &crv + default: + return fmt.Errorf(`unknown key type %T`, rawKeyIf) + } + + return nil +} + +func buildOKPPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte) (any, error) { + switch alg { + case jwa.Ed25519(): + return ed25519.PublicKey(xbuf), nil + case jwa.X25519(): + ret, err := ecdh.X25519().NewPublicKey(xbuf) + if err != nil { + return nil, fmt.Errorf(`failed to parse x25519 public key %x (size %d): %w`, xbuf, len(xbuf), err) + } + return ret, nil + default: + return nil, fmt.Errorf(`invalid curve algorithm %s`, alg) + } +} + +// Raw returns the EC-DSA public key represented by this JWK +func (k *okpPublicKey) Raw(v any) error { + k.mu.RLock() + defer k.mu.RUnlock() + + crv, ok := k.Crv() + if !ok { + return fmt.Errorf(`missing "crv" field`) + } + + pubk, err := buildOKPPublicKey(crv, k.x) + if err != nil { + return fmt.Errorf(`jwk.OKPPublicKey: failed to build public key: %w`, err) + } + + if err := blackmagic.AssignIfCompatible(v, pubk); err != nil { + return fmt.Errorf(`jwk.OKPPublicKey: failed to assign to destination variable: %w`, err) + } + return nil +} + +func buildOKPPrivateKey(alg 
jwa.EllipticCurveAlgorithm, xbuf []byte, dbuf []byte) (any, error) { + if len(dbuf) == 0 { + return nil, fmt.Errorf(`cannot use empty seed`) + } + switch alg { + case jwa.Ed25519(): + if len(dbuf) != ed25519.SeedSize { + return nil, fmt.Errorf(`ed25519: wrong private key size`) + } + ret := ed25519.NewKeyFromSeed(dbuf) + //nolint:forcetypeassert + if !bytes.Equal(xbuf, ret.Public().(ed25519.PublicKey)) { + return nil, fmt.Errorf(`ed25519: invalid x value given d value`) + } + return ret, nil + case jwa.X25519(): + ret, err := ecdh.X25519().NewPrivateKey(dbuf) + if err != nil { + return nil, fmt.Errorf(`x25519: unable to construct x25519 private key from seed: %w`, err) + } + return ret, nil + default: + return nil, fmt.Errorf(`invalid curve algorithm %s`, alg) + } +} + +var okpConvertibleKeys = []reflect.Type{ + reflect.TypeFor[OKPPrivateKey](), + reflect.TypeFor[OKPPublicKey](), +} + +// This is half baked. I think it will blow up if we used ecdh.* keys and/or x25519 keys +func okpJWKToRaw(key Key, _ any /* this is unused because this is half baked */) (any, error) { + extracted, err := extractEmbeddedKey(key, okpConvertibleKeys) + if err != nil { + return nil, fmt.Errorf(`jwk.OKP: failed to extract embedded key: %w`, err) + } + + switch key := extracted.(type) { + case OKPPrivateKey: + locker, ok := key.(rlocker) + if ok { + locker.rlock() + defer locker.runlock() + } + + crv, ok := key.Crv() + if !ok { + return nil, fmt.Errorf(`missing "crv" field`) + } + + x, ok := key.X() + if !ok { + return nil, fmt.Errorf(`missing "x" field`) + } + + d, ok := key.D() + if !ok { + return nil, fmt.Errorf(`missing "d" field`) + } + + privk, err := buildOKPPrivateKey(crv, x, d) + if err != nil { + return nil, fmt.Errorf(`jwk.OKPPrivateKey: failed to build private key: %w`, err) + } + return privk, nil + case OKPPublicKey: + locker, ok := key.(rlocker) + if ok { + locker.rlock() + defer locker.runlock() + } + + crv, ok := key.Crv() + if !ok { + return nil, fmt.Errorf(`missing 
"crv" field`) + } + + x, ok := key.X() + if !ok { + return nil, fmt.Errorf(`missing "x" field`) + } + pubk, err := buildOKPPublicKey(crv, x) + if err != nil { + return nil, fmt.Errorf(`jwk.OKPPublicKey: failed to build public key: %w`, err) + } + return pubk, nil + default: + return nil, ContinueError() + } +} + +func makeOKPPublicKey(src Key) (Key, error) { + newKey := newOKPPublicKey() + + // Iterate and copy everything except for the bits that should not be in the public key + for _, k := range src.Keys() { + switch k { + case OKPDKey: + continue + default: + var v any + if err := src.Get(k, &v); err != nil { + return nil, fmt.Errorf(`failed to get field %q: %w`, k, err) + } + + if err := newKey.Set(k, v); err != nil { + return nil, fmt.Errorf(`failed to set field %q: %w`, k, err) + } + } + } + + return newKey, nil +} + +func (k *okpPrivateKey) PublicKey() (Key, error) { + return makeOKPPublicKey(k) +} + +func (k *okpPublicKey) PublicKey() (Key, error) { + return makeOKPPublicKey(k) +} + +func okpThumbprint(hash crypto.Hash, crv, x string) []byte { + h := hash.New() + fmt.Fprint(h, `{"crv":"`) + fmt.Fprint(h, crv) + fmt.Fprint(h, `","kty":"OKP","x":"`) + fmt.Fprint(h, x) + fmt.Fprint(h, `"}`) + return h.Sum(nil) +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 / 8037 +func (k okpPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + crv, ok := k.Crv() + if !ok { + return nil, fmt.Errorf(`missing "crv" field`) + } + return okpThumbprint( + hash, + crv.String(), + base64.EncodeToString(k.x), + ), nil +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 / 8037 +func (k okpPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + crv, ok := k.Crv() + if !ok { + return nil, fmt.Errorf(`missing "crv" field`) + } + + return okpThumbprint( + hash, + crv.String(), 
+ base64.EncodeToString(k.x), + ), nil +} + +func validateOKPKey(key interface { + Crv() (jwa.EllipticCurveAlgorithm, bool) + X() ([]byte, bool) +}) error { + if v, ok := key.Crv(); !ok || v == jwa.InvalidEllipticCurve() { + return fmt.Errorf(`invalid curve algorithm`) + } + + if v, ok := key.X(); !ok || len(v) == 0 { + return fmt.Errorf(`missing "x" field`) + } + + if priv, ok := key.(keyWithD); ok { + if d, ok := priv.D(); !ok || len(d) == 0 { + return fmt.Errorf(`missing "d" field`) + } + } + return nil +} + +func (k *okpPublicKey) Validate() error { + k.mu.RLock() + defer k.mu.RUnlock() + if err := validateOKPKey(k); err != nil { + return NewKeyValidationError(fmt.Errorf(`jwk.OKPPublicKey: %w`, err)) + } + return nil +} + +func (k *okpPrivateKey) Validate() error { + k.mu.RLock() + defer k.mu.RUnlock() + if err := validateOKPKey(k); err != nil { + return NewKeyValidationError(fmt.Errorf(`jwk.OKPPrivateKey: %w`, err)) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp_gen.go new file mode 100644 index 0000000000..0bde986147 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/okp_gen.go @@ -0,0 +1,1347 @@ +// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT. 
+ +package jwk + +import ( + "bytes" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/cert" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +const ( + OKPCrvKey = "crv" + OKPDKey = "d" + OKPXKey = "x" +) + +type OKPPublicKey interface { + Key + Crv() (jwa.EllipticCurveAlgorithm, bool) + X() ([]byte, bool) +} + +type okpPublicKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + crv *jwa.EllipticCurveAlgorithm + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + x []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]any + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ OKPPublicKey = &okpPublicKey{} +var _ Key = &okpPublicKey{} + +func newOKPPublicKey() *okpPublicKey { + return &okpPublicKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]any), + } +} + +func (h okpPublicKey) KeyType() jwa.KeyType { + return jwa.OKP() +} + +func (h okpPublicKey) rlock() { + h.mu.RLock() +} + +func (h okpPublicKey) runlock() { + h.mu.RUnlock() +} + +func (h okpPublicKey) IsPrivate() bool { + return false +} + +func (h *okpPublicKey) Algorithm() (jwa.KeyAlgorithm, bool) { + if h.algorithm != nil { + return *(h.algorithm), true + } + return nil, false +} + +func (h *okpPublicKey) Crv() (jwa.EllipticCurveAlgorithm, bool) { + if h.crv != nil 
{ + return *(h.crv), true + } + return jwa.InvalidEllipticCurve(), false +} + +func (h *okpPublicKey) KeyID() (string, bool) { + if h.keyID != nil { + return *(h.keyID), true + } + return "", false +} + +func (h *okpPublicKey) KeyOps() (KeyOperationList, bool) { + if h.keyOps != nil { + return *(h.keyOps), true + } + return nil, false +} + +func (h *okpPublicKey) KeyUsage() (string, bool) { + if h.keyUsage != nil { + return *(h.keyUsage), true + } + return "", false +} + +func (h *okpPublicKey) X() ([]byte, bool) { + if h.x != nil { + return h.x, true + } + return nil, false +} + +func (h *okpPublicKey) X509CertChain() (*cert.Chain, bool) { + return h.x509CertChain, true +} + +func (h *okpPublicKey) X509CertThumbprint() (string, bool) { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint), true + } + return "", false +} + +func (h *okpPublicKey) X509CertThumbprintS256() (string, bool) { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256), true + } + return "", false +} + +func (h *okpPublicKey) X509URL() (string, bool) { + if h.x509URL != nil { + return *(h.x509URL), true + } + return "", false +} + +func (h *okpPublicKey) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return true + case AlgorithmKey: + return h.algorithm != nil + case OKPCrvKey: + return h.crv != nil + case KeyIDKey: + return h.keyID != nil + case KeyOpsKey: + return h.keyOps != nil + case KeyUsageKey: + return h.keyUsage != nil + case OKPXKey: + return h.x != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return h.x509URL != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *okpPublicKey) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + if err := 
blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil { + return fmt.Errorf(`okpPublicKey.Get: failed to assign value for field %q to destination object: %w`, name, err) + } + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case OKPCrvKey: + if h.crv == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.crv)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyIDKey: + if h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyOpsKey: + if h.keyOps == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyUsageKey: + if h.keyUsage == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case OKPXKey: + if h.x == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertChainKey: + if h.x509CertChain == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + 
case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *okpPublicKey) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *okpPublicKey) setNoLock(name string, value any) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm: + tmp, err := jwa.KeyAlgorithmFrom(v) + if err != nil { + return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err) + } + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value) + } + return nil + case OKPCrvKey: + if v, ok := value.(jwa.EllipticCurveAlgorithm); ok { + h.crv = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPCrvKey, value) + case KeyIDKey: + if v, ok := value.(string); 
ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case OKPXKey: + if v, ok := value.([]byte); ok { + h.x = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPXKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *okpPublicKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case OKPCrvKey: + k.crv = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case OKPXKey: + k.x = nil + case X509CertChainKey: + 
k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *okpPublicKey) Clone() (Key, error) { + key, err := cloneKey(k) + if err != nil { + return nil, fmt.Errorf(`okpPublicKey.Clone: %w`, err) + } + return key, nil +} + +func (k *okpPublicKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *okpPublicKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *okpPublicKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.crv = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.x = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. 
+ if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.OKP().String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg, err := jwa.KeyAlgorithmFrom(s) + if err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &alg + case OKPCrvKey: + var decoded jwa.EllipticCurveAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPCrvKey, err) + } + h.crv = &decoded + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case OKPXKey: + if err := json.AssignNextBytesToken(&h.x, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPXKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := 
json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.crv == nil { + return fmt.Errorf(`required field crv is missing`) + } + if h.x == nil { + return fmt.Errorf(`required field x is missing`) + } + return nil +} + +func (h okpPublicKey) MarshalJSON() ([]byte, error) { + data := make(map[string]any) + fields := make([]string, 0, 10) + data[KeyTypeKey] = jwa.OKP() + fields = append(fields, KeyTypeKey) + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + fields = append(fields, AlgorithmKey) + } + if h.crv != nil { + data[OKPCrvKey] = *(h.crv) + fields = append(fields, OKPCrvKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + fields = append(fields, KeyIDKey) + } + if h.keyOps != nil { + data[KeyOpsKey] = *(h.keyOps) + fields = append(fields, KeyOpsKey) + } + if h.keyUsage != nil { + data[KeyUsageKey] = *(h.keyUsage) + fields = append(fields, KeyUsageKey) + } + if h.x != nil { + data[OKPXKey] = h.x + fields = append(fields, OKPXKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + fields = 
append(fields, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + fields = append(fields, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + fields = append(fields, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + fields = append(fields, X509URLKey) + } + for k, v := range h.privateParams { + data[k] = v + fields = append(fields, k) + } + + sort.Strings(fields) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *okpPublicKey) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 10+len(h.privateParams)) + keys = append(keys, KeyTypeKey) + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.crv != nil { + keys = append(keys, OKPCrvKey) + } + if h.keyID != nil { + keys = append(keys, KeyIDKey) + } + if h.keyOps != nil { + keys = append(keys, KeyOpsKey) + } + if h.keyUsage != nil { + keys = append(keys, KeyUsageKey) + } + if h.x != nil { + keys = append(keys, OKPXKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if 
h.x509CertThumbprintS256 != nil { + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +type OKPPrivateKey interface { + Key + Crv() (jwa.EllipticCurveAlgorithm, bool) + D() ([]byte, bool) + X() ([]byte, bool) +} + +type okpPrivateKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + crv *jwa.EllipticCurveAlgorithm + d []byte + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + x []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]any + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ OKPPrivateKey = &okpPrivateKey{} +var _ Key = &okpPrivateKey{} + +func newOKPPrivateKey() *okpPrivateKey { + return &okpPrivateKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]any), + } +} + +func (h okpPrivateKey) KeyType() jwa.KeyType { + return jwa.OKP() +} + +func (h okpPrivateKey) rlock() { + h.mu.RLock() +} + +func (h okpPrivateKey) runlock() { + h.mu.RUnlock() +} + +func (h okpPrivateKey) IsPrivate() bool { + return true +} + +func (h *okpPrivateKey) Algorithm() (jwa.KeyAlgorithm, bool) { + if h.algorithm != nil { + return *(h.algorithm), true + } + return nil, false +} + +func (h *okpPrivateKey) Crv() (jwa.EllipticCurveAlgorithm, bool) { + if h.crv != nil { + return *(h.crv), true + } + return jwa.InvalidEllipticCurve(), false +} + +func (h *okpPrivateKey) D() ([]byte, bool) { + if h.d != nil { + return h.d, true + } + 
return nil, false +} + +func (h *okpPrivateKey) KeyID() (string, bool) { + if h.keyID != nil { + return *(h.keyID), true + } + return "", false +} + +func (h *okpPrivateKey) KeyOps() (KeyOperationList, bool) { + if h.keyOps != nil { + return *(h.keyOps), true + } + return nil, false +} + +func (h *okpPrivateKey) KeyUsage() (string, bool) { + if h.keyUsage != nil { + return *(h.keyUsage), true + } + return "", false +} + +func (h *okpPrivateKey) X() ([]byte, bool) { + if h.x != nil { + return h.x, true + } + return nil, false +} + +func (h *okpPrivateKey) X509CertChain() (*cert.Chain, bool) { + return h.x509CertChain, true +} + +func (h *okpPrivateKey) X509CertThumbprint() (string, bool) { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint), true + } + return "", false +} + +func (h *okpPrivateKey) X509CertThumbprintS256() (string, bool) { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256), true + } + return "", false +} + +func (h *okpPrivateKey) X509URL() (string, bool) { + if h.x509URL != nil { + return *(h.x509URL), true + } + return "", false +} + +func (h *okpPrivateKey) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return true + case AlgorithmKey: + return h.algorithm != nil + case OKPCrvKey: + return h.crv != nil + case OKPDKey: + return h.d != nil + case KeyIDKey: + return h.keyID != nil + case KeyOpsKey: + return h.keyOps != nil + case KeyUsageKey: + return h.keyUsage != nil + case OKPXKey: + return h.x != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return h.x509URL != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *okpPrivateKey) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + if err := 
blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil { + return fmt.Errorf(`okpPrivateKey.Get: failed to assign value for field %q to destination object: %w`, name, err) + } + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case OKPCrvKey: + if h.crv == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.crv)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case OKPDKey: + if h.d == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.d); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyIDKey: + if h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyOpsKey: + if h.keyOps == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyUsageKey: + if h.keyUsage == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case OKPXKey: + if h.x == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertChainKey: + if 
h.x509CertChain == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *okpPrivateKey) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *okpPrivateKey) setNoLock(name string, value any) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm: + tmp, err := jwa.KeyAlgorithmFrom(v) + if err != nil { + return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err) + } + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %q key: %T`, 
AlgorithmKey, value) + } + return nil + case OKPCrvKey: + if v, ok := value.(jwa.EllipticCurveAlgorithm); ok { + h.crv = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPCrvKey, value) + case OKPDKey: + if v, ok := value.([]byte); ok { + h.d = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPDKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case OKPXKey: + if v, ok := value.([]byte); ok { + h.x = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPXKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = 
map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *okpPrivateKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case OKPCrvKey: + k.crv = nil + case OKPDKey: + k.d = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case OKPXKey: + k.x = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *okpPrivateKey) Clone() (Key, error) { + key, err := cloneKey(k) + if err != nil { + return nil, fmt.Errorf(`okpPrivateKey.Clone: %w`, err) + } + return key, nil +} + +func (k *okpPrivateKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *okpPrivateKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *okpPrivateKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.crv = nil + h.d = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.x = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. 
+ if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.OKP().String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg, err := jwa.KeyAlgorithmFrom(s) + if err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &alg + case OKPCrvKey: + var decoded jwa.EllipticCurveAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPCrvKey, err) + } + h.crv = &decoded + case OKPDKey: + if err := json.AssignNextBytesToken(&h.d, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPDKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case OKPXKey: + if err := json.AssignNextBytesToken(&h.x, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPXKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return 
fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.crv == nil { + return fmt.Errorf(`required field crv is missing`) + } + if h.d == nil { + return fmt.Errorf(`required field d is missing`) + } + if h.x == nil { + return fmt.Errorf(`required field x is missing`) + } + return nil +} + +func (h okpPrivateKey) MarshalJSON() ([]byte, error) { + data := make(map[string]any) + fields := make([]string, 0, 11) + data[KeyTypeKey] = jwa.OKP() + fields = append(fields, KeyTypeKey) + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + fields = append(fields, AlgorithmKey) + } + if h.crv != nil { + data[OKPCrvKey] = *(h.crv) + fields = append(fields, OKPCrvKey) + } + if h.d != nil { + data[OKPDKey] = h.d + fields = append(fields, OKPDKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + fields = append(fields, KeyIDKey) + } + if h.keyOps != nil { + data[KeyOpsKey] = *(h.keyOps) + 
fields = append(fields, KeyOpsKey) + } + if h.keyUsage != nil { + data[KeyUsageKey] = *(h.keyUsage) + fields = append(fields, KeyUsageKey) + } + if h.x != nil { + data[OKPXKey] = h.x + fields = append(fields, OKPXKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + fields = append(fields, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + fields = append(fields, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + fields = append(fields, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + fields = append(fields, X509URLKey) + } + for k, v := range h.privateParams { + data[k] = v + fields = append(fields, k) + } + + sort.Strings(fields) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *okpPrivateKey) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 11+len(h.privateParams)) + keys = append(keys, KeyTypeKey) + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.crv != nil { + keys = append(keys, OKPCrvKey) + } + if h.d != nil { + keys = append(keys, OKPDKey) + } + if h.keyID != nil { + keys = 
append(keys, KeyIDKey) + } + if h.keyOps != nil { + keys = append(keys, KeyOpsKey) + } + if h.keyUsage != nil { + keys = append(keys, KeyUsageKey) + } + if h.x != nil { + keys = append(keys, OKPXKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +var okpStandardFields KeyFilter + +func init() { + okpStandardFields = NewFieldNameFilter(KeyTypeKey, KeyUsageKey, KeyOpsKey, AlgorithmKey, KeyIDKey, X509URLKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, OKPCrvKey, OKPXKey, OKPDKey) +} + +// OKPStandardFieldsFilter returns a KeyFilter that filters out standard OKP fields. +func OKPStandardFieldsFilter() KeyFilter { + return okpStandardFields +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.go new file mode 100644 index 0000000000..56cc52625f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.go @@ -0,0 +1,76 @@ +package jwk + +import ( + "time" + + "github.com/lestrrat-go/httprc/v3" + "github.com/lestrrat-go/option/v2" +) + +type identTypedField struct{} + +type typedFieldPair struct { + Name string + Value any +} + +// WithTypedField allows a private field to be parsed into the object type of +// your choice. It works much like the RegisterCustomField, but the effect +// is only applicable to the jwt.Parse function call which receives this option. +// +// While this can be extremely useful, this option should be used with caution: +// There are many caveats that your entire team/user-base needs to be aware of, +// and therefore in general its use is discouraged. 
Only use it when you know +// what you are doing, and you document its use clearly for others. +// +// First and foremost, this is a "per-object" option. Meaning that given the same +// serialized format, it is possible to generate two objects whose internal +// representations may differ. That is, if you parse one _WITH_ the option, +// and the other _WITHOUT_, their internal representation may completely differ. +// This could potentially lead to problems. +// +// Second, specifying this option will slightly slow down the decoding process +// as it needs to consult multiple definitions sources (global and local), so +// be careful if you are decoding a large number of tokens, as the effects will stack up. +func WithTypedField(name string, object any) ParseOption { + return &parseOption{ + option.New(identTypedField{}, + typedFieldPair{Name: name, Value: object}, + ), + } +} + +type registerResourceOption struct { + option.Interface +} + +func (registerResourceOption) registerOption() {} +func (registerResourceOption) resourceOption() {} + +type identNewResourceOption struct{} + +// WithHttprcResourceOption can be used to pass arbitrary `httprc.NewResourceOption` +// to `(httprc.Client).Add` by way of `(jwk.Cache).Register`. +func WithHttprcResourceOption(o httprc.NewResourceOption) RegisterOption { + return ®isterResourceOption{ + option.New(identNewResourceOption{}, o), + } +} + +// WithConstantInterval can be used to pass `httprc.WithConstantInterval` option to +// `(httprc.Client).Add` by way of `(jwk.Cache).Register`. +func WithConstantInterval(d time.Duration) RegisterOption { + return WithHttprcResourceOption(httprc.WithConstantInterval(d)) +} + +// WithMinInterval can be used to pass `httprc.WithMinInterval` option to +// `(httprc.Client).Add` by way of `(jwk.Cache).Register`. 
+func WithMinInterval(d time.Duration) RegisterOption { + return WithHttprcResourceOption(httprc.WithMinInterval(d)) +} + +// WithMaxInterval can be used to pass `httprc.WithMaxInterval` option to +// `(httprc.Client).Add` by way of `(jwk.Cache).Register`. +func WithMaxInterval(d time.Duration) RegisterOption { + return WithHttprcResourceOption(httprc.WithMaxInterval(d)) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.yaml new file mode 100644 index 0000000000..879dcba158 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options.yaml @@ -0,0 +1,143 @@ +package_name: jwk +output: jwk/options_gen.go +interfaces: + - name: CacheOption + comment: | + CacheOption is a type of Option that can be passed to the + the `jwk.NewCache()` function. + - name: ResourceOption + comment: | + ResourceOption is a type of Option that can be passed to the `httprc.NewResource` function + by way of RegisterOption. + - name: AssignKeyIDOption + - name: FetchOption + methods: + - fetchOption + - parseOption + - registerOption + comment: | + FetchOption is a type of Option that can be passed to `jwk.Fetch()` + FetchOption also implements the `RegisterOption`, and thus can + safely be passed to `(*jwk.Cache).Register()` + - name: ParseOption + methods: + - fetchOption + - registerOption + - readFileOption + comment: | + ParseOption is a type of Option that can be passed to `jwk.Parse()` + ParseOption also implements the `ReadFileOption` and `NewCacheOption`, + and thus safely be passed to `jwk.ReadFile` and `(*jwk.Cache).Configure()` + - name: ReadFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jwk.ReadFile` + - name: RegisterOption + comment: | + RegisterOption describes options that can be passed to `(jwk.Cache).Register()` + - name: RegisterFetchOption + methods: + - fetchOption + - registerOption + - parseOption + comment: | + RegisterFetchOption describes options 
that can be passed to `(jwk.Cache).Register()` and `jwk.Fetch()` + - name: GlobalOption + comment: | + GlobalOption is a type of Option that can be passed to the `jwk.Configure()` to + change the global configuration of the jwk package. +options: + - ident: HTTPClient + interface: RegisterFetchOption + argument_type: HTTPClient + comment: | + WithHTTPClient allows users to specify the "net/http".Client object that + is used when fetching jwk.Set objects. + - ident: ThumbprintHash + interface: AssignKeyIDOption + argument_type: crypto.Hash + - ident: LocalRegistry + option_name: withLocalRegistry + interface: ParseOption + argument_type: '*json.Registry' + comment: This option is only available for internal code. Users don't get to play with it + - ident: PEM + interface: ParseOption + argument_type: bool + comment: | + WithPEM specifies that the input to `Parse()` is a PEM encoded key. + + This option is planned to be deprecated in the future. The plan is to + replace it with `jwk.WithX509(true)` + - ident: X509 + interface: ParseOption + argument_type: bool + comment: | + WithX509 specifies that the input to `Parse()` is an X.509 encoded key + - ident: PEMDecoder + interface: ParseOption + argument_type: PEMDecoder + comment: | + WithPEMDecoder specifies the PEMDecoder object to use when decoding + PEM encoded keys. This option can be passed to `jwk.Parse()` + + This option is planned to be deprecated in the future. The plan is to + use `jwk.RegisterX509Decoder()` to register a custom X.509 decoder globally. + - ident: FetchWhitelist + interface: FetchOption + argument_type: Whitelist + comment: | + WithFetchWhitelist specifies the Whitelist object to use when + fetching JWKs from a remote source. This option can be passed + to both `jwk.Fetch()` + - ident: IgnoreParseError + interface: ParseOption + argument_type: bool + comment: | + WithIgnoreParseError is only applicable when used with `jwk.Parse()` + (i.e. to parse JWK sets). 
If passed to `jwk.ParseKey()`, the function + will return an error no matter what the input is. + + DO NOT USE WITHOUT EXHAUSTING ALL OTHER ROUTES FIRST. + + The option specifies that errors found during parsing of individual + keys are ignored. For example, if you had keys A, B, C where B is + invalid (e.g. it does not contain the required fields), then the + resulting JWKS will contain keys A and C only. + + This options exists as an escape hatch for those times when a + key in a JWKS that is irrelevant for your use case is causing + your JWKS parsing to fail, and you want to get to the rest of the + keys in the JWKS. + + Again, DO NOT USE unless you have exhausted all other routes. + When you use this option, you will not be able to tell if you are + using a faulty JWKS, except for when there are JSON syntax errors. + - ident: FS + interface: ReadFileOption + argument_type: fs.FS + comment: | + WithFS specifies the source `fs.FS` object to read the file from. + - ident: WaitReady + interface: RegisterOption + argument_type: bool + comment: | + WithWaitReady specifies that the `jwk.Cache` should wait until the + first fetch is done before returning from the `Register()` call. + + This option is by default true. Specify a false value if you would + like to return immediately from the `Register()` call. + + This options is exactly the same as `httprc.WithWaitReady()` + - ident: StrictKeyUsage + interface: GlobalOption + argument_type: bool + comment: | + WithStrictKeyUsage specifies if during JWK parsing, the "use" field + should be confined to the values that have been registered via + `jwk.RegisterKeyType()`. By default this option is true, and the + initial allowed values are "use" and "enc" only. + + If this option is set to false, then the "use" field can be any + value. 
If this options is set to true, then the "use" field must + be one of the registered values, and otherwise an error will be + reported during parsing / assignment to `jwk.KeyUsageType` \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options_gen.go new file mode 100644 index 0000000000..99e66c3e7e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/options_gen.go @@ -0,0 +1,297 @@ +// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT. + +package jwk + +import ( + "crypto" + "io/fs" + + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/option/v2" +) + +type Option = option.Interface + +type AssignKeyIDOption interface { + Option + assignKeyIDOption() +} + +type assignKeyIDOption struct { + Option +} + +func (*assignKeyIDOption) assignKeyIDOption() {} + +// CacheOption is a type of Option that can be passed to the +// the `jwk.NewCache()` function. +type CacheOption interface { + Option + cacheOption() +} + +type cacheOption struct { + Option +} + +func (*cacheOption) cacheOption() {} + +// FetchOption is a type of Option that can be passed to `jwk.Fetch()` +// FetchOption also implements the `RegisterOption`, and thus can +// safely be passed to `(*jwk.Cache).Register()` +type FetchOption interface { + Option + fetchOption() + parseOption() + registerOption() +} + +type fetchOption struct { + Option +} + +func (*fetchOption) fetchOption() {} + +func (*fetchOption) parseOption() {} + +func (*fetchOption) registerOption() {} + +// GlobalOption is a type of Option that can be passed to the `jwk.Configure()` to +// change the global configuration of the jwk package. 
+type GlobalOption interface { + Option + globalOption() +} + +type globalOption struct { + Option +} + +func (*globalOption) globalOption() {} + +// ParseOption is a type of Option that can be passed to `jwk.Parse()` +// ParseOption also implements the `ReadFileOption` and `NewCacheOption`, +// and thus safely be passed to `jwk.ReadFile` and `(*jwk.Cache).Configure()` +type ParseOption interface { + Option + fetchOption() + registerOption() + readFileOption() +} + +type parseOption struct { + Option +} + +func (*parseOption) fetchOption() {} + +func (*parseOption) registerOption() {} + +func (*parseOption) readFileOption() {} + +// ReadFileOption is a type of `Option` that can be passed to `jwk.ReadFile` +type ReadFileOption interface { + Option + readFileOption() +} + +type readFileOption struct { + Option +} + +func (*readFileOption) readFileOption() {} + +// RegisterFetchOption describes options that can be passed to `(jwk.Cache).Register()` and `jwk.Fetch()` +type RegisterFetchOption interface { + Option + fetchOption() + registerOption() + parseOption() +} + +type registerFetchOption struct { + Option +} + +func (*registerFetchOption) fetchOption() {} + +func (*registerFetchOption) registerOption() {} + +func (*registerFetchOption) parseOption() {} + +// RegisterOption describes options that can be passed to `(jwk.Cache).Register()` +type RegisterOption interface { + Option + registerOption() +} + +type registerOption struct { + Option +} + +func (*registerOption) registerOption() {} + +// ResourceOption is a type of Option that can be passed to the `httprc.NewResource` function +// by way of RegisterOption. 
+type ResourceOption interface { + Option + resourceOption() +} + +type resourceOption struct { + Option +} + +func (*resourceOption) resourceOption() {} + +type identFS struct{} +type identFetchWhitelist struct{} +type identHTTPClient struct{} +type identIgnoreParseError struct{} +type identLocalRegistry struct{} +type identPEM struct{} +type identPEMDecoder struct{} +type identStrictKeyUsage struct{} +type identThumbprintHash struct{} +type identWaitReady struct{} +type identX509 struct{} + +func (identFS) String() string { + return "WithFS" +} + +func (identFetchWhitelist) String() string { + return "WithFetchWhitelist" +} + +func (identHTTPClient) String() string { + return "WithHTTPClient" +} + +func (identIgnoreParseError) String() string { + return "WithIgnoreParseError" +} + +func (identLocalRegistry) String() string { + return "withLocalRegistry" +} + +func (identPEM) String() string { + return "WithPEM" +} + +func (identPEMDecoder) String() string { + return "WithPEMDecoder" +} + +func (identStrictKeyUsage) String() string { + return "WithStrictKeyUsage" +} + +func (identThumbprintHash) String() string { + return "WithThumbprintHash" +} + +func (identWaitReady) String() string { + return "WithWaitReady" +} + +func (identX509) String() string { + return "WithX509" +} + +// WithFS specifies the source `fs.FS` object to read the file from. +func WithFS(v fs.FS) ReadFileOption { + return &readFileOption{option.New(identFS{}, v)} +} + +// WithFetchWhitelist specifies the Whitelist object to use when +// fetching JWKs from a remote source. This option can be passed +// to both `jwk.Fetch()` +func WithFetchWhitelist(v Whitelist) FetchOption { + return &fetchOption{option.New(identFetchWhitelist{}, v)} +} + +// WithHTTPClient allows users to specify the "net/http".Client object that +// is used when fetching jwk.Set objects. 
+func WithHTTPClient(v HTTPClient) RegisterFetchOption { + return &registerFetchOption{option.New(identHTTPClient{}, v)} +} + +// WithIgnoreParseError is only applicable when used with `jwk.Parse()` +// (i.e. to parse JWK sets). If passed to `jwk.ParseKey()`, the function +// will return an error no matter what the input is. +// +// DO NOT USE WITHOUT EXHAUSTING ALL OTHER ROUTES FIRST. +// +// The option specifies that errors found during parsing of individual +// keys are ignored. For example, if you had keys A, B, C where B is +// invalid (e.g. it does not contain the required fields), then the +// resulting JWKS will contain keys A and C only. +// +// This options exists as an escape hatch for those times when a +// key in a JWKS that is irrelevant for your use case is causing +// your JWKS parsing to fail, and you want to get to the rest of the +// keys in the JWKS. +// +// Again, DO NOT USE unless you have exhausted all other routes. +// When you use this option, you will not be able to tell if you are +// using a faulty JWKS, except for when there are JSON syntax errors. +func WithIgnoreParseError(v bool) ParseOption { + return &parseOption{option.New(identIgnoreParseError{}, v)} +} + +// This option is only available for internal code. Users don't get to play with it +func withLocalRegistry(v *json.Registry) ParseOption { + return &parseOption{option.New(identLocalRegistry{}, v)} +} + +// WithPEM specifies that the input to `Parse()` is a PEM encoded key. +// +// This option is planned to be deprecated in the future. The plan is to +// replace it with `jwk.WithX509(true)` +func WithPEM(v bool) ParseOption { + return &parseOption{option.New(identPEM{}, v)} +} + +// WithPEMDecoder specifies the PEMDecoder object to use when decoding +// PEM encoded keys. This option can be passed to `jwk.Parse()` +// +// This option is planned to be deprecated in the future. The plan is to +// use `jwk.RegisterX509Decoder()` to register a custom X.509 decoder globally.
+func WithPEMDecoder(v PEMDecoder) ParseOption { + return &parseOption{option.New(identPEMDecoder{}, v)} +} + +// WithStrictKeyUsage specifies if during JWK parsing, the "use" field +// should be confined to the values that have been registered via +// `jwk.RegisterKeyType()`. By default this option is true, and the +// initial allowed values are "use" and "enc" only. +// +// If this option is set to false, then the "use" field can be any +// value. If this options is set to true, then the "use" field must +// be one of the registered values, and otherwise an error will be +// reported during parsing / assignment to `jwk.KeyUsageType` +func WithStrictKeyUsage(v bool) GlobalOption { + return &globalOption{option.New(identStrictKeyUsage{}, v)} +} + +func WithThumbprintHash(v crypto.Hash) AssignKeyIDOption { + return &assignKeyIDOption{option.New(identThumbprintHash{}, v)} +} + +// WithWaitReady specifies that the `jwk.Cache` should wait until the +// first fetch is done before returning from the `Register()` call. +// +// This option is by default true. Specify a false value if you would +// like to return immediately from the `Register()` call. 
+// + // This options is exactly the same as `httprc.WithWaitReady()` +func WithWaitReady(v bool) RegisterOption { + return &registerOption{option.New(identWaitReady{}, v)} +} + +// WithX509 specifies that the input to `Parse()` is an X.509 encoded key +func WithX509(v bool) ParseOption { + return &parseOption{option.New(identX509{}, v)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/parser.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/parser.go new file mode 100644 index 0000000000..fa8764ef72 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/parser.go @@ -0,0 +1,244 @@ +package jwk + +import ( + "fmt" + "reflect" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +// KeyParser represents a type that can parse a JSON representation of a JWK into +// a jwk.Key. +// See KeyConvertor for a type that can convert a raw key into a jwk.Key +type KeyParser interface { + // ParseKey parses a JSON payload to a `jwk.Key` object. The first + // argument is an object that contains some hints as to what kind of + // key the JSON payload contains. + // + // If your KeyParser decides that the payload is not something + // you can parse, and you would like to continue parsing with + // the remaining KeyParser instances that are registered, + // return a `jwk.ContinueParseError`. Any other errors will immediately + // halt the parsing process. + // + // When unmarshaling JSON, use the unmarshaler object supplied as + // the second argument. This will ensure that the JSON is unmarshaled + // in a way that is compatible with the rest of the library.
+ ParseKey(probe *KeyProbe, unmarshaler KeyUnmarshaler, payload []byte) (Key, error) +} + +// KeyParseFunc is a type of KeyParser that is based on a function/closure +type KeyParseFunc func(probe *KeyProbe, unmarshaler KeyUnmarshaler, payload []byte) (Key, error) + +func (f KeyParseFunc) ParseKey(probe *KeyProbe, unmarshaler KeyUnmarshaler, payload []byte) (Key, error) { + return f(probe, unmarshaler, payload) +} + +// protects keyParsers +var muKeyParser sync.RWMutex + +// list of parsers +var keyParsers = []KeyParser{KeyParseFunc(defaultParseKey)} + +// RegisterKeyParser adds a new KeyParser. Parsers are called in FILO order. +// That is, the last parser to be registered is called first. There is no +// check for duplicate entries. +func RegisterKeyParser(kp KeyParser) { + muKeyParser.Lock() + defer muKeyParser.Unlock() + keyParsers = append(keyParsers, kp) +} + +func defaultParseKey(probe *KeyProbe, unmarshaler KeyUnmarshaler, data []byte) (Key, error) { + var key Key + var kty string + var d json.RawMessage + if err := probe.Get("Kty", &kty); err != nil { + return nil, fmt.Errorf(`jwk.Parse: failed to get "kty" hint: %w`, err) + } + // We ignore errors from this field, as it's optional + _ = probe.Get("D", &d) + switch v, _ := jwa.LookupKeyType(kty); v { + case jwa.RSA(): + if d != nil { + key = newRSAPrivateKey() + } else { + key = newRSAPublicKey() + } + case jwa.EC(): + if d != nil { + key = newECDSAPrivateKey() + } else { + key = newECDSAPublicKey() + } + case jwa.OctetSeq(): + key = newSymmetricKey() + case jwa.OKP(): + if d != nil { + key = newOKPPrivateKey() + } else { + key = newOKPPublicKey() + } + default: + return nil, fmt.Errorf(`invalid key type from JSON (%s)`, kty) + } + + if err := unmarshaler.UnmarshalKey(data, key); err != nil { + return nil, fmt.Errorf(`failed to unmarshal JSON into key (%T): %w`, key, err) + } + return key, nil +} + +type keyUnmarshaler struct { + localReg *json.Registry +} + +func (ku *keyUnmarshaler) UnmarshalKey(data 
[]byte, key any) error { + if ku.localReg != nil { + dcKey, ok := key.(json.DecodeCtxContainer) + if !ok { + return fmt.Errorf(`typed field was requested, but the key (%T) does not support DecodeCtx`, key) + } + dc := json.NewDecodeCtx(ku.localReg) + dcKey.SetDecodeCtx(dc) + defer func() { dcKey.SetDecodeCtx(nil) }() + } + + if err := json.Unmarshal(data, key); err != nil { + return fmt.Errorf(`failed to unmarshal JSON into key (%T): %w`, key, err) + } + + return nil +} + +// keyProber is the object that starts the probing. When Probe() is called, +// it creates (possibly from a cached value) an object that is used to +// hold hint values. +type keyProber struct { + mu sync.RWMutex + pool *sync.Pool + fields map[string]reflect.StructField + typ reflect.Type +} + +func (kp *keyProber) AddField(field reflect.StructField) error { + kp.mu.Lock() + defer kp.mu.Unlock() + + if _, ok := kp.fields[field.Name]; ok { + return fmt.Errorf(`field name %s is already registered`, field.Name) + } + kp.fields[field.Name] = field + kp.makeStructType() + + // Update pool (note: the logic is the same, but we need to recreate it + // so that we don't accidentally use old stored values) + kp.pool = &sync.Pool{ + New: kp.makeStruct, + } + return nil +} + +func (kp *keyProber) makeStructType() { + // DOES NOT LOCK + fields := make([]reflect.StructField, 0, len(kp.fields)) + for _, f := range kp.fields { + fields = append(fields, f) + } + kp.typ = reflect.StructOf(fields) +} + +func (kp *keyProber) makeStruct() any { + return reflect.New(kp.typ) +} + +func (kp *keyProber) Probe(data []byte) (*KeyProbe, error) { + kp.mu.RLock() + defer kp.mu.RUnlock() + + // if the field list unchanged, so is the pool object, so effectively + // we should be using the cached version + v := kp.pool.Get() + if v == nil { + return nil, fmt.Errorf(`probe: failed to get object from pool`) + } + rv, ok := v.(reflect.Value) + if !ok { + return nil, fmt.Errorf(`probe: value returned from pool as of type %T, 
expected reflect.Value`, v) + } + + if err := json.Unmarshal(data, rv.Interface()); err != nil { + return nil, fmt.Errorf(`probe: failed to unmarshal data: %w`, err) + } + + return &KeyProbe{data: rv}, nil +} + +// KeyProbe is the object that carries the hints when parsing a key. +// The exact list of fields can vary depending on the types of key +// that are registered. +// +// Use `Get()` to access the value of a field. +// +// The underlying data stored in a KeyProbe is recycled each +// time a value is parsed, therefore you are not allowed to hold +// onto this object after ParseKey() is done. +type KeyProbe struct { + data reflect.Value +} + +// Get returns the value of the field with the given `name“. +// `dst` must be a pointer to a value that can hold the type of +// the value of the field, which is determined by the +// field type registered through `jwk.RegisterProbeField()` +func (kp *KeyProbe) Get(name string, dst any) error { + f := kp.data.Elem().FieldByName(name) + if !f.IsValid() { + return fmt.Errorf(`field %s not found`, name) + } + + if err := blackmagic.AssignIfCompatible(dst, f.Addr().Interface()); err != nil { + return fmt.Errorf(`failed to assign value of field %q to %T: %w`, name, dst, err) + } + return nil +} + +// We don't really need the object, we need to know its type +var keyProbe = &keyProber{ + fields: make(map[string]reflect.StructField), +} + +// RegisterProbeField adds a new field to be probed during the initial +// phase of parsing. This is done by partially parsing the JSON payload, +// and we do this by calling `json.Unmarshal` using a dynamic type that +// can possibly be modified during runtime. This function is used to +// add a new field to this dynamic type. +// +// Note that the `Name` field for the given `reflect.StructField` must start +// with an upper case alphabet, such that it is treated as an exported field. 
+// So for example, if you want to probe the "my_hint" field, you should specify +// the field name as "MyHint" or similar. +// +// Also the field name must be unique. If you believe that your field name may +// collide with other packages that may want to add their own probes, +// it is the responsibility of the caller +// to ensure that the field name is unique (possibly by prefixing the field +// name with a unique string). It is important to note that the field name +// need not be the same as the JSON field name. For example, your field name +// could be "MyPkg_MyHint", while the actual JSON field name could be "my_hint". +// +// If the field name is not unique, an error is returned. +func RegisterProbeField(p reflect.StructField) error { + // locking is done inside keyProbe + return keyProbe.AddField(p) +} + +// KeyUnmarshaler is a thin wrapper around json.Unmarshal. It behaves almost +// exactly like json.Unmarshal, but it allows us to add extra magic that +// is specific to this library before calling the actual json.Unmarshal. 
+type KeyUnmarshaler interface { + UnmarshalKey(data []byte, key any) error +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go new file mode 100644 index 0000000000..ca27681587 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa.go @@ -0,0 +1,360 @@ +package jwk + +import ( + "crypto" + "crypto/rsa" + "encoding/binary" + "fmt" + "math/big" + "reflect" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +func init() { + RegisterKeyExporter(jwa.RSA(), KeyExportFunc(rsaJWKToRaw)) +} + +func (k *rsaPrivateKey) Import(rawKey *rsa.PrivateKey) error { + k.mu.Lock() + defer k.mu.Unlock() + + d, err := bigIntToBytes(rawKey.D) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.d = d + + l := len(rawKey.Primes) + + if l < 0 /* I know, I'm being paranoid */ || l > 2 { + return fmt.Errorf(`invalid number of primes in rsa.PrivateKey: need 0 to 2, but got %d`, len(rawKey.Primes)) + } + + if l > 0 { + p, err := bigIntToBytes(rawKey.Primes[0]) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.p = p + } + + if l > 1 { + q, err := bigIntToBytes(rawKey.Primes[1]) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.q = q + } + + // dp, dq, qi are optional values + if v, err := bigIntToBytes(rawKey.Precomputed.Dp); err == nil { + k.dp = v + } + if v, err := bigIntToBytes(rawKey.Precomputed.Dq); err == nil { + k.dq = v + } + if v, err := bigIntToBytes(rawKey.Precomputed.Qinv); err == nil { + k.qi = v + } + + // public key part + n, e, err := importRsaPublicKeyByteValues(&rawKey.PublicKey) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.n = n + k.e = e + + return nil +} + +func importRsaPublicKeyByteValues(rawKey *rsa.PublicKey) ([]byte, []byte, error) { + n, err := bigIntToBytes(rawKey.N) + if err != 
 nil { + return nil, nil, fmt.Errorf(`invalid rsa.PublicKey: %w`, err) + } + + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, uint64(rawKey.E)) + i := 0 + for ; i < len(data); i++ { + if data[i] != 0x0 { + break + } + } + return n, data[i:], nil +} + +func (k *rsaPublicKey) Import(rawKey *rsa.PublicKey) error { + k.mu.Lock() + defer k.mu.Unlock() + + n, e, err := importRsaPublicKeyByteValues(rawKey) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.n = n + k.e = e + + return nil +} + +func buildRSAPublicKey(key *rsa.PublicKey, n, e []byte) { + bin := pool.BigInt().Get() + bie := pool.BigInt().Get() + defer pool.BigInt().Put(bie) + + bin.SetBytes(n) + bie.SetBytes(e) + + key.N = bin + key.E = int(bie.Int64()) +} + +var rsaConvertibleKeys = []reflect.Type{ + reflect.TypeFor[RSAPrivateKey](), + reflect.TypeFor[RSAPublicKey](), +} + +func rsaJWKToRaw(key Key, hint any) (any, error) { + extracted, err := extractEmbeddedKey(key, rsaConvertibleKeys) + if err != nil { + return nil, fmt.Errorf(`failed to extract embedded key: %w`, err) + } + switch key := extracted.(type) { + case RSAPrivateKey: + switch hint.(type) { + case *rsa.PrivateKey, *any: + default: + return nil, fmt.Errorf(`invalid destination object type %T for private RSA JWK: %w`, hint, ContinueError()) + } + + locker, ok := key.(rlocker) + if ok { + locker.rlock() + defer locker.runlock() + } + + od, ok := key.D() + if !ok { + return nil, fmt.Errorf(`missing "d" value`) + } + + oq, ok := key.Q() + if !ok { + return nil, fmt.Errorf(`missing "q" value`) + } + + op, ok := key.P() + if !ok { + return nil, fmt.Errorf(`missing "p" value`) + } + + var d, q, p big.Int // note: do not use from sync.Pool + + d.SetBytes(od) + q.SetBytes(oq) + p.SetBytes(op) + + // optional fields + var dp, dq, qi *big.Int + + if odp, ok := key.DP(); ok { + dp = &big.Int{} // note: do not use from sync.Pool + dp.SetBytes(odp) + } + + if odq, ok := key.DQ(); ok { + dq = &big.Int{} // note: do not
 use from sync.Pool + dq.SetBytes(odq) + } + + if oqi, ok := key.QI(); ok { + qi = &big.Int{} // note: do not use from sync.Pool + qi.SetBytes(oqi) + } + + n, ok := key.N() + if !ok { + return nil, fmt.Errorf(`missing "n" value`) + } + + e, ok := key.E() + if !ok { + return nil, fmt.Errorf(`missing "e" value`) + } + + var privkey rsa.PrivateKey + buildRSAPublicKey(&privkey.PublicKey, n, e) + privkey.D = &d + privkey.Primes = []*big.Int{&p, &q} + + if dp != nil { + privkey.Precomputed.Dp = dp + } + if dq != nil { + privkey.Precomputed.Dq = dq + } + if qi != nil { + privkey.Precomputed.Qinv = qi + } + // This may look like a no-op, but it's required if we want to + // compare it against a key generated by rsa.GenerateKey + privkey.Precomputed.CRTValues = []rsa.CRTValue{} + return &privkey, nil + case RSAPublicKey: + switch hint.(type) { + case *rsa.PublicKey, *any: + default: + return nil, fmt.Errorf(`invalid destination object type %T for public RSA JWK: %w`, hint, ContinueError()) + } + + locker, ok := key.(rlocker) + if ok { + locker.rlock() + defer locker.runlock() + } + + n, ok := key.N() + if !ok { + return nil, fmt.Errorf(`missing "n" value`) + } + + e, ok := key.E() + if !ok { + return nil, fmt.Errorf(`missing "e" value`) + } + + var pubkey rsa.PublicKey + buildRSAPublicKey(&pubkey, n, e) + + return &pubkey, nil + + default: + return nil, ContinueError() + } +} + +func makeRSAPublicKey(src Key) (Key, error) { + newKey := newRSAPublicKey() + + // Iterate and copy everything except for the bits that should not be in the public key + for _, k := range src.Keys() { + switch k { + case RSADKey, RSADPKey, RSADQKey, RSAPKey, RSAQKey, RSAQIKey: + continue + default: + var v any + if err := src.Get(k, &v); err != nil { + return nil, fmt.Errorf(`rsa: makeRSAPublicKey: failed to get field %q: %w`, k, err) + } + if err := newKey.Set(k, v); err != nil { + return nil, fmt.Errorf(`rsa: makeRSAPublicKey: failed to set field %q: %w`, k, err) + } + } + } + + return newKey, nil
+} + +func (k *rsaPrivateKey) PublicKey() (Key, error) { + return makeRSAPublicKey(k) +} + +func (k *rsaPublicKey) PublicKey() (Key, error) { + return makeRSAPublicKey(k) +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 +func (k rsaPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + var key rsa.PrivateKey + if err := Export(&k, &key); err != nil { + return nil, fmt.Errorf(`failed to export RSA private key: %w`, err) + } + return rsaThumbprint(hash, &key.PublicKey) +} + +func (k rsaPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + var key rsa.PublicKey + if err := Export(&k, &key); err != nil { + return nil, fmt.Errorf(`failed to export RSA public key: %w`, err) + } + return rsaThumbprint(hash, &key) +} + +func rsaThumbprint(hash crypto.Hash, key *rsa.PublicKey) ([]byte, error) { + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + + buf.WriteString(`{"e":"`) + buf.WriteString(base64.EncodeUint64ToString(uint64(key.E))) + buf.WriteString(`","kty":"RSA","n":"`) + buf.WriteString(base64.EncodeToString(key.N.Bytes())) + buf.WriteString(`"}`) + + h := hash.New() + if _, err := buf.WriteTo(h); err != nil { + return nil, fmt.Errorf(`failed to write rsaThumbprint: %w`, err) + } + return h.Sum(nil), nil +} + +func validateRSAKey(key interface { + N() ([]byte, bool) + E() ([]byte, bool) +}, checkPrivate bool) error { + n, ok := key.N() + if !ok { + return fmt.Errorf(`missing "n" value`) + } + + e, ok := key.E() + if !ok { + return fmt.Errorf(`missing "e" value`) + } + + if len(n) == 0 { + // Ideally we would like to check for the actual length, but unlike + // EC keys, we have nothing in the key itself that will tell us + // how many bits this key should have. 
+ return fmt.Errorf(`missing "n" value`) + } + if len(e) == 0 { + return fmt.Errorf(`missing "e" value`) + } + if checkPrivate { + if priv, ok := key.(keyWithD); ok { + if d, ok := priv.D(); !ok || len(d) == 0 { + return fmt.Errorf(`missing "d" value`) + } + } else { + return fmt.Errorf(`missing "d" value`) + } + } + + return nil +} + +func (k *rsaPrivateKey) Validate() error { + if err := validateRSAKey(k, true); err != nil { + return NewKeyValidationError(fmt.Errorf(`jwk.RSAPrivateKey: %w`, err)) + } + return nil +} + +func (k *rsaPublicKey) Validate() error { + if err := validateRSAKey(k, false); err != nil { + return NewKeyValidationError(fmt.Errorf(`jwk.RSAPublicKey: %w`, err)) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa_gen.go new file mode 100644 index 0000000000..8e2a4f085b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/rsa_gen.go @@ -0,0 +1,1543 @@ +// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT. 
+ +package jwk + +import ( + "bytes" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/cert" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +const ( + RSADKey = "d" + RSADPKey = "dp" + RSADQKey = "dq" + RSAEKey = "e" + RSANKey = "n" + RSAPKey = "p" + RSAQIKey = "qi" + RSAQKey = "q" +) + +type RSAPublicKey interface { + Key + E() ([]byte, bool) + N() ([]byte, bool) +} + +type rsaPublicKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + e []byte + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + n []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]any + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ RSAPublicKey = &rsaPublicKey{} +var _ Key = &rsaPublicKey{} + +func newRSAPublicKey() *rsaPublicKey { + return &rsaPublicKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]any), + } +} + +func (h rsaPublicKey) KeyType() jwa.KeyType { + return jwa.RSA() +} + +func (h rsaPublicKey) rlock() { + h.mu.RLock() +} + +func (h rsaPublicKey) runlock() { + h.mu.RUnlock() +} + +func (h rsaPublicKey) IsPrivate() bool { + return false +} + +func (h *rsaPublicKey) Algorithm() (jwa.KeyAlgorithm, bool) { + if h.algorithm != nil { + return *(h.algorithm), true + } + return nil, false +} + +func (h *rsaPublicKey) E() ([]byte, bool) { + 
if h.e != nil { + return h.e, true + } + return nil, false +} + +func (h *rsaPublicKey) KeyID() (string, bool) { + if h.keyID != nil { + return *(h.keyID), true + } + return "", false +} + +func (h *rsaPublicKey) KeyOps() (KeyOperationList, bool) { + if h.keyOps != nil { + return *(h.keyOps), true + } + return nil, false +} + +func (h *rsaPublicKey) KeyUsage() (string, bool) { + if h.keyUsage != nil { + return *(h.keyUsage), true + } + return "", false +} + +func (h *rsaPublicKey) N() ([]byte, bool) { + if h.n != nil { + return h.n, true + } + return nil, false +} + +func (h *rsaPublicKey) X509CertChain() (*cert.Chain, bool) { + return h.x509CertChain, true +} + +func (h *rsaPublicKey) X509CertThumbprint() (string, bool) { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint), true + } + return "", false +} + +func (h *rsaPublicKey) X509CertThumbprintS256() (string, bool) { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256), true + } + return "", false +} + +func (h *rsaPublicKey) X509URL() (string, bool) { + if h.x509URL != nil { + return *(h.x509URL), true + } + return "", false +} + +func (h *rsaPublicKey) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return true + case AlgorithmKey: + return h.algorithm != nil + case RSAEKey: + return h.e != nil + case KeyIDKey: + return h.keyID != nil + case KeyOpsKey: + return h.keyOps != nil + case KeyUsageKey: + return h.keyUsage != nil + case RSANKey: + return h.n != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return h.x509URL != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *rsaPublicKey) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + if err := 
blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil { + return fmt.Errorf(`rsaPublicKey.Get: failed to assign value for field %q to destination object: %w`, name, err) + } + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSAEKey: + if h.e == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.e); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyIDKey: + if h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyOpsKey: + if h.keyOps == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyUsageKey: + if h.keyUsage == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSANKey: + if h.n == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.n); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertChainKey: + if h.x509CertChain == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case 
X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *rsaPublicKey) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *rsaPublicKey) setNoLock(name string, value any) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm: + tmp, err := jwa.KeyAlgorithmFrom(v) + if err != nil { + return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err) + } + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value) + } + return nil + case RSAEKey: + if v, ok := value.([]byte); ok { + h.e = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAEKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil 
+ } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case RSANKey: + if v, ok := value.([]byte); ok { + h.n = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSANKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *rsaPublicKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case RSAEKey: + k.e = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case RSANKey: + k.n = nil + case X509CertChainKey: + k.x509CertChain = nil + case 
X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *rsaPublicKey) Clone() (Key, error) { + key, err := cloneKey(k) + if err != nil { + return nil, fmt.Errorf(`rsaPublicKey.Clone: %w`, err) + } + return key, nil +} + +func (k *rsaPublicKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *rsaPublicKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *rsaPublicKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.e = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.n = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. 
+ if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.RSA().String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg, err := jwa.KeyAlgorithmFrom(s) + if err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &alg + case RSAEKey: + if err := json.AssignNextBytesToken(&h.e, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAEKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case RSANKey: + if err := json.AssignNextBytesToken(&h.n, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSANKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err 
!= nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.e == nil { + return fmt.Errorf(`required field e is missing`) + } + if h.n == nil { + return fmt.Errorf(`required field n is missing`) + } + return nil +} + +func (h rsaPublicKey) MarshalJSON() ([]byte, error) { + data := make(map[string]any) + fields := make([]string, 0, 10) + data[KeyTypeKey] = jwa.RSA() + fields = append(fields, KeyTypeKey) + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + fields = append(fields, AlgorithmKey) + } + if h.e != nil { + data[RSAEKey] = h.e + fields = append(fields, RSAEKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + fields = append(fields, KeyIDKey) + } + if h.keyOps != nil { + data[KeyOpsKey] = *(h.keyOps) + fields = append(fields, KeyOpsKey) + } + if h.keyUsage != nil { + data[KeyUsageKey] = *(h.keyUsage) + fields = append(fields, KeyUsageKey) + } + if h.n != nil { + data[RSANKey] = h.n + fields = append(fields, RSANKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + fields = append(fields, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + 
data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + fields = append(fields, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + fields = append(fields, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + fields = append(fields, X509URLKey) + } + for k, v := range h.privateParams { + data[k] = v + fields = append(fields, k) + } + + sort.Strings(fields) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *rsaPublicKey) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 10+len(h.privateParams)) + keys = append(keys, KeyTypeKey) + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.e != nil { + keys = append(keys, RSAEKey) + } + if h.keyID != nil { + keys = append(keys, KeyIDKey) + } + if h.keyOps != nil { + keys = append(keys, KeyOpsKey) + } + if h.keyUsage != nil { + keys = append(keys, KeyUsageKey) + } + if h.n != nil { + keys = append(keys, RSANKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + keys = append(keys, 
X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +type RSAPrivateKey interface { + Key + D() ([]byte, bool) + DP() ([]byte, bool) + DQ() ([]byte, bool) + E() ([]byte, bool) + N() ([]byte, bool) + P() ([]byte, bool) + Q() ([]byte, bool) + QI() ([]byte, bool) +} + +type rsaPrivateKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + d []byte + dp []byte + dq []byte + e []byte + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + n []byte + p []byte + q []byte + qi []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]any + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ RSAPrivateKey = &rsaPrivateKey{} +var _ Key = &rsaPrivateKey{} + +func newRSAPrivateKey() *rsaPrivateKey { + return &rsaPrivateKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]any), + } +} + +func (h rsaPrivateKey) KeyType() jwa.KeyType { + return jwa.RSA() +} + +func (h rsaPrivateKey) rlock() { + h.mu.RLock() +} + +func (h rsaPrivateKey) runlock() { + h.mu.RUnlock() +} + +func (h rsaPrivateKey) IsPrivate() bool { + return true +} + +func (h *rsaPrivateKey) Algorithm() (jwa.KeyAlgorithm, bool) { + if h.algorithm != nil { + return *(h.algorithm), true + } + return nil, false +} + +func (h *rsaPrivateKey) D() ([]byte, bool) { + if h.d != nil { + return h.d, true + } + return nil, false +} + +func (h *rsaPrivateKey) DP() ([]byte, bool) { + if h.dp != nil { + return 
h.dp, true + } + return nil, false +} + +func (h *rsaPrivateKey) DQ() ([]byte, bool) { + if h.dq != nil { + return h.dq, true + } + return nil, false +} + +func (h *rsaPrivateKey) E() ([]byte, bool) { + if h.e != nil { + return h.e, true + } + return nil, false +} + +func (h *rsaPrivateKey) KeyID() (string, bool) { + if h.keyID != nil { + return *(h.keyID), true + } + return "", false +} + +func (h *rsaPrivateKey) KeyOps() (KeyOperationList, bool) { + if h.keyOps != nil { + return *(h.keyOps), true + } + return nil, false +} + +func (h *rsaPrivateKey) KeyUsage() (string, bool) { + if h.keyUsage != nil { + return *(h.keyUsage), true + } + return "", false +} + +func (h *rsaPrivateKey) N() ([]byte, bool) { + if h.n != nil { + return h.n, true + } + return nil, false +} + +func (h *rsaPrivateKey) P() ([]byte, bool) { + if h.p != nil { + return h.p, true + } + return nil, false +} + +func (h *rsaPrivateKey) Q() ([]byte, bool) { + if h.q != nil { + return h.q, true + } + return nil, false +} + +func (h *rsaPrivateKey) QI() ([]byte, bool) { + if h.qi != nil { + return h.qi, true + } + return nil, false +} + +func (h *rsaPrivateKey) X509CertChain() (*cert.Chain, bool) { + return h.x509CertChain, true +} + +func (h *rsaPrivateKey) X509CertThumbprint() (string, bool) { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint), true + } + return "", false +} + +func (h *rsaPrivateKey) X509CertThumbprintS256() (string, bool) { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256), true + } + return "", false +} + +func (h *rsaPrivateKey) X509URL() (string, bool) { + if h.x509URL != nil { + return *(h.x509URL), true + } + return "", false +} + +func (h *rsaPrivateKey) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return true + case AlgorithmKey: + return h.algorithm != nil + case RSADKey: + return h.d != nil + case RSADPKey: + return h.dp != nil + case RSADQKey: + return h.dq != nil + case 
RSAEKey: + return h.e != nil + case KeyIDKey: + return h.keyID != nil + case KeyOpsKey: + return h.keyOps != nil + case KeyUsageKey: + return h.keyUsage != nil + case RSANKey: + return h.n != nil + case RSAPKey: + return h.p != nil + case RSAQKey: + return h.q != nil + case RSAQIKey: + return h.qi != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return h.x509URL != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *rsaPrivateKey) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil { + return fmt.Errorf(`rsaPrivateKey.Get: failed to assign value for field %q to destination object: %w`, name, err) + } + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSADKey: + if h.d == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.d); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSADPKey: + if h.dp == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.dp); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSADQKey: + if h.dq == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.dq); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSAEKey: + if h.e == nil { + return 
fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.e); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyIDKey: + if h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyOpsKey: + if h.keyOps == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyUsageKey: + if h.keyUsage == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSANKey: + if h.n == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.n); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSAPKey: + if h.p == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.p); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSAQKey: + if h.q == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.q); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case RSAQIKey: + if h.qi == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.qi); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertChainKey: + if h.x509CertChain 
== nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *rsaPrivateKey) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *rsaPrivateKey) setNoLock(name string, value any) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm: + tmp, err := jwa.KeyAlgorithmFrom(v) + if err != nil { + return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err) + } + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value) + } 
+ return nil + case RSADKey: + if v, ok := value.([]byte); ok { + h.d = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSADKey, value) + case RSADPKey: + if v, ok := value.([]byte); ok { + h.dp = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSADPKey, value) + case RSADQKey: + if v, ok := value.([]byte); ok { + h.dq = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSADQKey, value) + case RSAEKey: + if v, ok := value.([]byte); ok { + h.e = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAEKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case RSANKey: + if v, ok := value.([]byte); ok { + h.n = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSANKey, value) + case RSAPKey: + if v, ok := value.([]byte); ok { + h.p = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAPKey, value) + case RSAQKey: + if v, ok := value.([]byte); ok { + h.q = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAQKey, value) + case RSAQIKey: + if v, ok := value.([]byte); ok { + h.qi = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAQIKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return 
fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *rsaPrivateKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case RSADKey: + k.d = nil + case RSADPKey: + k.dp = nil + case RSADQKey: + k.dq = nil + case RSAEKey: + k.e = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case RSANKey: + k.n = nil + case RSAPKey: + k.p = nil + case RSAQKey: + k.q = nil + case RSAQIKey: + k.qi = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *rsaPrivateKey) Clone() (Key, error) { + key, err := cloneKey(k) + if err != nil { + return nil, fmt.Errorf(`rsaPrivateKey.Clone: %w`, err) + } + return key, nil +} + +func (k *rsaPrivateKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *rsaPrivateKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *rsaPrivateKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer 
h.mu.Unlock() + h.algorithm = nil + h.d = nil + h.dp = nil + h.dq = nil + h.e = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.n = nil + h.p = nil + h.q = nil + h.qi = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. + if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.RSA().String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg, err := jwa.KeyAlgorithmFrom(s) + if err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &alg + case RSADKey: + if err := json.AssignNextBytesToken(&h.d, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSADKey, err) + } + case RSADPKey: + if err := json.AssignNextBytesToken(&h.dp, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSADPKey, err) + } + case RSADQKey: + if err := json.AssignNextBytesToken(&h.dq, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSADQKey, err) + } + case RSAEKey: + if err := 
json.AssignNextBytesToken(&h.e, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAEKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case RSANKey: + if err := json.AssignNextBytesToken(&h.n, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSANKey, err) + } + case RSAPKey: + if err := json.AssignNextBytesToken(&h.p, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAPKey, err) + } + case RSAQKey: + if err := json.AssignNextBytesToken(&h.q, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAQKey, err) + } + case RSAQIKey: + if err := json.AssignNextBytesToken(&h.qi, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAQIKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := 
json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.d == nil { + return fmt.Errorf(`required field d is missing`) + } + if h.e == nil { + return fmt.Errorf(`required field e is missing`) + } + if h.n == nil { + return fmt.Errorf(`required field n is missing`) + } + return nil +} + +func (h rsaPrivateKey) MarshalJSON() ([]byte, error) { + data := make(map[string]any) + fields := make([]string, 0, 16) + data[KeyTypeKey] = jwa.RSA() + fields = append(fields, KeyTypeKey) + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + fields = append(fields, AlgorithmKey) + } + if h.d != nil { + data[RSADKey] = h.d + fields = append(fields, RSADKey) + } + if h.dp != nil { + data[RSADPKey] = h.dp + fields = append(fields, RSADPKey) + } + if h.dq != nil { + data[RSADQKey] = h.dq + fields = append(fields, RSADQKey) + } + if h.e != nil { + data[RSAEKey] = h.e + fields = append(fields, RSAEKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + fields = append(fields, KeyIDKey) + } + if h.keyOps != nil { + data[KeyOpsKey] = *(h.keyOps) + fields = append(fields, KeyOpsKey) + } + if h.keyUsage != nil { + data[KeyUsageKey] = *(h.keyUsage) + fields = append(fields, KeyUsageKey) + } + if h.n != nil { + data[RSANKey] = h.n + fields = append(fields, RSANKey) + } + if h.p != nil { + data[RSAPKey] = h.p + fields = append(fields, RSAPKey) + } + if h.q != nil { + data[RSAQKey] = h.q + fields = append(fields, RSAQKey) + } + if h.qi != nil { + 
data[RSAQIKey] = h.qi + fields = append(fields, RSAQIKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + fields = append(fields, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + fields = append(fields, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + fields = append(fields, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + fields = append(fields, X509URLKey) + } + for k, v := range h.privateParams { + data[k] = v + fields = append(fields, k) + } + + sort.Strings(fields) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *rsaPrivateKey) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 16+len(h.privateParams)) + keys = append(keys, KeyTypeKey) + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.d != nil { + keys = append(keys, RSADKey) + } + if h.dp != nil { + keys = append(keys, RSADPKey) + } + if h.dq != nil { + keys = append(keys, RSADQKey) + } + if h.e != nil { + keys = append(keys, RSAEKey) + } + if h.keyID != nil { + keys = append(keys, KeyIDKey) + } + if h.keyOps != nil { + keys = 
append(keys, KeyOpsKey) + } + if h.keyUsage != nil { + keys = append(keys, KeyUsageKey) + } + if h.n != nil { + keys = append(keys, RSANKey) + } + if h.p != nil { + keys = append(keys, RSAPKey) + } + if h.q != nil { + keys = append(keys, RSAQKey) + } + if h.qi != nil { + keys = append(keys, RSAQIKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +var rsaStandardFields KeyFilter + +func init() { + rsaStandardFields = NewFieldNameFilter(KeyTypeKey, KeyUsageKey, KeyOpsKey, AlgorithmKey, KeyIDKey, X509URLKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, RSAEKey, RSANKey, RSADKey, RSADPKey, RSADQKey, RSAPKey, RSAQKey, RSAQIKey) +} + +// RSAStandardFieldsFilter returns a KeyFilter that filters out standard RSA fields. 
+func RSAStandardFieldsFilter() KeyFilter { + return rsaStandardFields +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go new file mode 100644 index 0000000000..6f339649a8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/set.go @@ -0,0 +1,319 @@ +package jwk + +import ( + "bytes" + "fmt" + "maps" + "reflect" + "sort" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +const keysKey = `keys` // appease linter + +func newSet() *set { + return &set{ + privateParams: make(map[string]any), + } +} + +// NewSet creates and empty `jwk.Set` object +func NewSet() Set { + return newSet() +} + +func (s *set) Set(n string, v any) error { + s.mu.RLock() + defer s.mu.RUnlock() + + if n == keysKey { + vl, ok := v.([]Key) + if !ok { + return fmt.Errorf(`value for field "keys" must be []jwk.Key`) + } + s.keys = vl + return nil + } + + s.privateParams[n] = v + return nil +} + +func (s *set) Get(name string, dst any) error { + s.mu.RLock() + defer s.mu.RUnlock() + + v, ok := s.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value to dst: %w`, err) + } + return nil +} + +func (s *set) Key(idx int) (Key, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + if idx >= 0 && idx < len(s.keys) { + return s.keys[idx], true + } + return nil, false +} + +func (s *set) Len() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.keys) +} + +// indexNL is Index(), but without the locking +func (s *set) indexNL(key Key) int { + for i, k := range s.keys { + if k == key { + return i + } + } + return -1 +} + +func (s *set) Index(key Key) int { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.indexNL(key) +} + +func (s *set) AddKey(key Key) error { + 
s.mu.Lock() + defer s.mu.Unlock() + + if reflect.ValueOf(key).IsNil() { + panic("nil key") + } + + if i := s.indexNL(key); i > -1 { + return fmt.Errorf(`(jwk.Set).AddKey: key already exists`) + } + s.keys = append(s.keys, key) + return nil +} + +func (s *set) Remove(name string) error { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.privateParams, name) + return nil +} + +func (s *set) RemoveKey(key Key) error { + s.mu.Lock() + defer s.mu.Unlock() + + for i, k := range s.keys { + if k == key { + switch i { + case 0: + s.keys = s.keys[1:] + case len(s.keys) - 1: + s.keys = s.keys[:i] + default: + s.keys = append(s.keys[:i], s.keys[i+1:]...) + } + return nil + } + } + return fmt.Errorf(`(jwk.Set).RemoveKey: specified key does not exist in set`) +} + +func (s *set) Clear() error { + s.mu.Lock() + defer s.mu.Unlock() + + s.keys = nil + s.privateParams = make(map[string]any) + return nil +} + +func (s *set) Keys() []string { + ret := make([]string, len(s.privateParams)) + var i int + for k := range s.privateParams { + ret[i] = k + i++ + } + return ret +} + +func (s *set) MarshalJSON() ([]byte, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + enc := json.NewEncoder(buf) + + fields := []string{keysKey} + for k := range s.privateParams { + fields = append(fields, k) + } + sort.Strings(fields) + + buf.WriteByte(tokens.OpenCurlyBracket) + for i, field := range fields { + if i > 0 { + buf.WriteByte(tokens.Comma) + } + fmt.Fprintf(buf, `%q:`, field) + if field != keysKey { + if err := enc.Encode(s.privateParams[field]); err != nil { + return nil, fmt.Errorf(`failed to marshal field %q: %w`, field, err) + } + } else { + buf.WriteByte(tokens.OpenSquareBracket) + for j, k := range s.keys { + if j > 0 { + buf.WriteByte(tokens.Comma) + } + if err := enc.Encode(k); err != nil { + return nil, fmt.Errorf(`failed to marshal key #%d: %w`, i, err) + } + } + buf.WriteByte(tokens.CloseSquareBracket) + } + } + 
buf.WriteByte(tokens.CloseCurlyBracket) + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (s *set) UnmarshalJSON(data []byte) error { + s.mu.Lock() + defer s.mu.Unlock() + + s.privateParams = make(map[string]any) + s.keys = nil + + var options []ParseOption + var ignoreParseError bool + if dc := s.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + options = append(options, withLocalRegistry(localReg)) + } + ignoreParseError = dc.IgnoreParseError() + } + + var sawKeysField bool + dec := json.NewDecoder(bytes.NewReader(data)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. + if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: + switch tok { + case "keys": + sawKeysField = true + var list []json.RawMessage + if err := dec.Decode(&list); err != nil { + return fmt.Errorf(`failed to decode "keys": %w`, err) + } + + for i, keysrc := range list { + key, err := ParseKey(keysrc, options...) + if err != nil { + if !ignoreParseError { + return fmt.Errorf(`failed to decode key #%d in "keys": %w`, i, err) + } + continue + } + s.keys = append(s.keys, key) + } + default: + var v any + if err := dec.Decode(&v); err != nil { + return fmt.Errorf(`failed to decode value for key %q: %w`, tok, err) + } + s.privateParams[tok] = v + } + } + } + + // This is really silly, but we can only detect the + // lack of the "keys" field after going through the + // entire object once + // Not checking for len(s.keys) == 0, because it could be + // an empty key set + if !sawKeysField { + key, err := ParseKey(data, options...) 
+ if err != nil { + return fmt.Errorf(`failed to parse sole key in key set`) + } + s.keys = append(s.keys, key) + } + return nil +} + +func (s *set) LookupKeyID(kid string) (Key, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + for i := range s.Len() { + key, ok := s.Key(i) + if !ok { + return nil, false + } + gotkid, ok := key.KeyID() + if ok && gotkid == kid { + return key, true + } + } + return nil, false +} + +func (s *set) DecodeCtx() DecodeCtx { + s.mu.RLock() + defer s.mu.RUnlock() + return s.dc +} + +func (s *set) SetDecodeCtx(dc DecodeCtx) { + s.mu.Lock() + defer s.mu.Unlock() + s.dc = dc +} + +func (s *set) Clone() (Set, error) { + s2 := newSet() + + s.mu.RLock() + defer s.mu.RUnlock() + + s2.keys = make([]Key, len(s.keys)) + copy(s2.keys, s.keys) + + maps.Copy(s2.privateParams, s.privateParams) + + return s2, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go new file mode 100644 index 0000000000..7db5e1591a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric.go @@ -0,0 +1,105 @@ +package jwk + +import ( + "crypto" + "fmt" + "reflect" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +func init() { + RegisterKeyExporter(jwa.OctetSeq(), KeyExportFunc(octetSeqToRaw)) +} + +func (k *symmetricKey) Import(rawKey []byte) error { + k.mu.Lock() + defer k.mu.Unlock() + + if len(rawKey) == 0 { + return fmt.Errorf(`non-empty []byte key required`) + } + + k.octets = rawKey + + return nil +} + +var symmetricConvertibleKeys = []reflect.Type{ + reflect.TypeFor[SymmetricKey](), +} + +func octetSeqToRaw(key Key, hint any) (any, error) { + extracted, err := extractEmbeddedKey(key, symmetricConvertibleKeys) + if err != nil { + return nil, fmt.Errorf(`failed to extract embedded key: %w`, err) + } + + switch key := extracted.(type) { + case SymmetricKey: + switch hint.(type) { + case *[]byte, *any: + default: + return nil, 
fmt.Errorf(`invalid destination object type %T for symmetric key: %w`, hint, ContinueError()) + } + + locker, ok := key.(rlocker) + if ok { + locker.rlock() + defer locker.runlock() + } + + ooctets, ok := key.Octets() + if !ok { + return nil, fmt.Errorf(`jwk.SymmetricKey: missing "k" field`) + } + + octets := make([]byte, len(ooctets)) + copy(octets, ooctets) + return octets, nil + default: + return nil, ContinueError() + } +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 +func (k *symmetricKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + var octets []byte + if err := Export(k, &octets); err != nil { + return nil, fmt.Errorf(`failed to export symmetric key: %w`, err) + } + + h := hash.New() + fmt.Fprint(h, `{"k":"`) + fmt.Fprint(h, base64.EncodeToString(octets)) + fmt.Fprint(h, `","kty":"oct"}`) + return h.Sum(nil), nil +} + +func (k *symmetricKey) PublicKey() (Key, error) { + newKey := newSymmetricKey() + + for _, key := range k.Keys() { + var v any + if err := k.Get(key, &v); err != nil { + return nil, fmt.Errorf(`failed to get field %q: %w`, key, err) + } + + if err := newKey.Set(key, v); err != nil { + return nil, fmt.Errorf(`failed to set field %q: %w`, key, err) + } + } + return newKey, nil +} + +func (k *symmetricKey) Validate() error { + octets, ok := k.Octets() + if !ok || len(octets) == 0 { + return NewKeyValidationError(fmt.Errorf(`jwk.SymmetricKey: missing "k" field`)) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric_gen.go new file mode 100644 index 0000000000..bfd2f8497d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/symmetric_gen.go @@ -0,0 +1,620 @@ +// Code generated by tools/cmd/genjwk/main.go. DO NOT EDIT. 
+ +package jwk + +import ( + "bytes" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/cert" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +const ( + SymmetricOctetsKey = "k" +) + +type SymmetricKey interface { + Key + Octets() ([]byte, bool) +} + +type symmetricKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + octets []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]any + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ SymmetricKey = &symmetricKey{} +var _ Key = &symmetricKey{} + +func newSymmetricKey() *symmetricKey { + return &symmetricKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]any), + } +} + +func (h symmetricKey) KeyType() jwa.KeyType { + return jwa.OctetSeq() +} + +func (h symmetricKey) rlock() { + h.mu.RLock() +} + +func (h symmetricKey) runlock() { + h.mu.RUnlock() +} + +func (h *symmetricKey) Algorithm() (jwa.KeyAlgorithm, bool) { + if h.algorithm != nil { + return *(h.algorithm), true + } + return nil, false +} + +func (h *symmetricKey) KeyID() (string, bool) { + if h.keyID != nil { + return *(h.keyID), true + } + return "", false +} + +func (h *symmetricKey) KeyOps() (KeyOperationList, bool) { + if h.keyOps != nil { + return *(h.keyOps), true 
+ } + return nil, false +} + +func (h *symmetricKey) KeyUsage() (string, bool) { + if h.keyUsage != nil { + return *(h.keyUsage), true + } + return "", false +} + +func (h *symmetricKey) Octets() ([]byte, bool) { + if h.octets != nil { + return h.octets, true + } + return nil, false +} + +func (h *symmetricKey) X509CertChain() (*cert.Chain, bool) { + return h.x509CertChain, true +} + +func (h *symmetricKey) X509CertThumbprint() (string, bool) { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint), true + } + return "", false +} + +func (h *symmetricKey) X509CertThumbprintS256() (string, bool) { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256), true + } + return "", false +} + +func (h *symmetricKey) X509URL() (string, bool) { + if h.x509URL != nil { + return *(h.x509URL), true + } + return "", false +} + +func (h *symmetricKey) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return true + case AlgorithmKey: + return h.algorithm != nil + case KeyIDKey: + return h.keyID != nil + case KeyOpsKey: + return h.keyOps != nil + case KeyUsageKey: + return h.keyUsage != nil + case SymmetricOctetsKey: + return h.octets != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return h.x509URL != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *symmetricKey) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + if err := blackmagic.AssignIfCompatible(dst, h.KeyType()); err != nil { + return fmt.Errorf(`symmetricKey.Get: failed to assign value for field %q to destination object: %w`, name, err) + } + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := 
blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyIDKey: + if h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyOpsKey: + if h.keyOps == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyOps)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyUsageKey: + if h.keyUsage == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyUsage)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case SymmetricOctetsKey: + if h.octets == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.octets); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertChainKey: + if h.x509CertChain == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return 
fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *symmetricKey) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *symmetricKey) setNoLock(name string, value any) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.KeyEncryptionAlgorithm, jwa.ContentEncryptionAlgorithm: + tmp, err := jwa.KeyAlgorithmFrom(v) + if err != nil { + return fmt.Errorf(`invalid algorithm for %q key: %w`, AlgorithmKey, err) + } + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %q key: %T`, AlgorithmKey, value) + } + return nil + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case SymmetricOctetsKey: + if v, ok := 
value.([]byte); ok { + h.octets = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, SymmetricOctetsKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *symmetricKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case SymmetricOctetsKey: + k.octets = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *symmetricKey) Clone() (Key, error) { + key, err := cloneKey(k) + if err != nil { + return nil, fmt.Errorf(`symmetricKey.Clone: %w`, err) + } + return key, nil +} + +func (k *symmetricKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *symmetricKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *symmetricKey) 
UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.octets = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. + if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.OctetSeq().String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg, err := jwa.KeyAlgorithmFrom(s) + if err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &alg + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case 
SymmetricOctetsKey: + if err := json.AssignNextBytesToken(&h.octets, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, SymmetricOctetsKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.octets == nil { + return fmt.Errorf(`required field k is missing`) + } + return nil +} + +func (h symmetricKey) MarshalJSON() ([]byte, error) { + data := make(map[string]any) + fields := make([]string, 0, 9) + data[KeyTypeKey] = jwa.OctetSeq() + fields = append(fields, KeyTypeKey) + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + fields = append(fields, AlgorithmKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + fields = append(fields, KeyIDKey) + } + if h.keyOps != nil { + data[KeyOpsKey] = *(h.keyOps) + fields = append(fields, 
KeyOpsKey) + } + if h.keyUsage != nil { + data[KeyUsageKey] = *(h.keyUsage) + fields = append(fields, KeyUsageKey) + } + if h.octets != nil { + data[SymmetricOctetsKey] = h.octets + fields = append(fields, SymmetricOctetsKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + fields = append(fields, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + fields = append(fields, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + fields = append(fields, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + fields = append(fields, X509URLKey) + } + for k, v := range h.privateParams { + data[k] = v + fields = append(fields, k) + } + + sort.Strings(fields) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *symmetricKey) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 9+len(h.privateParams)) + keys = append(keys, KeyTypeKey) + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.keyID != nil { + keys = append(keys, KeyIDKey) + } + if h.keyOps != nil { + keys = append(keys, KeyOpsKey) + } + if h.keyUsage != nil 
{ + keys = append(keys, KeyUsageKey) + } + if h.octets != nil { + keys = append(keys, SymmetricOctetsKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +var symmetricStandardFields KeyFilter + +func init() { + symmetricStandardFields = NewFieldNameFilter(KeyTypeKey, KeyUsageKey, KeyOpsKey, AlgorithmKey, KeyIDKey, X509URLKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, SymmetricOctetsKey) +} + +// SymmetricStandardFieldsFilter returns a KeyFilter that filters out standard Symmetric fields. +func SymmetricStandardFieldsFilter() KeyFilter { + return symmetricStandardFields +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/usage.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/usage.go new file mode 100644 index 0000000000..ed724153b8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/usage.go @@ -0,0 +1,74 @@ +package jwk + +import ( + "fmt" + "sync" + "sync/atomic" +) + +var strictKeyUsage = atomic.Bool{} +var keyUsageNames = map[string]struct{}{} +var muKeyUsageName sync.RWMutex + +// RegisterKeyUsage registers a possible value that can be used for KeyUsageType. +// Normally, key usage (or the "use" field in a JWK) is either "sig" or "enc", +// but other values may be used. +// +// While this module only works with "sig" and "enc", it is possible that +// systems choose to use other values. This function allows users to register +// new values to be accepted as valid key usage types. Values are case sensitive. +// +// Furthermore, the check against registered values can be completely turned off +// by setting the global option `jwk.WithStrictKeyUsage(false)`. 
+func RegisterKeyUsage(v string) { + muKeyUsageName.Lock() + defer muKeyUsageName.Unlock() + keyUsageNames[v] = struct{}{} +} + +func UnregisterKeyUsage(v string) { + muKeyUsageName.Lock() + defer muKeyUsageName.Unlock() + delete(keyUsageNames, v) +} + +func init() { + strictKeyUsage.Store(true) + RegisterKeyUsage("sig") + RegisterKeyUsage("enc") +} + +func isValidUsage(v string) bool { + // This function can return true if strictKeyUsage is false + if !strictKeyUsage.Load() { + return true + } + + muKeyUsageName.RLock() + defer muKeyUsageName.RUnlock() + _, ok := keyUsageNames[v] + return ok +} + +func (k KeyUsageType) String() string { + return string(k) +} + +func (k *KeyUsageType) Accept(v any) error { + switch v := v.(type) { + case KeyUsageType: + if !isValidUsage(v.String()) { + return fmt.Errorf("invalid key usage type: %q", v) + } + *k = v + return nil + case string: + if !isValidUsage(v) { + return fmt.Errorf("invalid key usage type: %q", v) + } + *k = KeyUsageType(v) + return nil + } + + return fmt.Errorf("invalid Go type for key usage type: %T", v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/whitelist.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/whitelist.go new file mode 100644 index 0000000000..0b0df701ae --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/whitelist.go @@ -0,0 +1,38 @@ +package jwk + +import "github.com/lestrrat-go/httprc/v3" + +type Whitelist = httprc.Whitelist +type WhitelistFunc = httprc.WhitelistFunc + +// InsecureWhitelist is an alias to httprc.InsecureWhitelist. Use +// functions in the `httprc` package to interact with this type. +type InsecureWhitelist = httprc.InsecureWhitelist + +func NewInsecureWhitelist() InsecureWhitelist { + return httprc.NewInsecureWhitelist() +} + +// BlockAllWhitelist is an alias to httprc.BlockAllWhitelist. Use +// functions in the `httprc` package to interact with this type. 
+type BlockAllWhitelist = httprc.BlockAllWhitelist + +func NewBlockAllWhitelist() BlockAllWhitelist { + return httprc.NewBlockAllWhitelist() +} + +// RegexpWhitelist is an alias to httprc.RegexpWhitelist. Use +// functions in the `httprc` package to interact with this type. +type RegexpWhitelist = httprc.RegexpWhitelist + +func NewRegexpWhitelist() *RegexpWhitelist { + return httprc.NewRegexpWhitelist() +} + +// MapWhitelist is an alias to httprc.MapWhitelist. Use +// functions in the `httprc` package to interact with this type. +type MapWhitelist = httprc.MapWhitelist + +func NewMapWhitelist() MapWhitelist { + return httprc.NewMapWhitelist() +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go b/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go new file mode 100644 index 0000000000..f06063c6ed --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwk/x509.go @@ -0,0 +1,249 @@ +package jwk + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/jwk/jwkbb" +) + +// PEMDecoder is an interface to describe an object that can decode +// a key from PEM encoded ASN.1 DER format. +// +// A PEMDecoder can be specified as an option to `jwk.Parse()` or `jwk.ParseKey()` +// along with the `jwk.WithPEM()` option. +type PEMDecoder interface { + Decode([]byte) (any, []byte, error) +} + +// PEMEncoder is an interface to describe an object that can encode +// a key into PEM encoded ASN.1 DER format. +// +// `jwk.Key` instances do not implement a way to encode themselves into +// PEM format. Normally you can just use `jwk.EncodePEM()` to do this, but +// this interface allows you to generalize the encoding process by +// abstracting the `jwk.EncodePEM()` function using `jwk.PEMEncodeFunc` +// along with alternate implementations, should you need them. 
+type PEMEncoder interface { + Encode(any) (string, []byte, error) +} + +type PEMEncodeFunc func(any) (string, []byte, error) + +func (f PEMEncodeFunc) Encode(v any) (string, []byte, error) { + return f(v) +} + +func encodeX509(v any) (string, []byte, error) { + // we can't import jwk, so just use the interface + if key, ok := v.(Key); ok { + var raw any + if err := Export(key, &raw); err != nil { + return "", nil, fmt.Errorf(`failed to get raw key out of %T: %w`, key, err) + } + + v = raw + } + + // Try to convert it into a certificate + switch v := v.(type) { + case *rsa.PrivateKey: + return pmRSAPrivateKey, x509.MarshalPKCS1PrivateKey(v), nil + case *ecdsa.PrivateKey: + marshaled, err := x509.MarshalECPrivateKey(v) + if err != nil { + return "", nil, err + } + return pmECPrivateKey, marshaled, nil + case ed25519.PrivateKey: + marshaled, err := x509.MarshalPKCS8PrivateKey(v) + if err != nil { + return "", nil, err + } + return pmPrivateKey, marshaled, nil + case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey: + marshaled, err := x509.MarshalPKIXPublicKey(v) + if err != nil { + return "", nil, err + } + return pmPublicKey, marshaled, nil + default: + return "", nil, fmt.Errorf(`unsupported type %T for ASN.1 DER encoding`, v) + } +} + +// EncodePEM encodes the key into a PEM encoded ASN.1 DER format. +// The key can be a jwk.Key or a raw key instance, but it must be one of +// the types supported by `x509` package. 
+// +// Internally, it uses the same routine as `jwk.EncodeX509()`, and therefore +// the same caveats apply +func EncodePEM(v any) ([]byte, error) { + typ, marshaled, err := encodeX509(v) + if err != nil { + return nil, fmt.Errorf(`failed to encode key in x509: %w`, err) + } + + block := &pem.Block{ + Type: typ, + Bytes: marshaled, + } + return pem.EncodeToMemory(block), nil +} + +const ( + pmPrivateKey = `PRIVATE KEY` + pmPublicKey = `PUBLIC KEY` + pmECPrivateKey = `EC PRIVATE KEY` + pmRSAPublicKey = `RSA PUBLIC KEY` + pmRSAPrivateKey = `RSA PRIVATE KEY` +) + +// NewPEMDecoder returns a PEMDecoder that decodes keys in PEM encoded ASN.1 DER format. +// You can use it as argument to `jwk.WithPEMDecoder()` option. +// +// The use of this function is planned to be deprecated. The plan is to replace the +// `jwk.WithPEMDecoder()` option with globally available custom X509 decoders which +// can be registered via `jwk.RegisterX509Decoder()` function. +func NewPEMDecoder() PEMDecoder { + return pemDecoder{} +} + +type pemDecoder struct{} + +// Decode decodes a key in PEM encoded ASN.1 DER format. +// and returns a raw key. +func (pemDecoder) Decode(src []byte) (any, []byte, error) { + block, rest := pem.Decode(src) + if block == nil { + return nil, rest, fmt.Errorf(`failed to decode PEM data`) + } + var ret any + if err := jwkbb.DecodeX509(&ret, block); err != nil { + return nil, rest, err + } + return ret, rest, nil +} + +// X509Decoder is an interface that describes an object that can decode +// a PEM encoded ASN.1 DER format into a specific type of key. +// +// This interface is experimental, and may change in the future. +type X509Decoder interface { + // DecodeX509 decodes the given PEM block into the destination object. + // The destination object must be a pointer to a type that can hold the + // decoded key, such as *rsa.PrivateKey, *ecdsa.PrivateKey, etc. 
+ DecodeX509(dst any, block *pem.Block) error +} + +// X509DecodeFunc is a function type that implements the X509Decoder interface. +// It allows you to create a custom X509Decoder by providing a function +// that takes a destination and a PEM block, and returns an error if the decoding fails. +// +// This interface is experimental, and may change in the future. +type X509DecodeFunc func(dst any, block *pem.Block) error + +func (f X509DecodeFunc) DecodeX509(dst any, block *pem.Block) error { + return f(dst, block) +} + +var muX509Decoders sync.Mutex +var x509Decoders = map[any]int{} +var x509DecoderList = []X509Decoder{} + +type identDefaultX509Decoder struct{} + +func init() { + RegisterX509Decoder(identDefaultX509Decoder{}, X509DecodeFunc(jwkbb.DecodeX509)) +} + +// RegisterX509Decoder registers a new X509Decoder that can decode PEM encoded ASN.1 DER format. +// Because the decoder could be non-comparable, you must provide an identifier that can be used +// as a map key to identify the decoder. +// +// This function is experimental, and may change in the future. +func RegisterX509Decoder(ident any, decoder X509Decoder) { + if decoder == nil { + panic(`jwk.RegisterX509Decoder: decoder cannot be nil`) + } + + muX509Decoders.Lock() + defer muX509Decoders.Unlock() + if _, ok := x509Decoders[ident]; ok { + return // already registered + } + + x509Decoders[ident] = len(x509DecoderList) + x509DecoderList = append(x509DecoderList, decoder) +} + +// UnregisterX509Decoder unregisters the X509Decoder identified by the given identifier. +// If the identifier is not registered, it does nothing. +// +// This function is experimental, and may change in the future. 
+func UnregisterX509Decoder(ident any) { + muX509Decoders.Lock() + defer muX509Decoders.Unlock() + idx, ok := x509Decoders[ident] + if !ok { + return // not registered + } + + delete(x509Decoders, ident) + + l := len(x509DecoderList) + switch idx { + case l - 1: + // if the last element, just truncate the slice + x509DecoderList = x509DecoderList[:l-1] + case 0: + // if the first element, just shift the slice + x509DecoderList = x509DecoderList[1:] + default: + // if the element is in the middle, remove it by slicing + // and appending the two slices together + x509DecoderList = append(x509DecoderList[:idx], x509DecoderList[idx+1:]...) + } +} + +// decodeX509 decodes a PEM encoded ASN.1 DER format into the given destination. +// It tries all registered X509 decoders until one of them succeeds. +// If no decoder can handle the PEM block, it returns an error. +func decodeX509(dst any, src []byte) error { + block, _ := pem.Decode(src) + if block == nil { + return fmt.Errorf(`failed to decode PEM data`) + } + + var errs []error + for _, d := range x509DecoderList { + if err := d.DecodeX509(dst, block); err != nil { + errs = append(errs, err) + continue + } + // successfully decoded + return nil + } + + return fmt.Errorf(`failed to decode X509 data using any of the decoders: %w`, errors.Join(errs...)) +} + +func decodeX509WithPEMDEcoder(dst any, src []byte, decoder PEMDecoder) error { + ret, _, err := decoder.Decode(src) + if err != nil { + return fmt.Errorf(`failed to decode PEM data: %w`, err) + } + + if err := blackmagic.AssignIfCompatible(dst, ret); err != nil { + return fmt.Errorf(`failed to assign decoded key to destination: %w`, err) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/BUILD.bazel new file mode 100644 index 0000000000..920d3f87b1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/BUILD.bazel @@ -0,0 +1,76 @@ +load("@rules_go//go:def.bzl", "go_library", 
"go_test") + +go_library( + name = "jws", + srcs = [ + "errors.go", + "filter.go", + "headers.go", + "headers_gen.go", + "interface.go", + "io.go", + "jws.go", + "key_provider.go", + "legacy.go", + "message.go", + "options.go", + "options_gen.go", + "signer.go", + "sign_context.go", + "signature_builder.go", + "verifier.go", + "verify_context.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jws", + visibility = ["//visibility:public"], + deps = [ + "//cert", + "//internal/base64", + "//internal/ecutil", + "//internal/json", + "//internal/jwxio", + "//internal/tokens", + "//internal/keyconv", + "//internal/pool", + "//jwa", + "//jwk", + "//jws/internal/keytype", + "//jws/jwsbb", + "//jws/legacy", + "//transform", + "@com_github_lestrrat_go_blackmagic//:blackmagic", + "@com_github_lestrrat_go_option_v2//:option", + ], +) + +go_test( + name = "jws_test", + srcs = [ + "es256k_test.go", + "filter_test.go", + "headers_test.go", + "jws_test.go", + "message_test.go", + "options_gen_test.go", + "signer_test.go", + ], + embed = [":jws"], + deps = [ + "//cert", + "//internal/base64", + "//internal/ecutil", + "//internal/json", + "//internal/jwxtest", + "//jwa", + "//jwk", + "//jwt", + "@com_github_lestrrat_go_httprc_v3//:httprc", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":jws", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jws/README.md new file mode 100644 index 0000000000..29ca7218e4 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/README.md @@ -0,0 +1,111 @@ +# JWS [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v3/jws.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jws) + +Package jws implements JWS as described in [RFC7515](https://tools.ietf.org/html/rfc7515) and [RFC7797](https://tools.ietf.org/html/rfc7797) + +* Parse and generate compact or JSON serializations 
+* Sign and verify arbitrary payload +* Use any of the keys supported in [github.com/lestrrat-go/jwx/v3/jwk](../jwk) +* Add arbitrary fields in the JWS object +* Ability to add/replace existing signature methods +* Respect "b64" settings for RFC7797 + +How-to style documentation can be found in the [docs directory](../docs). + +Examples are located in the examples directory ([jws_example_test.go](../examples/jws_example_test.go)) + +Supported signature algorithms: + +| Algorithm | Supported? | Constant in [jwa](../jwa) | +|:----------------------------------------|:-----------|:-------------------------| +| HMAC using SHA-256 | YES | jwa.HS256 | +| HMAC using SHA-384 | YES | jwa.HS384 | +| HMAC using SHA-512 | YES | jwa.HS512 | +| RSASSA-PKCS-v1.5 using SHA-256 | YES | jwa.RS256 | +| RSASSA-PKCS-v1.5 using SHA-384 | YES | jwa.RS384 | +| RSASSA-PKCS-v1.5 using SHA-512 | YES | jwa.RS512 | +| ECDSA using P-256 and SHA-256 | YES | jwa.ES256 | +| ECDSA using P-384 and SHA-384 | YES | jwa.ES384 | +| ECDSA using P-521 and SHA-512 | YES | jwa.ES512 | +| ECDSA using secp256k1 and SHA-256 (2) | YES | jwa.ES256K | +| RSASSA-PSS using SHA256 and MGF1-SHA256 | YES | jwa.PS256 | +| RSASSA-PSS using SHA384 and MGF1-SHA384 | YES | jwa.PS384 | +| RSASSA-PSS using SHA512 and MGF1-SHA512 | YES | jwa.PS512 | +| EdDSA (1) | YES | jwa.EdDSA | + +* Note 1: Experimental +* Note 2: Experimental, and must be toggled using `-tags jwx_es256k` build tag + +# SYNOPSIS + +## Sign and verify arbitrary data + +```go +import( + "crypto/rand" + "crypto/rsa" + "log" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jws" +) + +func main() { + privkey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Printf("failed to generate private key: %s", err) + return + } + + buf, err := jws.Sign([]byte("Lorem ipsum"), jws.WithKey(jwa.RS256, privkey)) + if err != nil { + log.Printf("failed to created JWS message: %s", err) + return + } + + // When you receive a JWS 
message, you can verify the signature + // and grab the payload sent in the message in one go: + verified, err := jws.Verify(buf, jws.WithKey(jwa.RS256, &privkey.PublicKey)) + if err != nil { + log.Printf("failed to verify message: %s", err) + return + } + + log.Printf("signed message verified! -> %s", verified) +} +``` + +## Programmatically manipulate `jws.Message` + +```go +func ExampleMessage() { + // initialization for the following variables have been omitted. + // please see jws_example_test.go for details + var decodedPayload, decodedSig1, decodedSig2 []byte + var public1, protected1, public2, protected2 jws.Header + + // Construct a message. DO NOT use values that are base64 encoded + m := jws.NewMessage(). + SetPayload(decodedPayload). + AppendSignature( + jws.NewSignature(). + SetSignature(decodedSig1). + SetProtectedHeaders(public1). + SetPublicHeaders(protected1), + ). + AppendSignature( + jws.NewSignature(). + SetSignature(decodedSig2). + SetProtectedHeaders(public2). + SetPublicHeaders(protected2), + ) + + buf, err := json.MarshalIndent(m, "", " ") + if err != nil { + fmt.Printf("%s\n", err) + return + } + + _ = buf +} +``` + diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/errors.go new file mode 100644 index 0000000000..d5e1762a6a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/errors.go @@ -0,0 +1,112 @@ +package jws + +import ( + "fmt" +) + +type signError struct { + error +} + +var errDefaultSignError = signerr(`unknown error`) + +// SignError returns an error that can be passed to `errors.Is` to check if the error is a sign error. 
+func SignError() error { + return errDefaultSignError +} + +func (e signError) Unwrap() error { + return e.error +} + +func (signError) Is(err error) bool { + _, ok := err.(signError) + return ok +} + +func signerr(f string, args ...any) error { + return signError{fmt.Errorf(`jws.Sign: `+f, args...)} +} + +// This error is returned when jws.Verify fails, but note that there's another type of +// message that can be returned by jws.Verify, which is `errVerification`. +type verifyError struct { + error +} + +var errDefaultVerifyError = verifyerr(`unknown error`) + +// VerifyError returns an error that can be passed to `errors.Is` to check if the error is a verify error. +func VerifyError() error { + return errDefaultVerifyError +} + +func (e verifyError) Unwrap() error { + return e.error +} + +func (verifyError) Is(err error) bool { + _, ok := err.(verifyError) + return ok +} + +func verifyerr(f string, args ...any) error { + return verifyError{fmt.Errorf(`jws.Verify: `+f, args...)} +} + +// verificationError is returned when the actual _verification_ of the key/payload fails. +type verificationError struct { + error +} + +var errDefaultVerificationError = verificationError{fmt.Errorf(`unknown verification error`)} + +// VerificationError returns an error that can be passed to `errors.Is` to check if the error is a verification error. +func VerificationError() error { + return errDefaultVerificationError +} + +func (e verificationError) Unwrap() error { + return e.error +} + +func (verificationError) Is(err error) bool { + _, ok := err.(verificationError) + return ok +} + +type parseError struct { + error +} + +var errDefaultParseError = parseerr(`unknown error`) + +// ParseError returns an error that can be passed to `errors.Is` to check if the error is a parse error. 
+func ParseError() error { + return errDefaultParseError +} + +func (e parseError) Unwrap() error { + return e.error +} + +func (parseError) Is(err error) bool { + _, ok := err.(parseError) + return ok +} + +func bparseerr(prefix string, f string, args ...any) error { + return parseError{fmt.Errorf(prefix+": "+f, args...)} +} + +func parseerr(f string, args ...any) error { + return bparseerr(`jws.Parse`, f, args...) +} + +func sparseerr(f string, args ...any) error { + return bparseerr(`jws.ParseString`, f, args...) +} + +func rparseerr(f string, args ...any) error { + return bparseerr(`jws.ParseReader`, f, args...) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go new file mode 100644 index 0000000000..28ebd2ea0e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/es256k.go @@ -0,0 +1,12 @@ +//go:build jwx_es256k + +package jws + +import ( + "github.com/lestrrat-go/jwx/v3/jwa" +) + +func init() { + // Register ES256K to EC algorithm family + addAlgorithmForKeyType(jwa.EC(), jwa.ES256K()) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/filter.go new file mode 100644 index 0000000000..9351ab870b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/filter.go @@ -0,0 +1,36 @@ +package jws + +import ( + "github.com/lestrrat-go/jwx/v3/transform" +) + +// HeaderFilter is an interface that allows users to filter JWS header fields. +// It provides two methods: Filter and Reject; Filter returns a new header with only +// the fields that match the filter criteria, while Reject returns a new header with +// only the fields that DO NOT match the filter. +// +// EXPERIMENTAL: This API is experimental and its interface and behavior is +// subject to change in future releases. This API is not subject to semver +// compatibility guarantees. 
+type HeaderFilter interface { + Filter(header Headers) (Headers, error) + Reject(header Headers) (Headers, error) +} + +// StandardHeadersFilter returns a HeaderFilter that filters out standard JWS header fields. +// +// You can use this filter to create headers that either only have standard fields +// or only custom fields. +// +// If you need to configure the filter more precisely, consider +// using the HeaderNameFilter directly. +func StandardHeadersFilter() HeaderFilter { + return stdHeadersFilter +} + +var stdHeadersFilter = NewHeaderNameFilter(stdHeaderNames...) + +// NewHeaderNameFilter creates a new HeaderNameFilter with the specified field names. +func NewHeaderNameFilter(names ...string) HeaderFilter { + return transform.NewNameBasedFilter[Headers](names...) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/headers.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/headers.go new file mode 100644 index 0000000000..45f8e8959e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/headers.go @@ -0,0 +1,52 @@ +package jws + +import ( + "fmt" +) + +func (h *stdHeaders) Copy(dst Headers) error { + for _, k := range h.Keys() { + var v any + if err := h.Get(k, &v); err != nil { + return fmt.Errorf(`failed to get header %q: %w`, k, err) + } + if err := dst.Set(k, v); err != nil { + return fmt.Errorf(`failed to set header %q: %w`, k, err) + } + } + return nil +} + +// mergeHeaders merges two headers, and works even if the first Header +// object is nil. 
This is not exported because ATM it felt like this +// function is not frequently used, and MergeHeaders seemed a clunky name +func mergeHeaders(h1, h2 Headers) (Headers, error) { + h3 := NewHeaders() + + if h1 != nil { + if err := h1.Copy(h3); err != nil { + return nil, fmt.Errorf(`failed to copy headers from first Header: %w`, err) + } + } + + if h2 != nil { + if err := h2.Copy(h3); err != nil { + return nil, fmt.Errorf(`failed to copy headers from second Header: %w`, err) + } + } + + return h3, nil +} + +func (h *stdHeaders) Merge(h2 Headers) (Headers, error) { + return mergeHeaders(h, h2) +} + +// Clone creates a deep copy of the header +func (h *stdHeaders) Clone() (Headers, error) { + dst := NewHeaders() + if err := h.Copy(dst); err != nil { + return nil, fmt.Errorf(`failed to copy header: %w`, err) + } + return dst, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/headers_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/headers_gen.go new file mode 100644 index 0000000000..8465eda2b1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/headers_gen.go @@ -0,0 +1,704 @@ +// Code generated by tools/cmd/genjws/main.go. DO NOT EDIT. + +package jws + +import ( + "bytes" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/cert" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" +) + +const ( + AlgorithmKey = "alg" + ContentTypeKey = "cty" + CriticalKey = "crit" + JWKKey = "jwk" + JWKSetURLKey = "jku" + KeyIDKey = "kid" + TypeKey = "typ" + X509CertChainKey = "x5c" + X509CertThumbprintKey = "x5t" + X509CertThumbprintS256Key = "x5t#S256" + X509URLKey = "x5u" +) + +// Headers describe a standard JWS Header set. 
It is part of the JWS message +// and is used to represet both Public or Protected headers, which in turn +// can be found in each Signature object. If you are not sure how this works, +// it is strongly recommended that you read RFC7515, especially the section +// that describes the full JSON serialization format of JWS messages. +// +// In most cases, you likely want to use the protected headers, as this is part of the signed content. +type Headers interface { + Algorithm() (jwa.SignatureAlgorithm, bool) + ContentType() (string, bool) + Critical() ([]string, bool) + JWK() (jwk.Key, bool) + JWKSetURL() (string, bool) + KeyID() (string, bool) + Type() (string, bool) + X509CertChain() (*cert.Chain, bool) + X509CertThumbprint() (string, bool) + X509CertThumbprintS256() (string, bool) + X509URL() (string, bool) + Copy(Headers) error + Merge(Headers) (Headers, error) + Clone() (Headers, error) + // Get is used to extract the value of any field, including non-standard fields, out of the header. + // + // The first argument is the name of the field. The second argument is a pointer + // to a variable that will receive the value of the field. The method returns + // an error if the field does not exist, or if the value cannot be assigned to + // the destination variable. Note that a field is considered to "exist" even if + // the value is empty-ish (e.g. 0, false, ""), as long as it is explicitly set. + Get(string, any) error + Set(string, any) error + Remove(string) error + // Has returns true if the specified header has a value, even if + // the value is empty-ish (e.g. 0, false, "") as long as it has been + // explicitly set. + Has(string) bool + Keys() []string +} + +// stdHeaderNames is a list of all standard header names defined in the JWS specification. 
+var stdHeaderNames = []string{AlgorithmKey, ContentTypeKey, CriticalKey, JWKKey, JWKSetURLKey, KeyIDKey, TypeKey, X509CertChainKey, X509CertThumbprintKey, X509CertThumbprintS256Key, X509URLKey} + +type stdHeaders struct { + algorithm *jwa.SignatureAlgorithm // https://tools.ietf.org/html/rfc7515#section-4.1.1 + contentType *string // https://tools.ietf.org/html/rfc7515#section-4.1.10 + critical []string // https://tools.ietf.org/html/rfc7515#section-4.1.11 + jwk jwk.Key // https://tools.ietf.org/html/rfc7515#section-4.1.3 + jwkSetURL *string // https://tools.ietf.org/html/rfc7515#section-4.1.2 + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + typ *string // https://tools.ietf.org/html/rfc7515#section-4.1.9 + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]any + mu *sync.RWMutex + dc DecodeCtx + raw []byte // stores the raw version of the header so it can be used later +} + +func NewHeaders() Headers { + return &stdHeaders{ + mu: &sync.RWMutex{}, + } +} + +func (h *stdHeaders) Algorithm() (jwa.SignatureAlgorithm, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.algorithm == nil { + return jwa.EmptySignatureAlgorithm(), false + } + return *(h.algorithm), true +} + +func (h *stdHeaders) ContentType() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.contentType == nil { + return "", false + } + return *(h.contentType), true +} + +func (h *stdHeaders) Critical() ([]string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.critical, true +} + +func (h *stdHeaders) JWK() (jwk.Key, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.jwk, true +} + +func (h *stdHeaders) JWKSetURL() (string, bool) { + h.mu.RLock() + defer 
h.mu.RUnlock() + if h.jwkSetURL == nil { + return "", false + } + return *(h.jwkSetURL), true +} + +func (h *stdHeaders) KeyID() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.keyID == nil { + return "", false + } + return *(h.keyID), true +} + +func (h *stdHeaders) Type() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.typ == nil { + return "", false + } + return *(h.typ), true +} + +func (h *stdHeaders) X509CertChain() (*cert.Chain, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + return h.x509CertChain, true +} + +func (h *stdHeaders) X509CertThumbprint() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509CertThumbprint == nil { + return "", false + } + return *(h.x509CertThumbprint), true +} + +func (h *stdHeaders) X509CertThumbprintS256() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509CertThumbprintS256 == nil { + return "", false + } + return *(h.x509CertThumbprintS256), true +} + +func (h *stdHeaders) X509URL() (string, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509URL == nil { + return "", false + } + return *(h.x509URL), true +} + +func (h *stdHeaders) clear() { + h.algorithm = nil + h.contentType = nil + h.critical = nil + h.jwk = nil + h.jwkSetURL = nil + h.keyID = nil + h.typ = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + h.privateParams = nil + h.raw = nil +} + +func (h *stdHeaders) DecodeCtx() DecodeCtx { + h.mu.RLock() + defer h.mu.RUnlock() + return h.dc +} + +func (h *stdHeaders) SetDecodeCtx(dc DecodeCtx) { + h.mu.Lock() + defer h.mu.Unlock() + h.dc = dc +} + +func (h *stdHeaders) rawBuffer() []byte { + return h.raw +} + +func (h *stdHeaders) PrivateParams() map[string]any { + h.mu.RLock() + defer h.mu.RUnlock() + return h.privateParams +} + +func (h *stdHeaders) Has(name string) bool { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case AlgorithmKey: + return h.algorithm != nil + case ContentTypeKey: + 
return h.contentType != nil + case CriticalKey: + return h.critical != nil + case JWKKey: + return h.jwk != nil + case JWKSetURLKey: + return h.jwkSetURL != nil + case KeyIDKey: + return h.keyID != nil + case TypeKey: + return h.typ != nil + case X509CertChainKey: + return h.x509CertChain != nil + case X509CertThumbprintKey: + return h.x509CertThumbprint != nil + case X509CertThumbprintS256Key: + return h.x509CertThumbprintS256 != nil + case X509URLKey: + return h.x509URL != nil + default: + _, ok := h.privateParams[name] + return ok + } +} + +func (h *stdHeaders) Get(name string, dst any) error { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case AlgorithmKey: + if h.algorithm == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.algorithm)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case ContentTypeKey: + if h.contentType == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.contentType)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case CriticalKey: + if h.critical == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, + h.critical); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case JWKKey: + if h.jwk == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, + h.jwk); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case JWKSetURLKey: + if h.jwkSetURL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.jwkSetURL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case KeyIDKey: + if 
h.keyID == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.keyID)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case TypeKey: + if h.typ == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.typ)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertChainKey: + if h.x509CertChain == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, + h.x509CertChain); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprint)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509CertThumbprintS256)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + case X509URLKey: + if h.x509URL == nil { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, *(h.x509URL)); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + return nil + default: + v, ok := h.privateParams[name] + if !ok { + return fmt.Errorf(`field %q not found`, name) + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return fmt.Errorf(`failed to assign value for field %q: %w`, name, err) + } + } + return nil +} + +func (h *stdHeaders) Set(name string, value any) error { + h.mu.Lock() + defer h.mu.Unlock() + return 
h.setNoLock(name, value) +} + +func (h *stdHeaders) setNoLock(name string, value any) error { + switch name { + case AlgorithmKey: + alg, err := jwa.KeyAlgorithmFrom(value) + if err != nil { + return fmt.Errorf("invalid value for %s key: %w", AlgorithmKey, err) + } + if salg, ok := alg.(jwa.SignatureAlgorithm); ok { + h.algorithm = &salg + return nil + } + return fmt.Errorf("expecte jwa.SignatureAlgorithm, received %T", alg) + case ContentTypeKey: + if v, ok := value.(string); ok { + h.contentType = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ContentTypeKey, value) + case CriticalKey: + if v, ok := value.([]string); ok { + h.critical = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, CriticalKey, value) + case JWKKey: + if v, ok := value.(jwk.Key); ok { + h.jwk = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JWKKey, value) + case JWKSetURLKey: + if v, ok := value.(string); ok { + h.jwkSetURL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JWKSetURLKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case TypeKey: + if v, ok := value.(string); ok { + h.typ = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, TypeKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := 
value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]any{} + } + h.privateParams[name] = value + } + return nil +} + +func (h *stdHeaders) Remove(key string) error { + h.mu.Lock() + defer h.mu.Unlock() + switch key { + case AlgorithmKey: + h.algorithm = nil + case ContentTypeKey: + h.contentType = nil + case CriticalKey: + h.critical = nil + case JWKKey: + h.jwk = nil + case JWKSetURLKey: + h.jwkSetURL = nil + case KeyIDKey: + h.keyID = nil + case TypeKey: + h.typ = nil + case X509CertChainKey: + h.x509CertChain = nil + case X509CertThumbprintKey: + h.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + h.x509CertThumbprintS256 = nil + case X509URLKey: + h.x509URL = nil + default: + delete(h.privateParams, key) + } + return nil +} + +func (h *stdHeaders) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.clear() + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. 
+ if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c' but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case AlgorithmKey: + var decoded jwa.SignatureAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &decoded + case ContentTypeKey: + if err := json.AssignNextStringToken(&h.contentType, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ContentTypeKey, err) + } + case CriticalKey: + var decoded []string + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, CriticalKey, err) + } + h.critical = decoded + case JWKKey: + var buf json.RawMessage + if err := dec.Decode(&buf); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, JWKKey, err) + } + key, err := jwk.ParseKey(buf) + if err != nil { + return fmt.Errorf(`failed to parse JWK for key %s: %w`, JWKKey, err) + } + h.jwk = key + case JWKSetURLKey: + if err := json.AssignNextStringToken(&h.jwkSetURL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, JWKSetURLKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case TypeKey: + if err := json.AssignNextStringToken(&h.typ, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, TypeKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return 
fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + decoded, err := registry.Decode(dec, tok) + if err != nil { + return err + } + h.setNoLock(tok, decoded) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + h.raw = buf + return nil +} + +func (h *stdHeaders) Keys() []string { + h.mu.RLock() + defer h.mu.RUnlock() + keys := make([]string, 0, 11+len(h.privateParams)) + if h.algorithm != nil { + keys = append(keys, AlgorithmKey) + } + if h.contentType != nil { + keys = append(keys, ContentTypeKey) + } + if h.critical != nil { + keys = append(keys, CriticalKey) + } + if h.jwk != nil { + keys = append(keys, JWKKey) + } + if h.jwkSetURL != nil { + keys = append(keys, JWKSetURLKey) + } + if h.keyID != nil { + keys = append(keys, KeyIDKey) + } + if h.typ != nil { + keys = append(keys, TypeKey) + } + if h.x509CertChain != nil { + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + keys = append(keys, X509URLKey) + } + for k := range h.privateParams { + keys = append(keys, k) + } + return keys +} + +func (h stdHeaders) MarshalJSON() ([]byte, error) { + h.mu.RLock() + data := make(map[string]any) + keys := make([]string, 0, 11+len(h.privateParams)) + if h.algorithm != nil { + data[AlgorithmKey] = *(h.algorithm) + keys = append(keys, AlgorithmKey) + } + if h.contentType != nil { + data[ContentTypeKey] = *(h.contentType) + keys = append(keys, 
ContentTypeKey) + } + if h.critical != nil { + data[CriticalKey] = h.critical + keys = append(keys, CriticalKey) + } + if h.jwk != nil { + data[JWKKey] = h.jwk + keys = append(keys, JWKKey) + } + if h.jwkSetURL != nil { + data[JWKSetURLKey] = *(h.jwkSetURL) + keys = append(keys, JWKSetURLKey) + } + if h.keyID != nil { + data[KeyIDKey] = *(h.keyID) + keys = append(keys, KeyIDKey) + } + if h.typ != nil { + data[TypeKey] = *(h.typ) + keys = append(keys, TypeKey) + } + if h.x509CertChain != nil { + data[X509CertChainKey] = h.x509CertChain + keys = append(keys, X509CertChainKey) + } + if h.x509CertThumbprint != nil { + data[X509CertThumbprintKey] = *(h.x509CertThumbprint) + keys = append(keys, X509CertThumbprintKey) + } + if h.x509CertThumbprintS256 != nil { + data[X509CertThumbprintS256Key] = *(h.x509CertThumbprintS256) + keys = append(keys, X509CertThumbprintS256Key) + } + if h.x509URL != nil { + data[X509URLKey] = *(h.x509URL) + keys = append(keys, X509URLKey) + } + for k, v := range h.privateParams { + data[k] = v + keys = append(keys, k) + } + h.mu.RUnlock() + sort.Strings(keys) + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + enc := json.NewEncoder(buf) + buf.WriteByte(tokens.OpenCurlyBracket) + for i, k := range keys { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(k) + buf.WriteString(`":`) + switch v := data[k].(type) { + case []byte: + buf.WriteRune(tokens.DoubleQuote) + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune(tokens.DoubleQuote) + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, k, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/interface.go new file mode 100644 index 
0000000000..e3ad296844 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/interface.go @@ -0,0 +1,80 @@ +package jws + +import ( + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/jws/legacy" +) + +type Signer = legacy.Signer +type Verifier = legacy.Verifier +type HMACSigner = legacy.HMACSigner +type HMACVerifier = legacy.HMACVerifier + +// Base64Encoder is an interface that can be used when encoding JWS message +// components to base64. This is useful when you want to use a non-standard +// base64 encoder while generating or verifying signatures. By default JWS +// uses raw url base64 encoding (without padding), but there are apparently +// some cases where you may want to use a base64 encoders that uses padding. +// +// For example, apparently AWS ALB User Claims is provided in JWT format, +// but it uses a base64 encoding with padding. +type Base64Encoder = base64.Encoder + +type DecodeCtx interface { + CollectRaw() bool +} + +// Message represents a full JWS encoded message. Flattened serialization +// is not supported as a struct, but rather it's represented as a +// Message struct with only one `signature` element. +// +// Do not expect to use the Message object to verify or construct a +// signed payload with. You should only use this when you want to actually +// programmatically view the contents of the full JWS payload. +// +// As of this version, there is one big incompatibility when using Message +// objects to convert between compact and JSON representations. +// The protected header is sometimes encoded differently from the original +// message and the JSON serialization that we use in Go. 
+// +// For example, the protected header `eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9` +// decodes to +// +// {"typ":"JWT", +// "alg":"HS256"} +// +// However, when we parse this into a message, we create a jws.Header object, +// which, when we marshal into a JSON object again, becomes +// +// {"typ":"JWT","alg":"HS256"} +// +// Notice that serialization lacks a line break and a space between `"JWT",` +// and `"alg"`. This causes a problem when verifying the signatures AFTER +// a compact JWS message has been unmarshaled into a jws.Message. +// +// jws.Verify() doesn't go through this step, and therefore this does not +// manifest itself. However, you may see this discrepancy when you manually +// go through these conversions, and/or use the `jwx` tool like so: +// +// jwx jws parse message.jws | jwx jws verify --key somekey.jwk --stdin +// +// In this scenario, the first `jwx jws parse` outputs a parsed jws.Message +// which is marshaled into JSON. At this point the message's protected +// headers and the signatures don't match. +// +// To sign and verify, use the appropriate `Sign()` and `Verify()` functions. 
+type Message struct { + dc DecodeCtx + payload []byte + signatures []*Signature + b64 bool // true if payload should be base64 encoded +} + +type Signature struct { + encoder Base64Encoder + dc DecodeCtx + headers Headers // Unprotected Headers + protected Headers // Protected Headers + signature []byte // Signature + detached bool +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/BUILD.bazel new file mode 100644 index 0000000000..eb8bd94acb --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/BUILD.bazel @@ -0,0 +1,11 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "keytype", + srcs = ["keytype.go"], + importpath = "github.com/lestrrat-go/jwx/v3/jws/internal/keytype", + visibility = ["//jws:__subpackages__"], + deps = [ + "//jwk", + ], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/keytype.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/keytype.go new file mode 100644 index 0000000000..6b57ed10d8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/internal/keytype/keytype.go @@ -0,0 +1,57 @@ +package keytype + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + + "github.com/lestrrat-go/jwx/v3/jwk" +) + +// Because the keys defined in github.com/lestrrat-go/jwx/jwk may also implement +// crypto.Signer, it would be possible for to mix up key types when signing/verifying +// for example, when we specify jws.WithKey(jwa.RSA256, cryptoSigner), the cryptoSigner +// can be for RSA, or any other type that implements crypto.Signer... even if it's for the +// wrong algorithm. +// +// These functions are there to differentiate between the valid KNOWN key types. +// For any other key type that is outside of the Go std library and our own code, +// we must rely on the user to be vigilant. +// +// Notes: symmetric keys are obviously not part of this. 
for v2 OKP keys, +// x25519 does not implement Sign() +func IsValidRSAKey(key any) bool { + switch key.(type) { + case + ecdsa.PrivateKey, *ecdsa.PrivateKey, + ed25519.PrivateKey, + jwk.ECDSAPrivateKey, jwk.OKPPrivateKey: + // these are NOT ok + return false + } + return true +} + +func IsValidECDSAKey(key any) bool { + switch key.(type) { + case + ed25519.PrivateKey, + rsa.PrivateKey, *rsa.PrivateKey, + jwk.RSAPrivateKey, jwk.OKPPrivateKey: + // these are NOT ok + return false + } + return true +} + +func IsValidEDDSAKey(key any) bool { + switch key.(type) { + case + ecdsa.PrivateKey, *ecdsa.PrivateKey, + rsa.PrivateKey, *rsa.PrivateKey, + jwk.RSAPrivateKey, jwk.ECDSAPrivateKey: + // these are NOT ok + return false + } + return true +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/io.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/io.go new file mode 100644 index 0000000000..77a084cfda --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/io.go @@ -0,0 +1,36 @@ +// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT. 
+ +package jws + +import ( + "fmt" + "io/fs" + "os" +) + +type sysFS struct{} + +func (sysFS) Open(path string) (fs.File, error) { + return os.Open(path) +} + +func ReadFile(path string, options ...ReadFileOption) (*Message, error) { + + var srcFS fs.FS = sysFS{} + for _, option := range options { + switch option.Ident() { + case identFS{}: + if err := option.Value(&srcFS); err != nil { + return nil, fmt.Errorf("failed to set fs.FS: %w", err) + } + } + } + + f, err := srcFS.Open(path) + if err != nil { + return nil, err + } + + defer f.Close() + return ParseReader(f) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go new file mode 100644 index 0000000000..f09e40db2d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jws.go @@ -0,0 +1,665 @@ +//go:generate ../tools/cmd/genjws.sh + +// Package jws implements the digital signature on JSON based data +// structures as described in https://tools.ietf.org/html/rfc7515 +// +// If you do not care about the details, the only things that you +// would need to use are the following functions: +// +// jws.Sign(payload, jws.WithKey(algorithm, key)) +// jws.Verify(serialized, jws.WithKey(algorithm, key)) +// +// To sign, simply use `jws.Sign`. `payload` is a []byte buffer that +// contains whatever data you want to sign. `alg` is one of the +// jwa.SignatureAlgorithm constants from package jwa. For RSA and +// ECDSA family of algorithms, you will need to prepare a private key. +// For HMAC family, you just need a []byte value. The `jws.Sign` +// function will return the encoded JWS message on success. +// +// To verify, use `jws.Verify`. It will parse the `encodedjws` buffer +// and verify the result using `algorithm` and `key`. Upon successful +// verification, the original payload is returned, so you can work on it. 
+// +// As a sidenote, consider using github.com/lestrrat-go/htmsig if you +// looking for HTTP Message Signatures (RFC9421) -- it uses the same +// underlying signing/verification mechanisms as this module. +package jws + +import ( + "bufio" + "crypto/ecdh" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "fmt" + "io" + "reflect" + "sync" + "unicode" + "unicode/utf8" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/jwxio" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/jwx/v3/jws/jwsbb" +) + +var registry = json.NewRegistry() + +var signers = make(map[jwa.SignatureAlgorithm]Signer) +var muSigner = &sync.Mutex{} + +func removeSigner(alg jwa.SignatureAlgorithm) { + muSigner.Lock() + defer muSigner.Unlock() + delete(signers, alg) +} + +type defaultSigner struct { + alg jwa.SignatureAlgorithm +} + +func (s defaultSigner) Algorithm() jwa.SignatureAlgorithm { + return s.alg +} + +func (s defaultSigner) Sign(key any, payload []byte) ([]byte, error) { + return jwsbb.Sign(key, s.alg.String(), payload, nil) +} + +type signerAdapter struct { + signer Signer +} + +func (s signerAdapter) Algorithm() jwa.SignatureAlgorithm { + return s.signer.Algorithm() +} + +func (s signerAdapter) Sign(key any, payload []byte) ([]byte, error) { + return s.signer.Sign(payload, key) +} + +const ( + fmtInvalid = 1 << iota + fmtCompact + fmtJSON + fmtJSONPretty + fmtMax +) + +// silence linters +var _ = fmtInvalid +var _ = fmtMax + +func validateKeyBeforeUse(key any) error { + jwkKey, ok := key.(jwk.Key) + if !ok { + converted, err := jwk.Import(key) + if err != nil { + return fmt.Errorf(`could not convert key of type %T to jwk.Key for validation: %w`, key, err) + } + jwkKey = converted + } + return jwkKey.Validate() +} + +// Sign generates 
a JWS message for the given payload and returns +// it in serialized form, which can be in either compact or +// JSON format. Default is compact. +// +// You must pass at least one key to `jws.Sign()` by using `jws.WithKey()` +// option. +// +// jws.Sign(payload, jws.WithKey(alg, key)) +// jws.Sign(payload, jws.WithJSON(), jws.WithKey(alg1, key1), jws.WithKey(alg2, key2)) +// +// Note that in the second example the `jws.WithJSON()` option is +// specified as well. This is because the compact serialization +// format does not support multiple signatures, and users must +// specifically ask for the JSON serialization format. +// +// Read the documentation for `jws.WithKey()` to learn more about the +// possible values that can be used for `alg` and `key`. +// +// You may create JWS messages with the "none" (jwa.NoSignature) algorithm +// if you use the `jws.WithInsecureNoSignature()` option. This option +// can be combined with one or more signature keys, as well as the +// `jws.WithJSON()` option to generate multiple signatures (though +// the usefulness of such constructs is highly debatable) +// +// Note that this library does not allow you to successfully call `jws.Verify()` on +// signatures with the "none" algorithm. To parse these, use `jws.Parse()` instead. +// +// If you want to use a detached payload, use `jws.WithDetachedPayload()` as +// one of the options. When you use this option, you must always set the +// first parameter (`payload`) to `nil`, or the function will return an error +// +// You may also want to look at how to pass protected headers to the +// signing process, as you will likely be required to set the `b64` field +// when using detached payload. +// +// Look for options that return `jws.SignOption` or `jws.SignVerifyOption` +// for a complete list of options that can be passed to this function. +// +// You can use `errors.Is` with `jws.SignError()` to check if an error is from this function. 
+func Sign(payload []byte, options ...SignOption) ([]byte, error) { + sc := signContextPool.Get() + defer signContextPool.Put(sc) + + sc.payload = payload + + if err := sc.ProcessOptions(options); err != nil { + return nil, signerr(`failed to process options: %w`, err) + } + + lsigner := len(sc.sigbuilders) + if lsigner == 0 { + return nil, signerr(`no signers available. Specify an algorithm and a key using jws.WithKey()`) + } + + // Design note: while we could have easily set format = fmtJSON when + // lsigner > 1, I believe the decision to change serialization formats + // must be explicitly stated by the caller. Otherwise, I'm pretty sure + // there would be people filing issues saying "I get JSON when I expected + // compact serialization". + // + // Therefore, instead of making implicit format conversions, we force the + // user to spell it out as `jws.Sign(..., jws.WithJSON(), jws.WithKey(...), jws.WithKey(...))` + if sc.format == fmtCompact && lsigner != 1 { + return nil, signerr(`cannot have multiple signers (keys) specified for compact serialization. Use only one jws.WithKey()`) + } + + // Create a Message object with all the bits and bobs, and we'll + // serialize it in the end + var result Message + + if err := sc.PopulateMessage(&result); err != nil { + return nil, signerr(`failed to populate message: %w`, err) + } + switch sc.format { + case fmtJSON: + return json.Marshal(result) + case fmtJSONPretty: + return json.MarshalIndent(result, "", " ") + case fmtCompact: + // Take the only signature object, and convert it into a Compact + // serialization format + var compactOpts []CompactOption + if sc.detached { + compactOpts = append(compactOpts, WithDetached(true)) + } + for _, option := range options { + if copt, ok := option.(CompactOption); ok { + compactOpts = append(compactOpts, copt) + } + } + return Compact(&result, compactOpts...) 
+ default: + return nil, signerr(`invalid serialization format`) + } +} + +var allowNoneWhitelist = jwk.WhitelistFunc(func(string) bool { + return false +}) + +// Verify checks if the given JWS message is verifiable using `alg` and `key`. +// `key` may be a "raw" key (e.g. rsa.PublicKey) or a jwk.Key +// +// If the verification is successful, `err` is nil, and the content of the +// payload that was signed is returned. If you need more fine-grained +// control of the verification process, manually generate a +// `Verifier` in `verify` subpackage, and call `Verify` method on it. +// If you need to access signatures and JOSE headers in a JWS message, +// use `Parse` function to get `Message` object. +// +// Because the use of "none" (jwa.NoSignature) algorithm is strongly discouraged, +// this function DOES NOT consider it a success when `{"alg":"none"}` is +// encountered in the message (it would also be counterintuitive when the code says +// it _verified_ something when in fact it did no such thing). If you want to +// accept messages with "none" signature algorithm, use `jws.Parse` to get the +// raw JWS message. +// +// The error returned by this function is of type can be checked against +// `jws.VerifyError()` and `jws.VerificationError()`. The latter is returned +// when the verification process itself fails (e.g. invalid signature, wrong key), +// while the former is returned when any other part of the `jws.Verify()` +// function fails. +func Verify(buf []byte, options ...VerifyOption) ([]byte, error) { + vc := verifyContextPool.Get() + defer verifyContextPool.Put(vc) + + if err := vc.ProcessOptions(options); err != nil { + return nil, verifyerr(`failed to process options: %w`, err) + } + + return vc.VerifyMessage(buf) +} + +// get the value of b64 header field. +// If the field does not exist, returns true (default) +// Otherwise return the value specified by the header field. 
+func getB64Value(hdr Headers) bool { + var b64 bool + if err := hdr.Get("b64", &b64); err != nil { + return true // default + } + + return b64 +} + +// Parse parses contents from the given source and creates a jws.Message +// struct. By default the input can be in either compact or full JSON serialization. +// +// You may pass `jws.WithJSON()` and/or `jws.WithCompact()` to specify +// explicitly which format to use. If neither or both is specified, the function +// will attempt to autodetect the format. If one or the other is specified, +// only the specified format will be attempted. +// +// On error, returns a jws.ParseError. +func Parse(src []byte, options ...ParseOption) (*Message, error) { + var formats int + for _, option := range options { + switch option.Ident() { + case identSerialization{}: + var v int + if err := option.Value(&v); err != nil { + return nil, parseerr(`failed to retrieve serialization option value: %w`, err) + } + switch v { + case fmtJSON: + formats |= fmtJSON + case fmtCompact: + formats |= fmtCompact + } + } + } + + // if format is 0 or both JSON/Compact, auto detect + if v := formats & (fmtJSON | fmtCompact); v == 0 || v == fmtJSON|fmtCompact { + CHECKLOOP: + for i := range src { + r := rune(src[i]) + if r >= utf8.RuneSelf { + r, _ = utf8.DecodeRune(src) + } + if !unicode.IsSpace(r) { + if r == tokens.OpenCurlyBracket { + formats = fmtJSON + } else { + formats = fmtCompact + } + break CHECKLOOP + } + } + } + + if formats&fmtCompact == fmtCompact { + msg, err := parseCompact(src) + if err != nil { + return nil, parseerr(`failed to parse compact format: %w`, err) + } + return msg, nil + } else if formats&fmtJSON == fmtJSON { + msg, err := parseJSON(src) + if err != nil { + return nil, parseerr(`failed to parse JSON format: %w`, err) + } + return msg, nil + } + + return nil, parseerr(`invalid byte sequence`) +} + +// ParseString parses contents from the given source and creates a jws.Message +// struct. 
The input can be in either compact or full JSON serialization. +// +// On error, returns a jws.ParseError. +func ParseString(src string) (*Message, error) { + msg, err := Parse([]byte(src)) + if err != nil { + return nil, sparseerr(`failed to parse string: %w`, err) + } + return msg, nil +} + +// ParseReader parses contents from the given source and creates a jws.Message +// struct. The input can be in either compact or full JSON serialization. +// +// On error, returns a jws.ParseError. +func ParseReader(src io.Reader) (*Message, error) { + data, err := jwxio.ReadAllFromFiniteSource(src) + if err == nil { + return Parse(data) + } + + if !errors.Is(err, jwxio.NonFiniteSourceError()) { + return nil, rparseerr(`failed to read from finite source: %w`, err) + } + + rdr := bufio.NewReader(src) + var first rune + for { + r, _, err := rdr.ReadRune() + if err != nil { + return nil, rparseerr(`failed to read rune: %w`, err) + } + if !unicode.IsSpace(r) { + first = r + if err := rdr.UnreadRune(); err != nil { + return nil, rparseerr(`failed to unread rune: %w`, err) + } + + break + } + } + + var parser func(io.Reader) (*Message, error) + if first == tokens.OpenCurlyBracket { + parser = parseJSONReader + } else { + parser = parseCompactReader + } + + m, err := parser(rdr) + if err != nil { + return nil, rparseerr(`failed to parse reader: %w`, err) + } + + return m, nil +} + +func parseJSONReader(src io.Reader) (result *Message, err error) { + var m Message + if err := json.NewDecoder(src).Decode(&m); err != nil { + return nil, fmt.Errorf(`failed to unmarshal jws message: %w`, err) + } + return &m, nil +} + +func parseJSON(data []byte) (result *Message, err error) { + var m Message + if err := json.Unmarshal(data, &m); err != nil { + return nil, fmt.Errorf(`failed to unmarshal jws message: %w`, err) + } + return &m, nil +} + +// SplitCompact splits a JWS in compact format and returns its three parts +// separately: protected headers, payload and signature. 
+// On error, returns a jws.ParseError. +// +// This function will be deprecated in v4. It is a low-level API, and +// thus will be available in the `jwsbb` package. +func SplitCompact(src []byte) ([]byte, []byte, []byte, error) { + hdr, payload, signature, err := jwsbb.SplitCompact(src) + if err != nil { + return nil, nil, nil, parseerr(`%w`, err) + } + return hdr, payload, signature, nil +} + +// SplitCompactString splits a JWT and returns its three parts +// separately: protected headers, payload and signature. +// On error, returns a jws.ParseError. +// +// This function will be deprecated in v4. It is a low-level API, and +// thus will be available in the `jwsbb` package. +func SplitCompactString(src string) ([]byte, []byte, []byte, error) { + hdr, payload, signature, err := jwsbb.SplitCompactString(src) + if err != nil { + return nil, nil, nil, parseerr(`%w`, err) + } + return hdr, payload, signature, nil +} + +// SplitCompactReader splits a JWT and returns its three parts +// separately: protected headers, payload and signature. +// On error, returns a jws.ParseError. +// +// This function will be deprecated in v4. It is a low-level API, and +// thus will be available in the `jwsbb` package. +func SplitCompactReader(rdr io.Reader) ([]byte, []byte, []byte, error) { + hdr, payload, signature, err := jwsbb.SplitCompactReader(rdr) + if err != nil { + return nil, nil, nil, parseerr(`%w`, err) + } + return hdr, payload, signature, nil +} + +// parseCompactReader parses a JWS value serialized via compact serialization. 
+func parseCompactReader(rdr io.Reader) (m *Message, err error) { + protected, payload, signature, err := SplitCompactReader(rdr) + if err != nil { + return nil, fmt.Errorf(`invalid compact serialization format: %w`, err) + } + return parse(protected, payload, signature) +} + +func parseCompact(data []byte) (m *Message, err error) { + protected, payload, signature, err := SplitCompact(data) + if err != nil { + return nil, fmt.Errorf(`invalid compact serialization format: %w`, err) + } + return parse(protected, payload, signature) +} + +func parse(protected, payload, signature []byte) (*Message, error) { + decodedHeader, err := base64.Decode(protected) + if err != nil { + return nil, fmt.Errorf(`failed to decode protected headers: %w`, err) + } + + hdr := NewHeaders() + if err := json.Unmarshal(decodedHeader, hdr); err != nil { + return nil, fmt.Errorf(`failed to parse JOSE headers: %w`, err) + } + + var decodedPayload []byte + b64 := getB64Value(hdr) + if !b64 { + decodedPayload = payload + } else { + v, err := base64.Decode(payload) + if err != nil { + return nil, fmt.Errorf(`failed to decode payload: %w`, err) + } + decodedPayload = v + } + + decodedSignature, err := base64.Decode(signature) + if err != nil { + return nil, fmt.Errorf(`failed to decode signature: %w`, err) + } + + var msg Message + msg.payload = decodedPayload + msg.signatures = append(msg.signatures, &Signature{ + protected: hdr, + signature: decodedSignature, + }) + msg.b64 = b64 + return &msg, nil +} + +type CustomDecoder = json.CustomDecoder +type CustomDecodeFunc = json.CustomDecodeFunc + +// RegisterCustomField allows users to specify that a private field +// be decoded as an instance of the specified type. This option has +// a global effect. +// +// For example, suppose you have a custom field `x-birthday`, which +// you want to represent as a string formatted in RFC3339 in JSON, +// but want it back as `time.Time`. 
+// +// In such case you would register a custom field as follows +// +// jws.RegisterCustomField(`x-birthday`, time.Time{}) +// +// Then you can use a `time.Time` variable to extract the value +// of `x-birthday` field, instead of having to use `any` +// and later convert it to `time.Time` +// +// var bday time.Time +// _ = hdr.Get(`x-birthday`, &bday) +// +// If you need a more fine-tuned control over the decoding process, +// you can register a `CustomDecoder`. For example, below shows +// how to register a decoder that can parse RFC1123 format string: +// +// jws.RegisterCustomField(`x-birthday`, jws.CustomDecodeFunc(func(data []byte) (any, error) { +// return time.Parse(time.RFC1123, string(data)) +// })) +// +// Please note that use of custom fields can be problematic if you +// are using a library that does not implement MarshalJSON/UnmarshalJSON +// and you try to roundtrip from an object to JSON, and then back to an object. +// For example, in the above example, you can _parse_ time values formatted +// in the format specified in RFC822, but when you convert an object into +// JSON, it will be formatted in RFC3339, because that's what `time.Time` +// likes to do. To avoid this, it's always better to use a custom type +// that wraps your desired type (in this case `time.Time`) and implement +// MarshalJSON and UnmashalJSON. 
+func RegisterCustomField(name string, object any) { + registry.Register(name, object) +} + +// Helpers for signature verification +var rawKeyToKeyType = make(map[reflect.Type]jwa.KeyType) +var keyTypeToAlgorithms = make(map[jwa.KeyType][]jwa.SignatureAlgorithm) + +func init() { + rawKeyToKeyType[reflect.TypeFor[[]byte]()] = jwa.OctetSeq() + rawKeyToKeyType[reflect.TypeFor[ed25519.PublicKey]()] = jwa.OKP() + rawKeyToKeyType[reflect.TypeFor[rsa.PublicKey]()] = jwa.RSA() + rawKeyToKeyType[reflect.TypeFor[*rsa.PublicKey]()] = jwa.RSA() + rawKeyToKeyType[reflect.TypeFor[ecdsa.PublicKey]()] = jwa.EC() + rawKeyToKeyType[reflect.TypeFor[*ecdsa.PublicKey]()] = jwa.EC() + + addAlgorithmForKeyType(jwa.OKP(), jwa.EdDSA()) + for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256(), jwa.HS384(), jwa.HS512()} { + addAlgorithmForKeyType(jwa.OctetSeq(), alg) + } + for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256(), jwa.RS384(), jwa.RS512(), jwa.PS256(), jwa.PS384(), jwa.PS512()} { + addAlgorithmForKeyType(jwa.RSA(), alg) + } + for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256(), jwa.ES384(), jwa.ES512()} { + addAlgorithmForKeyType(jwa.EC(), alg) + } +} + +func addAlgorithmForKeyType(kty jwa.KeyType, alg jwa.SignatureAlgorithm) { + keyTypeToAlgorithms[kty] = append(keyTypeToAlgorithms[kty], alg) +} + +// AlgorithmsForKey returns the possible signature algorithms that can +// be used for a given key. It only takes in consideration keys/algorithms +// for verification purposes, as this is the only usage where one may need +// dynamically figure out which method to use. 
+func AlgorithmsForKey(key any) ([]jwa.SignatureAlgorithm, error) { + var kty jwa.KeyType + switch key := key.(type) { + case jwk.Key: + kty = key.KeyType() + case rsa.PublicKey, *rsa.PublicKey, rsa.PrivateKey, *rsa.PrivateKey: + kty = jwa.RSA() + case ecdsa.PublicKey, *ecdsa.PublicKey, ecdsa.PrivateKey, *ecdsa.PrivateKey: + kty = jwa.EC() + case ed25519.PublicKey, ed25519.PrivateKey, *ecdh.PublicKey, ecdh.PublicKey, *ecdh.PrivateKey, ecdh.PrivateKey: + kty = jwa.OKP() + case []byte: + kty = jwa.OctetSeq() + default: + return nil, fmt.Errorf(`unknown key type %T`, key) + } + + algs, ok := keyTypeToAlgorithms[kty] + if !ok { + return nil, fmt.Errorf(`unregistered key type %q`, kty) + } + return algs, nil +} + +// Settings allows you to set global settings for this JWS operations. +// +// Currently, the only setting available is `jws.WithLegacySigners()`, +// which for various reason is now a no-op. +func Settings(options ...GlobalOption) { + for _, option := range options { + switch option.Ident() { + case identLegacySigners{}: + } + } +} + +// VerifyCompactFast is a fast path verification function for JWS messages +// in compact serialization format. +// +// This function is considered experimental, and may change or be removed +// in the future. +// +// VerifyCompactFast performs signature verification on a JWS compact +// serialization without fully parsing the message into a jws.Message object. +// This makes it more efficient for cases where you only need to verify +// the signature and extract the payload, without needing access to headers +// or other JWS metadata. +// +// Returns the original payload that was signed if verification succeeds. +// +// Unlike jws.Verify(), this function requires you to specify the +// algorithm explicitly rather than extracting it from the JWS headers. +// This can be useful for performance-critical applications where the +// algorithm is known in advance. 
+// +// Since this function avoids doing many checks that jws.Verify would perform, +// you must ensure to perform the necessary checks including ensuring that algorithm is safe to use for your payload yourself. +func VerifyCompactFast(key any, compact []byte, alg jwa.SignatureAlgorithm) ([]byte, error) { + algstr := alg.String() + + // Split the serialized JWT into its components + hdr, payload, encodedSig, err := jwsbb.SplitCompact(compact) + if err != nil { + return nil, fmt.Errorf("jwt.verifyFast: failed to split compact: %w", err) + } + + signature, err := base64.Decode(encodedSig) + if err != nil { + return nil, fmt.Errorf("jwt.verifyFast: failed to decode signature: %w", err) + } + + // Instead of appending, copy the data from hdr/payload + lvb := len(hdr) + 1 + len(payload) + verifyBuf := pool.ByteSlice().GetCapacity(lvb) + verifyBuf = verifyBuf[:lvb] + copy(verifyBuf, hdr) + verifyBuf[len(hdr)] = tokens.Period + copy(verifyBuf[len(hdr)+1:], payload) + defer pool.ByteSlice().Put(verifyBuf) + + // Verify the signature + if verifier2, err := VerifierFor(alg); err == nil { + if err := verifier2.Verify(key, verifyBuf, signature); err != nil { + return nil, verifyError{verificationError{fmt.Errorf("jwt.VerifyCompact: signature verification failed for %s: %w", algstr, err)}} + } + } else { + legacyVerifier, err := NewVerifier(alg) + if err != nil { + return nil, verifyerr("jwt.VerifyCompact: failed to create verifier for %s: %w", algstr, err) + } + if err := legacyVerifier.Verify(verifyBuf, signature, key); err != nil { + return nil, verifyError{verificationError{fmt.Errorf("jwt.VerifyCompact: signature verification failed for %s: %w", algstr, err)}} + } + } + + decoded, err := base64.Decode(payload) + if err != nil { + return nil, verifyerr("jwt.VerifyCompact: failed to decode payload: %w", err) + } + return decoded, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel new 
file mode 100644 index 0000000000..0799e81110 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/BUILD.bazel @@ -0,0 +1,38 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "jwsbb", + srcs = [ + "crypto_signer.go", + "ecdsa.go", + "eddsa.go", + "format.go", + "hmac.go", + "jwsbb.go", + "rsa.go", + "sign.go", + "verify.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jws/jwsbb", + visibility = ["//visibility:public"], + deps = [ + "//internal/base64", + "//internal/ecutil", + "//internal/jwxio", + "//internal/keyconv", + "//internal/pool", + "//internal/tokens", + "//jws/internal/keytype", + "@com_github_lestrrat_go_dsig//:dsig", + ], +) + +go_test( + name = "jwsbb_test", + srcs = ["jwsbb_test.go"], + embed = [":jwsbb"], + deps = [ + "//internal/base64", + "@com_github_stretchr_testify//require", + ], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/crypto_signer.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/crypto_signer.go new file mode 100644 index 0000000000..bd6132a4c5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/crypto_signer.go @@ -0,0 +1,45 @@ +package jwsbb + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" +) + +// cryptosign is a low-level function that signs a payload using a crypto.Signer. +// If hash is crypto.Hash(0), the payload is signed directly without hashing. +// Otherwise, the payload is hashed using the specified hash function before signing. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. 
+func cryptosign(signer crypto.Signer, payload []byte, hash crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) { + if rr == nil { + rr = rand.Reader + } + + var digest []byte + if hash == crypto.Hash(0) { + digest = payload + } else { + h := hash.New() + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload to hash: %w`, err) + } + digest = h.Sum(nil) + } + return signer.Sign(rr, digest, opts) +} + +// SignCryptoSigner generates a signature using a crypto.Signer interface. +// This function can be used for hardware security modules, smart cards, +// and other implementations of the crypto.Signer interface. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. +// +// Returns the signature bytes or an error if signing fails. +func SignCryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, opts crypto.SignerOpts, rr io.Reader) ([]byte, error) { + if signer == nil { + return nil, fmt.Errorf("jwsbb.SignCryptoSignerRaw: signer is nil") + } + return cryptosign(signer, raw, h, opts, rr) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go new file mode 100644 index 0000000000..1eb492ee7b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/ecdsa.go @@ -0,0 +1,179 @@ +package jwsbb + +import ( + "crypto" + "crypto/ecdsa" + "encoding/asn1" + "fmt" + "io" + "math/big" + + "github.com/lestrrat-go/dsig" + "github.com/lestrrat-go/jwx/v3/internal/ecutil" +) + +// ecdsaHashToDsigAlgorithm maps ECDSA hash functions to dsig algorithm constants +func ecdsaHashToDsigAlgorithm(h crypto.Hash) (string, error) { + switch h { + case crypto.SHA256: + return dsig.ECDSAWithP256AndSHA256, nil + case crypto.SHA384: + return dsig.ECDSAWithP384AndSHA384, nil + case crypto.SHA512: + return dsig.ECDSAWithP521AndSHA512, nil + default: + return "", fmt.Errorf("unsupported ECDSA hash function: 
%v", h) + } +} + +// UnpackASN1ECDSASignature unpacks an ASN.1 encoded ECDSA signature into r and s values. +// This is typically used when working with crypto.Signer interfaces that return ASN.1 encoded signatures. +func UnpackASN1ECDSASignature(signed []byte, r, s *big.Int) error { + // Okay, this is silly, but hear me out. When we use the + // crypto.Signer interface, the PrivateKey is hidden. + // But we need some information about the key (its bit size). + // + // So while silly, we're going to have to make another call + // here and fetch the Public key. + // (This probably means that this information should be cached somewhere) + var p struct { + R *big.Int // TODO: get this from a pool? + S *big.Int + } + if _, err := asn1.Unmarshal(signed, &p); err != nil { + return fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err) + } + + r.Set(p.R) + s.Set(p.S) + return nil +} + +// UnpackECDSASignature unpacks a JWS-format ECDSA signature into r and s values. +// The signature should be in the format specified by RFC 7515 (r||s as fixed-length byte arrays). +func UnpackECDSASignature(signature []byte, pubkey *ecdsa.PublicKey, r, s *big.Int) error { + keySize := ecutil.CalculateKeySize(pubkey.Curve) + if len(signature) != keySize*2 { + return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name) + } + + r.SetBytes(signature[:keySize]) + s.SetBytes(signature[keySize:]) + + return nil +} + +// PackECDSASignature packs the r and s values from an ECDSA signature into a JWS-format byte slice. +// The output format follows RFC 7515: r||s as fixed-length byte arrays. 
func PackECDSASignature(r *big.Int, sbig *big.Int, curveBits int) ([]byte, error) {
	// keyBytes is the byte length of each of r and s, rounded up so that
	// curves whose bit size is not a multiple of 8 (e.g. P-521) still fit.
	keyBytes := curveBits / 8
	if curveBits%8 > 0 {
		keyBytes++
	}

	// Serialize r and s into fixed-length bytes (left-padded with zeros).
	rBytes := r.Bytes()
	rBytesPadded := make([]byte, keyBytes)
	copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)

	sBytes := sbig.Bytes()
	sBytesPadded := make([]byte, keyBytes)
	copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)

	// Output as r||s
	return append(rBytesPadded, sBytesPadded...), nil
}

// SignECDSA generates an ECDSA signature for the given payload using the specified private key and hash.
// The raw parameter should be the pre-computed signing input (typically header.payload).
//
// rr is an io.Reader that provides randomness for signing. if rr is nil, it defaults to rand.Reader.
//
// This function is now a thin wrapper around dsig.SignECDSA. For new projects, you should
// consider using dsig instead of this function.
func SignECDSA(key *ecdsa.PrivateKey, payload []byte, h crypto.Hash, rr io.Reader) ([]byte, error) {
	// Translate the crypto.Hash into the dsig algorithm identifier; this
	// also rejects hashes with no JWS ECDSA pairing.
	dsigAlg, err := ecdsaHashToDsigAlgorithm(h)
	if err != nil {
		return nil, fmt.Errorf("jwsbb.SignECDSA: %w", err)
	}

	return dsig.Sign(key, dsigAlg, payload, rr)
}

// SignECDSACryptoSigner generates an ECDSA signature using a crypto.Signer interface.
// This function works with hardware security modules and other crypto.Signer implementations.
// The signature is converted from ASN.1 format to JWS format (r||s).
//
// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader.
+func SignECDSACryptoSigner(signer crypto.Signer, raw []byte, h crypto.Hash, rr io.Reader) ([]byte, error) { + signed, err := SignCryptoSigner(signer, raw, h, h, rr) + if err != nil { + return nil, fmt.Errorf(`failed to sign payload using crypto.Signer: %w`, err) + } + + return signECDSACryptoSigner(signer, signed) +} + +func signECDSACryptoSigner(signer crypto.Signer, signed []byte) ([]byte, error) { + cpub := signer.Public() + pubkey, ok := cpub.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, pubkey) + } + curveBits := pubkey.Curve.Params().BitSize + + var r, s big.Int + if err := UnpackASN1ECDSASignature(signed, &r, &s); err != nil { + return nil, fmt.Errorf(`failed to unpack ASN1 encoded signature: %w`, err) + } + + return PackECDSASignature(&r, &s, curveBits) +} + +func ecdsaVerify(key *ecdsa.PublicKey, buf []byte, h crypto.Hash, r, s *big.Int) error { + hasher := h.New() + hasher.Write(buf) + digest := hasher.Sum(nil) + if !ecdsa.Verify(key, digest, r, s) { + return fmt.Errorf("jwsbb.ECDSAVerifier: invalid ECDSA signature") + } + return nil +} + +// VerifyECDSA verifies an ECDSA signature for the given payload. +// This function verifies the signature using the specified public key and hash algorithm. +// The payload parameter should be the pre-computed signing input (typically header.payload). +// +// This function is now a thin wrapper around dsig.VerifyECDSA. For new projects, you should +// consider using dsig instead of this function. +func VerifyECDSA(key *ecdsa.PublicKey, payload, signature []byte, h crypto.Hash) error { + dsigAlg, err := ecdsaHashToDsigAlgorithm(h) + if err != nil { + return fmt.Errorf("jwsbb.VerifyECDSA: %w", err) + } + + return dsig.Verify(key, dsigAlg, payload, signature) +} + +// VerifyECDSACryptoSigner verifies an ECDSA signature for crypto.Signer implementations. 
+// This function is useful for verifying signatures created by hardware security modules +// or other implementations of the crypto.Signer interface. +// The payload parameter should be the pre-computed signing input (typically header.payload). +func VerifyECDSACryptoSigner(signer crypto.Signer, payload, signature []byte, h crypto.Hash) error { + var pubkey *ecdsa.PublicKey + switch cpub := signer.Public(); cpub := cpub.(type) { + case ecdsa.PublicKey: + pubkey = &cpub + case *ecdsa.PublicKey: + pubkey = cpub + default: + return fmt.Errorf(`jwsbb.VerifyECDSACryptoSigner: expected *ecdsa.PublicKey, got %T`, cpub) + } + + var r, s big.Int + if err := UnpackECDSASignature(signature, pubkey, &r, &s); err != nil { + return fmt.Errorf("jwsbb.ECDSAVerifier: failed to unpack ASN.1 encoded ECDSA signature: %w", err) + } + + return ecdsaVerify(pubkey, payload, h, &r, &s) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go new file mode 100644 index 0000000000..960cf97dde --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/eddsa.go @@ -0,0 +1,30 @@ +package jwsbb + +import ( + "crypto/ed25519" + + "github.com/lestrrat-go/dsig" +) + +// SignEdDSA generates an EdDSA (Ed25519) signature for the given payload. +// The raw parameter should be the pre-computed signing input (typically header.payload). +// EdDSA is deterministic and doesn't require additional hashing of the input. +// +// This function is now a thin wrapper around dsig.SignEdDSA. For new projects, you should +// consider using dsig instead of this function. +func SignEdDSA(key ed25519.PrivateKey, payload []byte) ([]byte, error) { + // Use dsig.Sign with EdDSA algorithm constant + return dsig.Sign(key, dsig.EdDSA, payload, nil) +} + +// VerifyEdDSA verifies an EdDSA (Ed25519) signature for the given payload. +// This function verifies the signature using Ed25519 verification algorithm. 
// The payload parameter should be the pre-computed signing input (typically header.payload).
// EdDSA is deterministic and provides strong security guarantees without requiring hash function selection.
//
// This function is now a thin wrapper around dsig.VerifyEdDSA. For new projects, you should
// consider using dsig instead of this function.
func VerifyEdDSA(key ed25519.PublicKey, payload, signature []byte) error {
	// Use dsig.Verify with EdDSA algorithm constant
	return dsig.Verify(key, dsig.EdDSA, payload, signature)
}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go new file mode 100644 index 0000000000..a8761ee0fc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/es256k.go @@ -0,0 +1,14 @@
//go:build jwx_es256k

package jwsbb

import (
	dsigsecp256k1 "github.com/lestrrat-go/dsig-secp256k1"
)

// es256k is the JWS "alg" value for ECDSA over secp256k1 with SHA-256;
// it is only available when the jwx_es256k build tag is set.
const es256k = "ES256K"

func init() {
	// Add ES256K mapping when this build tag is enabled
	jwsToDsigAlgorithm[es256k] = dsigsecp256k1.ECDSAWithSecp256k1AndSHA256
}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/format.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/format.go new file mode 100644 index 0000000000..430bf625ac --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/format.go @@ -0,0 +1,235 @@
package jwsbb

import (
	"bytes"
	"errors"
	"io"

	"github.com/lestrrat-go/jwx/v3/internal/base64"
	"github.com/lestrrat-go/jwx/v3/internal/jwxio"
	"github.com/lestrrat-go/jwx/v3/internal/tokens"
)

// SignBuffer combines the base64-encoded header and payload into a single byte slice
// for signing purposes. This creates the signing input according to JWS specification (RFC 7515).
// The result should be passed to signature generation functions.
+// +// Parameters: +// - buf: Reusable buffer (can be nil for automatic allocation) +// - hdr: Raw header bytes (will be base64-encoded) +// - payload: Raw payload bytes (encoded based on encodePayload flag) +// - encoder: Base64 encoder to use for encoding components +// - encodePayload: If true, payload is base64-encoded; if false, payload is used as-is +// +// Returns the constructed signing input in the format: base64(header).base64(payload) or base64(header).payload +func SignBuffer(buf, hdr, payload []byte, encoder base64.Encoder, encodePayload bool) []byte { + l := encoder.EncodedLen(len(hdr)+len(payload)) + 1 + if cap(buf) < l { + buf = make([]byte, 0, l) + } + buf = buf[:0] + buf = encoder.AppendEncode(buf, hdr) + buf = append(buf, tokens.Period) + if encodePayload { + buf = encoder.AppendEncode(buf, payload) + } else { + buf = append(buf, payload...) + } + + return buf +} + +// AppendSignature appends a base64-encoded signature to a JWS signing input buffer. +// This completes the compact JWS serialization by adding the final signature component. +// The input buffer should contain the signing input (header.payload), and this function +// adds the period separator and base64-encoded signature. +// +// Parameters: +// - buf: Buffer containing the signing input (typically from SignBuffer) +// - signature: Raw signature bytes (will be base64-encoded) +// - encoder: Base64 encoder to use for encoding the signature +// +// Returns the complete compact JWS in the format: base64(header).base64(payload).base64(signature) +func AppendSignature(buf, signature []byte, encoder base64.Encoder) []byte { + l := len(buf) + len(signature) + 1 + if cap(buf) < l { + buf = make([]byte, 0, l) + } + buf = append(buf, tokens.Period) + buf = encoder.AppendEncode(buf, signature) + + return buf +} + +// JoinCompact creates a complete compact JWS serialization from individual components. 
// This is a one-step function that combines header, payload, and signature into the final JWS format.
// It includes safety checks to prevent excessive memory allocation.
//
// Parameters:
//   - buf: Reusable buffer (can be nil for automatic allocation)
//   - hdr: Raw header bytes (will be base64-encoded)
//   - payload: Raw payload bytes (encoded based on encodePayload flag)
//   - signature: Raw signature bytes (will be base64-encoded)
//   - encoder: Base64 encoder to use for encoding all components
//   - encodePayload: If true, payload is base64-encoded; if false, payload is used as-is
//
// Returns the complete compact JWS or an error if the total size exceeds safety limits (1GB).
func JoinCompact(buf, hdr, payload, signature []byte, encoder base64.Encoder, encodePayload bool) ([]byte, error) {
	// Reject pathological inputs before attempting any allocation.
	const MaxBufferSize = 1 << 30 // 1 GB
	totalSize := len(hdr) + len(payload) + len(signature) + 2
	if totalSize > MaxBufferSize {
		return nil, errors.New("input sizes exceed maximum allowable buffer size")
	}
	// totalSize is a lower bound (raw lengths, not encoded); append will
	// grow the buffer further if base64 expansion requires it.
	if cap(buf) < totalSize {
		buf = make([]byte, 0, totalSize)
	}
	buf = buf[:0]
	buf = encoder.AppendEncode(buf, hdr)
	buf = append(buf, tokens.Period)
	if encodePayload {
		buf = encoder.AppendEncode(buf, payload)
	} else {
		buf = append(buf, payload...)
	}
	buf = append(buf, tokens.Period)
	buf = encoder.AppendEncode(buf, signature)

	return buf, nil
}

// compactDelim is the segment separator of the compact serialization form.
var compactDelim = []byte{tokens.Period}

var errInvalidNumberOfSegments = errors.New(`jwsbb: invalid number of segments`)

// InvalidNumberOfSegmentsError returns the standard error for invalid JWS segment count.
// A valid compact JWS must have exactly 3 segments separated by periods: header.payload.signature
func InvalidNumberOfSegmentsError() error {
	return errInvalidNumberOfSegments
}

// SplitCompact parses a compact JWS serialization into its three components.
+// This function validates that the input has exactly 3 segments separated by periods +// and returns the base64-encoded components without decoding them. +// +// Parameters: +// - src: Complete compact JWS string as bytes +// +// Returns: +// - protected: Base64-encoded protected header +// - payload: Base64-encoded payload (or raw payload if b64=false was used) +// - signature: Base64-encoded signature +// - err: Error if the format is invalid or segment count is wrong +func SplitCompact(src []byte) (protected, payload, signature []byte, err error) { + var s []byte + var ok bool + + protected, s, ok = bytes.Cut(src, compactDelim) + if !ok { // no period found + return nil, nil, nil, InvalidNumberOfSegmentsError() + } + payload, s, ok = bytes.Cut(s, compactDelim) + if !ok { // only one period found + return nil, nil, nil, InvalidNumberOfSegmentsError() + } + signature, _, ok = bytes.Cut(s, compactDelim) + if ok { // three periods found + return nil, nil, nil, InvalidNumberOfSegmentsError() + } + return protected, payload, signature, nil +} + +// SplitCompactString is a convenience wrapper around SplitCompact for string inputs. +// It converts the string to bytes and parses the compact JWS serialization. +// +// Parameters: +// - src: Complete compact JWS as a string +// +// Returns the same components as SplitCompact: protected header, payload, signature, and error. +func SplitCompactString(src string) (protected, payload, signature []byte, err error) { + return SplitCompact([]byte(src)) +} + +// SplitCompactReader parses a compact JWS serialization from an io.Reader. +// This function handles both finite and streaming sources efficiently. +// For finite sources, it reads all data at once. For streaming sources, +// it uses a buffer-based approach to find segment boundaries. 
//
// Parameters:
//   - rdr: Reader containing the compact JWS data
//
// Returns:
//   - protected: Base64-encoded protected header
//   - payload: Base64-encoded payload (or raw payload if b64=false was used)
//   - signature: Base64-encoded signature
//   - err: Error if reading fails or the format is invalid
//
// The function validates that exactly 3 segments are present, separated by periods.
func SplitCompactReader(rdr io.Reader) (protected, payload, signature []byte, err error) {
	// Fast path: a finite source can be slurped and parsed in one go.
	data, err := jwxio.ReadAllFromFiniteSource(rdr)
	if err == nil {
		return SplitCompact(data)
	}

	if !errors.Is(err, jwxio.NonFiniteSourceError()) {
		return nil, nil, nil, err
	}

	// Streaming path: scan incrementally for period separators.
	// periods counts separators seen; state tracks which segment (0=header,
	// 1=payload, 2=signature) the next chunk belongs to.
	var periods int
	var state int

	buf := make([]byte, 4096)
	var sofar []byte

	for {
		// read next bytes
		n, err := rdr.Read(buf)
		// return on unexpected read error
		// NOTE(review): the underlying error is discarded and replaced with
		// io.ErrUnexpectedEOF here — consider wrapping err instead; confirm
		// callers do not rely on the original error.
		if err != nil && err != io.EOF {
			return nil, nil, nil, io.ErrUnexpectedEOF
		}

		// append to current buffer
		sofar = append(sofar, buf[:n]...)
		// loop to capture multiple tokens.Period in current buffer
		for loop := true; loop; {
			var i = bytes.IndexByte(sofar, tokens.Period)
			if i == -1 && err != io.EOF {
				// no tokens.Period found -> exit and read next bytes (outer loop)
				loop = false
				continue
			} else if i == -1 && err == io.EOF {
				// no tokens.Period found -> process rest and exit
				i = len(sofar)
				loop = false
			} else {
				// tokens.Period found
				periods++
			}

			// Reaching this point means we have found a tokens.Period or EOF and process the rest of the buffer
			switch state {
			case 0:
				protected = sofar[:i]
				state++
			case 1:
				payload = sofar[:i]
				state++
			case 2:
				signature = sofar[:i]
			}
			// Shorten current buffer (drop the consumed segment and its separator)
			if len(sofar) > i {
				sofar = sofar[i+1:]
			}
		}
		// Exit on EOF
		if err == io.EOF {
			break
		}
	}
	// Exactly two separators means exactly three segments.
	if periods != 2 {
		return nil, nil, nil, InvalidNumberOfSegmentsError()
	}

	return protected, payload, signature, nil
}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go new file mode 100644 index 0000000000..cac3987ea5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/header.go @@ -0,0 +1,222 @@
package jwsbb

import (
	"fmt"

	"github.com/lestrrat-go/jwx/v3/internal/base64"
	"github.com/valyala/fastjson"
)

// headerNotFoundError reports that a requested header field was absent.
type headerNotFoundError struct {
	key string
}

func (e headerNotFoundError) Error() string {
	return fmt.Sprintf(`jwsbb: header "%s" not found`, e.key)
}

// Is makes any headerNotFoundError match any other (value or pointer form),
// so errors.Is(err, ErrHeaderNotFound()) works regardless of the key.
func (e headerNotFoundError) Is(target error) bool {
	switch target.(type) {
	case headerNotFoundError, *headerNotFoundError:
		// If the target is a headerNotFoundError or a pointer to it, we
		// consider it a match
		return true
	default:
		return false
	}
}

// ErrHeaderNotFound returns an error that can be passed to `errors.Is` to check if the error is
// the result of the field not being found
func ErrHeaderNotFound() error {
	return headerNotFoundError{}
}

//
ErrFieldNotFound is an alias for ErrHeaderNotFound. It was a misnomer.
// It will be removed in a future release.
//
// Deprecated: use ErrHeaderNotFound instead.
func ErrFieldNotFound() error {
	return ErrHeaderNotFound()
}

// Header is an object that allows you to access the JWS header in a quick and
// dirty way. It does not verify anything, it does not know anything about what
// each header field means, and it does not care about the JWS specification.
// But when you need to access the JWS header for that one field that you
// need, this is the object you want to use.
//
// As of this writing, HeaderParser cannot be used from concurrent goroutines.
// You will need to create a new instance for each goroutine that needs to parse a JWS header.
// Also, in general values obtained from this object should only be used
// while the Header object is still in scope.
//
// This type is experimental and may change or be removed in the future.
type Header interface {
	// I'm hiding this behind an interface so that users won't accidentally
	// rely on the underlying json handler implementation, nor the concrete
	// type name that jwsbb provides, as we may choose a different one in the future.
	jwsbbHeader()
}

// header is the sole Header implementation: the parsed fastjson value plus
// any error captured during parsing (surfaced lazily on first field access).
type header struct {
	v   *fastjson.Value
	err error
}

func (h *header) jwsbbHeader() {}

// HeaderParseCompact parses a JWS header from a compact serialization format.
// The input is the base64-encoded protected header segment.
// You will need to call HeaderGet* functions to extract the values from the header.
//
// This function is experimental and may change or be removed in the future.
func HeaderParseCompact(buf []byte) Header {
	decoded, err := base64.Decode(buf)
	if err != nil {
		// Defer error reporting to the HeaderGet* accessors.
		return &header{err: err}
	}
	return HeaderParse(decoded)
}

// HeaderParse parses a JWS header from a byte slice containing the decoded JSON.
// You will need to call HeaderGet* functions to extract the values from the header.
//
// Unlike HeaderParseCompact, this function does not perform any base64 decoding.
+// This function is experimental and may change or be removed in the future. +func HeaderParse(decoded []byte) Header { + var p fastjson.Parser + v, err := p.ParseBytes(decoded) + if err != nil { + return &header{err: err} + } + return &header{ + v: v, + } +} + +func headerGet(h Header, key string) (*fastjson.Value, error) { + //nolint:forcetypeassert + hh := h.(*header) // we _know_ this can't be another type + if hh.err != nil { + return nil, hh.err + } + + v := hh.v.Get(key) + if v == nil { + return nil, headerNotFoundError{key: key} + } + return v, nil +} + +// HeaderGetString returns the string value for the given key from the JWS header. +// An error is returned if the JSON was not valid, if the key does not exist, +// or if the value is not a string. +// +// This function is experimental and may change or be removed in the future. +func HeaderGetString(h Header, key string) (string, error) { + v, err := headerGet(h, key) + if err != nil { + return "", err + } + + sb, err := v.StringBytes() + if err != nil { + return "", err + } + + return string(sb), nil +} + +// HeaderGetBool returns the boolean value for the given key from the JWS header. +// An error is returned if the JSON was not valid, if the key does not exist, +// or if the value is not a boolean. +// +// This function is experimental and may change or be removed in the future. +func HeaderGetBool(h Header, key string) (bool, error) { + v, err := headerGet(h, key) + if err != nil { + return false, err + } + return v.Bool() +} + +// HeaderGetFloat64 returns the float64 value for the given key from the JWS header. +// An error is returned if the JSON was not valid, if the key does not exist, +// or if the value is not a float64. +// +// This function is experimental and may change or be removed in the future. 
+func HeaderGetFloat64(h Header, key string) (float64, error) { + v, err := headerGet(h, key) + if err != nil { + return 0, err + } + return v.Float64() +} + +// HeaderGetInt returns the int value for the given key from the JWS header. +// An error is returned if the JSON was not valid, if the key does not exist, +// or if the value is not an int. +// +// This function is experimental and may change or be removed in the future. +func HeaderGetInt(h Header, key string) (int, error) { + v, err := headerGet(h, key) + if err != nil { + return 0, err + } + return v.Int() +} + +// HeaderGetInt64 returns the int64 value for the given key from the JWS header. +// An error is returned if the JSON was not valid, if the key does not exist, +// or if the value is not an int64. +// +// This function is experimental and may change or be removed in the future. +func HeaderGetInt64(h Header, key string) (int64, error) { + v, err := headerGet(h, key) + if err != nil { + return 0, err + } + return v.Int64() +} + +// HeaderGetStringBytes returns the byte slice value for the given key from the JWS header. +// An error is returned if the JSON was not valid, if the key does not exist, +// or if the value is not a byte slice. +// +// Because of limitations of the underlying library, you cannot use the return value +// of this function after the parser is garbage collected. +// +// This function is experimental and may change or be removed in the future. +func HeaderGetStringBytes(h Header, key string) ([]byte, error) { + v, err := headerGet(h, key) + if err != nil { + return nil, err + } + + return v.StringBytes() +} + +// HeaderGetUint returns the uint value for the given key from the JWS header. +// An error is returned if the JSON was not valid, if the key does not exist, +// or if the value is not a uint. +// +// This function is experimental and may change or be removed in the future. 
func HeaderGetUint(h Header, key string) (uint, error) {
	v, err := headerGet(h, key)
	if err != nil {
		return 0, err
	}
	return v.Uint()
}

// HeaderGetUint64 returns the uint64 value for the given key from the JWS header.
// An error is returned if the JSON was not valid, if the key does not exist,
// or if the value is not a uint64.
//
// This function is experimental and may change or be removed in the future.
func HeaderGetUint64(h Header, key string) (uint64, error) {
	v, err := headerGet(h, key)
	if err != nil {
		return 0, err
	}

	return v.Uint64()
}
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go new file mode 100644 index 0000000000..8e70eb667d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/hmac.go @@ -0,0 +1,52 @@
package jwsbb

import (
	"fmt"
	"hash"

	"github.com/lestrrat-go/dsig"
)

// hmacHashToDsigAlgorithm maps HMAC hash function sizes to dsig algorithm constants.
//
// NOTE(review): the hash is identified by digest size alone, so any 32/48/64
// byte hash (e.g. SHA3-256, BLAKE2s) would be mapped to the corresponding
// SHA-2 constant. Callers are presumably expected to pass sha256/sha384/sha512
// constructors only — confirm before passing anything else.
func hmacHashToDsigAlgorithm(hfunc func() hash.Hash) (string, error) {
	h := hfunc()
	switch h.Size() {
	case 32: // SHA256
		return dsig.HMACWithSHA256, nil
	case 48: // SHA384
		return dsig.HMACWithSHA384, nil
	case 64: // SHA512
		return dsig.HMACWithSHA512, nil
	default:
		return "", fmt.Errorf("unsupported HMAC hash function: size=%d", h.Size())
	}
}

// SignHMAC generates an HMAC signature for the given payload using the specified hash function and key.
// The raw parameter should be the pre-computed signing input (typically header.payload).
//
// This function is now a thin wrapper around dsig.SignHMAC. For new projects, you should
// consider using dsig instead of this function.
+func SignHMAC(key, payload []byte, hfunc func() hash.Hash) ([]byte, error) { + dsigAlg, err := hmacHashToDsigAlgorithm(hfunc) + if err != nil { + return nil, fmt.Errorf("jwsbb.SignHMAC: %w", err) + } + + return dsig.Sign(key, dsigAlg, payload, nil) +} + +// VerifyHMAC verifies an HMAC signature for the given payload. +// This function verifies the signature using the specified key and hash function. +// The payload parameter should be the pre-computed signing input (typically header.payload). +// +// This function is now a thin wrapper around dsig.VerifyHMAC. For new projects, you should +// consider using dsig instead of this function. +func VerifyHMAC(key, payload, signature []byte, hfunc func() hash.Hash) error { + dsigAlg, err := hmacHashToDsigAlgorithm(hfunc) + if err != nil { + return fmt.Errorf("jwsbb.VerifyHMAC: %w", err) + } + + return dsig.Verify(key, dsigAlg, payload, signature) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go new file mode 100644 index 0000000000..6a67ee8f86 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/jwsbb.go @@ -0,0 +1,94 @@ +// Package jwsbb provides the building blocks (hence the name "bb") for JWS operations. +// It should be thought of as a low-level API, almost akin to internal packages +// that should not be used directly by users of the jwx package. However, these exist +// to provide a more efficient way to perform JWS operations without the overhead of +// the higher-level jws package to power-users who know what they are doing. +// +// This package is currently considered EXPERIMENTAL, and the API may change +// without notice. It is not recommended to use this package unless you are +// fully aware of the implications of using it. +// +// All bb packages in jwx follow the same design principles: +// 1. Does minimal checking of input parameters (for performance); callers need to ensure that the parameters are valid. 
+// 2. All exported functions are strongly typed (i.e. they do not take `any` types unless they absolutely have to). +// 3. Does not rely on other public jwx packages (they are standalone, except for internal packages). +// +// This implementation uses github.com/lestrrat-go/dsig as the underlying signature provider. +package jwsbb + +import ( + "github.com/lestrrat-go/dsig" +) + +// JWS algorithm name constants +const ( + // HMAC algorithms + hs256 = "HS256" + hs384 = "HS384" + hs512 = "HS512" + + // RSA PKCS#1 v1.5 algorithms + rs256 = "RS256" + rs384 = "RS384" + rs512 = "RS512" + + // RSA PSS algorithms + ps256 = "PS256" + ps384 = "PS384" + ps512 = "PS512" + + // ECDSA algorithms + es256 = "ES256" + es384 = "ES384" + es512 = "ES512" + + // EdDSA algorithm + edDSA = "EdDSA" +) + +// Signer is a generic interface that defines the method for signing payloads. +// The type parameter K represents the key type (e.g., []byte for HMAC keys, +// *rsa.PrivateKey for RSA keys, *ecdsa.PrivateKey for ECDSA keys). +type Signer[K any] interface { + Sign(key K, payload []byte) ([]byte, error) +} + +// Verifier is a generic interface that defines the method for verifying signatures. +// The type parameter K represents the key type (e.g., []byte for HMAC keys, +// *rsa.PublicKey for RSA keys, *ecdsa.PublicKey for ECDSA keys). 
+type Verifier[K any] interface { + Verify(key K, buf []byte, signature []byte) error +} + +// JWS to dsig algorithm mapping +var jwsToDsigAlgorithm = map[string]string{ + // HMAC algorithms + hs256: dsig.HMACWithSHA256, + hs384: dsig.HMACWithSHA384, + hs512: dsig.HMACWithSHA512, + + // RSA PKCS#1 v1.5 algorithms + rs256: dsig.RSAPKCS1v15WithSHA256, + rs384: dsig.RSAPKCS1v15WithSHA384, + rs512: dsig.RSAPKCS1v15WithSHA512, + + // RSA PSS algorithms + ps256: dsig.RSAPSSWithSHA256, + ps384: dsig.RSAPSSWithSHA384, + ps512: dsig.RSAPSSWithSHA512, + + // ECDSA algorithms + es256: dsig.ECDSAWithP256AndSHA256, + es384: dsig.ECDSAWithP384AndSHA384, + es512: dsig.ECDSAWithP521AndSHA512, + // Note: ES256K requires external dependency and is handled separately + + // EdDSA algorithm + edDSA: dsig.EdDSA, +} + +// getDsigAlgorithm returns the dsig algorithm name for a JWS algorithm +func getDsigAlgorithm(jwsAlg string) (string, bool) { + dsigAlg, ok := jwsToDsigAlgorithm[jwsAlg] + return dsigAlg, ok +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go new file mode 100644 index 0000000000..36997cef7c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/rsa.go @@ -0,0 +1,71 @@ +package jwsbb + +import ( + "crypto" + "crypto/rsa" + "fmt" + "io" + + "github.com/lestrrat-go/dsig" +) + +// rsaHashToDsigAlgorithm maps RSA hash functions to dsig algorithm constants +func rsaHashToDsigAlgorithm(h crypto.Hash, pss bool) (string, error) { + if pss { + switch h { + case crypto.SHA256: + return dsig.RSAPSSWithSHA256, nil + case crypto.SHA384: + return dsig.RSAPSSWithSHA384, nil + case crypto.SHA512: + return dsig.RSAPSSWithSHA512, nil + default: + return "", fmt.Errorf("unsupported hash algorithm for RSA-PSS: %v", h) + } + } else { + switch h { + case crypto.SHA256: + return dsig.RSAPKCS1v15WithSHA256, nil + case crypto.SHA384: + return dsig.RSAPKCS1v15WithSHA384, nil + case crypto.SHA512: + return 
dsig.RSAPKCS1v15WithSHA512, nil + default: + return "", fmt.Errorf("unsupported hash algorithm for RSA PKCS#1 v1.5: %v", h) + } + } +} + +// SignRSA generates an RSA signature for the given payload using the specified private key and options. +// The raw parameter should be the pre-computed signing input (typically header.payload). +// If pss is true, RSA-PSS is used; otherwise, PKCS#1 v1.5 is used. +// +// The rr parameter is an optional io.Reader that can be used to provide randomness for signing. +// If rr is nil, it defaults to rand.Reader. +// +// This function is now a thin wrapper around dsig.SignRSA. For new projects, you should +// consider using dsig instead of this function. +func SignRSA(key *rsa.PrivateKey, payload []byte, h crypto.Hash, pss bool, rr io.Reader) ([]byte, error) { + dsigAlg, err := rsaHashToDsigAlgorithm(h, pss) + if err != nil { + return nil, fmt.Errorf("jwsbb.SignRSA: %w", err) + } + + return dsig.Sign(key, dsigAlg, payload, rr) +} + +// VerifyRSA verifies an RSA signature for the given payload and header. +// This function constructs the signing input by encoding the header and payload according to JWS specification, +// then verifies the signature using the specified public key and hash algorithm. +// If pss is true, RSA-PSS verification is used; otherwise, PKCS#1 v1.5 verification is used. +// +// This function is now a thin wrapper around dsig.VerifyRSA. For new projects, you should +// consider using dsig instead of this function. 
+func VerifyRSA(key *rsa.PublicKey, payload, signature []byte, h crypto.Hash, pss bool) error { + dsigAlg, err := rsaHashToDsigAlgorithm(h, pss) + if err != nil { + return fmt.Errorf("jwsbb.VerifyRSA: %w", err) + } + + return dsig.Verify(key, dsigAlg, payload, signature) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go new file mode 100644 index 0000000000..6f36ab0554 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/sign.go @@ -0,0 +1,110 @@ +package jwsbb + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "fmt" + "io" + + "github.com/lestrrat-go/dsig" + "github.com/lestrrat-go/jwx/v3/internal/keyconv" +) + +// Sign generates a JWS signature using the specified key and algorithm. +// +// This function loads the signer registered in the jwsbb package _ONLY_. +// It does not support custom signers that the user might have registered. +// +// rr is an io.Reader that provides randomness for signing. If rr is nil, it defaults to rand.Reader. +// Not all algorithms require this parameter, but it is included for consistency. +// 99% of the time, you can pass nil for rr, and it will work fine. 
+func Sign(key any, alg string, payload []byte, rr io.Reader) ([]byte, error) { + dsigAlg, ok := getDsigAlgorithm(alg) + if !ok { + return nil, fmt.Errorf(`jwsbb.Sign: unsupported signature algorithm %q`, alg) + } + + // Get dsig algorithm info to determine key conversion strategy + dsigInfo, ok := dsig.GetAlgorithmInfo(dsigAlg) + if !ok { + return nil, fmt.Errorf(`jwsbb.Sign: dsig algorithm %q not registered`, dsigAlg) + } + + switch dsigInfo.Family { + case dsig.HMAC: + return dispatchHMACSign(key, dsigAlg, payload) + case dsig.RSA: + return dispatchRSASign(key, dsigAlg, payload, rr) + case dsig.ECDSA: + return dispatchECDSASign(key, dsigAlg, payload, rr) + case dsig.EdDSAFamily: + return dispatchEdDSASign(key, dsigAlg, payload, rr) + default: + return nil, fmt.Errorf(`jwsbb.Sign: unsupported dsig algorithm family %q`, dsigInfo.Family) + } +} + +func dispatchHMACSign(key any, dsigAlg string, payload []byte) ([]byte, error) { + var hmackey []byte + if err := keyconv.ByteSliceKey(&hmackey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. []byte is required: %w`, key, err) + } + + return dsig.Sign(hmackey, dsigAlg, payload, nil) +} + +func dispatchRSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an RSA key + if _, ok := signer.Public().(*rsa.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) + } + } + + // Fall back to concrete key types + var privkey *rsa.PrivateKey + if err := keyconv.RSAPrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. 
*rsa.PrivateKey is required: %w`, key, err) + } + + return dsig.Sign(privkey, dsigAlg, payload, rr) +} + +func dispatchECDSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an ECDSA key + if _, ok := signer.Public().(*ecdsa.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) + } + } + + // Fall back to concrete key types + var privkey *ecdsa.PrivateKey + if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. *ecdsa.PrivateKey is required: %w`, key, err) + } + + return dsig.Sign(privkey, dsigAlg, payload, rr) +} + +func dispatchEdDSASign(key any, dsigAlg string, payload []byte, rr io.Reader) ([]byte, error) { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an EdDSA key + if _, ok := signer.Public().(ed25519.PublicKey); ok { + return dsig.Sign(signer, dsigAlg, payload, rr) + } + } + + // Fall back to concrete key types + var privkey ed25519.PrivateKey + if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`jwsbb.Sign: invalid key type %T. ed25519.PrivateKey is required: %w`, key, err) + } + + return dsig.Sign(privkey, dsigAlg, payload, rr) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go new file mode 100644 index 0000000000..bac3ff487e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/jwsbb/verify.go @@ -0,0 +1,105 @@ +package jwsbb + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "fmt" + + "github.com/lestrrat-go/dsig" + "github.com/lestrrat-go/jwx/v3/internal/keyconv" +) + +// Verify verifies a JWS signature using the specified key and algorithm. 
+// +// This function loads the verifier registered in the jwsbb package _ONLY_. +// It does not support custom verifiers that the user might have registered. +func Verify(key any, alg string, payload, signature []byte) error { + dsigAlg, ok := getDsigAlgorithm(alg) + if !ok { + return fmt.Errorf(`jwsbb.Verify: unsupported signature algorithm %q`, alg) + } + + // Get dsig algorithm info to determine key conversion strategy + dsigInfo, ok := dsig.GetAlgorithmInfo(dsigAlg) + if !ok { + return fmt.Errorf(`jwsbb.Verify: dsig algorithm %q not registered`, dsigAlg) + } + + switch dsigInfo.Family { + case dsig.HMAC: + return dispatchHMACVerify(key, dsigAlg, payload, signature) + case dsig.RSA: + return dispatchRSAVerify(key, dsigAlg, payload, signature) + case dsig.ECDSA: + return dispatchECDSAVerify(key, dsigAlg, payload, signature) + case dsig.EdDSAFamily: + return dispatchEdDSAVerify(key, dsigAlg, payload, signature) + default: + return fmt.Errorf(`jwsbb.Verify: unsupported dsig algorithm family %q`, dsigInfo.Family) + } +} + +func dispatchHMACVerify(key any, dsigAlg string, payload, signature []byte) error { + var hmackey []byte + if err := keyconv.ByteSliceKey(&hmackey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. []byte is required: %w`, key, err) + } + + return dsig.Verify(hmackey, dsigAlg, payload, signature) +} + +func dispatchRSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an RSA key + if _, ok := signer.Public().(*rsa.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) + } + } + + // Fall back to concrete key types + var pubkey *rsa.PublicKey + if err := keyconv.RSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. 
*rsa.PublicKey is required: %w`, key, err) + } + + return dsig.Verify(pubkey, dsigAlg, payload, signature) +} + +func dispatchECDSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an ECDSA key + if _, ok := signer.Public().(*ecdsa.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) + } + } + + // Fall back to concrete key types + var pubkey *ecdsa.PublicKey + if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. *ecdsa.PublicKey is required: %w`, key, err) + } + + return dsig.Verify(pubkey, dsigAlg, payload, signature) +} + +func dispatchEdDSAVerify(key any, dsigAlg string, payload, signature []byte) error { + // Try crypto.Signer first (dsig can handle it directly) + if signer, ok := key.(crypto.Signer); ok { + // Verify it's an EdDSA key + if _, ok := signer.Public().(ed25519.PublicKey); ok { + return dsig.Verify(signer, dsigAlg, payload, signature) + } + } + + // Fall back to concrete key types + var pubkey ed25519.PublicKey + if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`jwsbb.Verify: invalid key type %T. ed25519.PublicKey is required: %w`, key, err) + } + + return dsig.Verify(pubkey, dsigAlg, payload, signature) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/key_provider.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/key_provider.go new file mode 100644 index 0000000000..84529a1a87 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/key_provider.go @@ -0,0 +1,291 @@ +package jws + +import ( + "context" + "fmt" + "net/url" + "sync" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" +) + +// KeyProvider is responsible for providing key(s) to sign or verify a payload. 
+// Multiple `jws.KeyProvider`s can be passed to `jws.Verify()` or `jws.Sign()` +// +// `jws.Sign()` can only accept static key providers via `jws.WithKey()`, +// while `jws.Verify()` can accept `jws.WithKey()`, `jws.WithKeySet()`, +// `jws.WithVerifyAuto()`, and `jws.WithKeyProvider()`. +// +// Understanding how this works is crucial to learn how this package works. +// +// `jws.Sign()` is straightforward: signatures are created for each +// provided key. +// +// `jws.Verify()` is a bit more involved, because there are cases you +// will want to compute/deduce/guess the keys that you would like to +// use for verification. +// +// The first thing that `jws.Verify()` does is to collect the +// KeyProviders from the option list that the user provided (presented in pseudocode): +// +// keyProviders := filterKeyProviders(options) +// +// Then, remember that a JWS message may contain multiple signatures in the +// message. For each signature, we call on the KeyProviders to give us +// the key(s) to use on this signature: +// +// for sig in msg.Signatures { +// for kp in keyProviders { +// kp.FetchKeys(ctx, sink, sig, msg) +// ... +// } +// } +// +// The `sink` argument passed to the KeyProvider is a temporary storage +// for the keys (either a jwk.Key or a "raw" key). The `KeyProvider` +// is responsible for sending keys into the `sink`. +// +// When called, the `KeyProvider` created by `jws.WithKey()` sends the same key, +// `jws.WithKeySet()` sends keys that matches a particular `kid` and `alg`, +// `jws.WithVerifyAuto()` fetches a JWK from the `jku` URL, +// and finally `jws.WithKeyProvider()` allows you to execute arbitrary +// logic to provide keys. 
If you are providing a custom `KeyProvider`, +// you should execute the necessary checks or retrieval of keys, and +// then send the key(s) to the sink: +// +// sink.Key(alg, key) +// +// These keys are then retrieved and tried for each signature, until +// a match is found: +// +// keys := sink.Keys() +// for key in keys { +// if givenSignature == makeSignature(key, payload, ...)) { +// return OK +// } +// } +type KeyProvider interface { + FetchKeys(context.Context, KeySink, *Signature, *Message) error +} + +// KeySink is a data storage where `jws.KeyProvider` objects should +// send their keys to. +type KeySink interface { + Key(jwa.SignatureAlgorithm, any) +} + +type algKeyPair struct { + alg jwa.KeyAlgorithm + key any +} + +type algKeySink struct { + mu sync.Mutex + list []algKeyPair +} + +func (s *algKeySink) Key(alg jwa.SignatureAlgorithm, key any) { + s.mu.Lock() + s.list = append(s.list, algKeyPair{alg, key}) + s.mu.Unlock() +} + +type staticKeyProvider struct { + alg jwa.SignatureAlgorithm + key any +} + +func (kp *staticKeyProvider) FetchKeys(_ context.Context, sink KeySink, _ *Signature, _ *Message) error { + sink.Key(kp.alg, kp.key) + return nil +} + +type keySetProvider struct { + set jwk.Set + requireKid bool // true if `kid` must be specified + useDefault bool // true if the first key should be used iff there's exactly one key in set + inferAlgorithm bool // true if the algorithm should be inferred from key type + multipleKeysPerKeyID bool // true if we should attempt to match multiple keys per key ID. if false we assume that only one key exists for a given key ID +} + +func (kp *keySetProvider) selectKey(sink KeySink, key jwk.Key, sig *Signature, _ *Message) error { + if usage, ok := key.KeyUsage(); ok { + // it's okay if use: "". 
we'll assume it's "sig" + if usage != "" && usage != jwk.ForSignature.String() { + return nil + } + } + + if v, ok := key.Algorithm(); ok { + salg, ok := jwa.LookupSignatureAlgorithm(v.String()) + if !ok { + return fmt.Errorf(`invalid signature algorithm %q`, v) + } + + sink.Key(salg, key) + return nil + } + + if kp.inferAlgorithm { + algs, err := AlgorithmsForKey(key) + if err != nil { + return fmt.Errorf(`failed to get a list of signature methods for key type %s: %w`, key.KeyType(), err) + } + + // bail out if the JWT has a `alg` field, and it doesn't match + if tokAlg, ok := sig.ProtectedHeaders().Algorithm(); ok { + for _, alg := range algs { + if tokAlg == alg { + sink.Key(alg, key) + return nil + } + } + return fmt.Errorf(`algorithm in the message does not match any of the inferred algorithms`) + } + + // Yes, you get to try them all!!!!!!! + for _, alg := range algs { + sink.Key(alg, key) + } + return nil + } + return nil +} + +func (kp *keySetProvider) FetchKeys(_ context.Context, sink KeySink, sig *Signature, msg *Message) error { + if kp.requireKid { + wantedKid, ok := sig.ProtectedHeaders().KeyID() + if !ok { + // If the kid is NOT specified... kp.useDefault needs to be true, and the + // JWKs must have exactly one key in it + if !kp.useDefault { + return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token`) + } else if kp.useDefault && kp.set.Len() > 1 { + return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token but multiple keys available in key set`) + } + + // if we got here, then useDefault == true AND there is exactly + // one key in the set. + key, ok := kp.set.Key(0) + if !ok { + return fmt.Errorf(`failed to get key at index 0 (empty JWKS?)`) + } + return kp.selectKey(sink, key, sig, msg) + } + + // Otherwise we better be able to look up the key. 
+ // <= v2.0.3 backwards compatible case: only match a single key + // whose key ID matches `wantedKid` + if !kp.multipleKeysPerKeyID { + key, ok := kp.set.LookupKeyID(wantedKid) + if !ok { + return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid) + } + return kp.selectKey(sink, key, sig, msg) + } + + // if multipleKeysPerKeyID is true, we attempt all keys whose key ID matches + // the wantedKey + ok = false + for i := range kp.set.Len() { + key, _ := kp.set.Key(i) + if kid, ok := key.KeyID(); !ok || kid != wantedKid { + continue + } + + if err := kp.selectKey(sink, key, sig, msg); err != nil { + continue + } + ok = true + // continue processing so that we try all keys with the same key ID + } + if !ok { + return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid) + } + return nil + } + + // Otherwise just try all keys + for i := range kp.set.Len() { + key, ok := kp.set.Key(i) + if !ok { + return fmt.Errorf(`failed to get key at index %d`, i) + } + if err := kp.selectKey(sink, key, sig, msg); err != nil { + continue + } + } + return nil +} + +type jkuProvider struct { + fetcher jwk.Fetcher + options []jwk.FetchOption +} + +func (kp jkuProvider) FetchKeys(ctx context.Context, sink KeySink, sig *Signature, _ *Message) error { + if kp.fetcher == nil { + kp.fetcher = jwk.FetchFunc(jwk.Fetch) + } + + kid, ok := sig.ProtectedHeaders().KeyID() + if !ok { + return fmt.Errorf(`use of "jku" requires that the payload contain a "kid" field in the protected header`) + } + + // errors here can't be reliably passed to the consumers. 
+ // it's unfortunate, but if you need this control, you are + // going to have to write your own fetcher + u, ok := sig.ProtectedHeaders().JWKSetURL() + if !ok || u == "" { + return fmt.Errorf(`use of "jku" field specified, but the field is empty`) + } + uo, err := url.Parse(u) + if err != nil { + return fmt.Errorf(`failed to parse "jku": %w`, err) + } + if uo.Scheme != "https" { + return fmt.Errorf(`url in "jku" must be HTTPS`) + } + + set, err := kp.fetcher.Fetch(ctx, u, kp.options...) + if err != nil { + return fmt.Errorf(`failed to fetch %q: %w`, u, err) + } + + key, ok := set.LookupKeyID(kid) + if !ok { + // It is not an error if the key with the kid doesn't exist + return nil + } + + algs, err := AlgorithmsForKey(key) + if err != nil { + return fmt.Errorf(`failed to get a list of signature methods for key type %s: %w`, key.KeyType(), err) + } + + hdrAlg, ok := sig.ProtectedHeaders().Algorithm() + if ok { + for _, alg := range algs { + // if we have an "alg" field in the JWS, we can only proceed if + // the inferred algorithm matches + if hdrAlg != alg { + continue + } + + sink.Key(alg, key) + break + } + } + return nil +} + +// KeyProviderFunc is a type of KeyProvider that is implemented by +// a single function. You can use this to create ad-hoc `KeyProvider` +// instances. 
+type KeyProviderFunc func(context.Context, KeySink, *Signature, *Message) error + +func (kp KeyProviderFunc) FetchKeys(ctx context.Context, sink KeySink, sig *Signature, msg *Message) error { + return kp(ctx, sink, sig, msg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go new file mode 100644 index 0000000000..767ad723a3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy.go @@ -0,0 +1,91 @@ +package jws + +import ( + "fmt" + "sync" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jws/legacy" +) + +var enableLegacySignersOnce = &sync.Once{} + +func enableLegacySigners() { + for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256(), jwa.HS384(), jwa.HS512()} { + if err := RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory { + return SignerFactoryFn(func() (Signer, error) { + return legacy.NewHMACSigner(alg), nil + }) + }(alg)); err != nil { + panic(fmt.Sprintf("RegisterSigner failed: %v", err)) + } + if err := RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory { + return VerifierFactoryFn(func() (Verifier, error) { + return legacy.NewHMACVerifier(alg), nil + }) + }(alg)); err != nil { + panic(fmt.Sprintf("RegisterVerifier failed: %v", err)) + } + } + + for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256(), jwa.RS384(), jwa.RS512(), jwa.PS256(), jwa.PS384(), jwa.PS512()} { + if err := RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory { + return SignerFactoryFn(func() (Signer, error) { + return legacy.NewRSASigner(alg), nil + }) + }(alg)); err != nil { + panic(fmt.Sprintf("RegisterSigner failed: %v", err)) + } + if err := RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory { + return VerifierFactoryFn(func() (Verifier, error) { + return legacy.NewRSAVerifier(alg), nil + }) + }(alg)); err != nil { + panic(fmt.Sprintf("RegisterVerifier failed: %v", err)) + } + } + for _, alg := range 
[]jwa.SignatureAlgorithm{jwa.ES256(), jwa.ES384(), jwa.ES512(), jwa.ES256K()} { + if err := RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory { + return SignerFactoryFn(func() (Signer, error) { + return legacy.NewECDSASigner(alg), nil + }) + }(alg)); err != nil { + panic(fmt.Sprintf("RegisterSigner failed: %v", err)) + } + if err := RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory { + return VerifierFactoryFn(func() (Verifier, error) { + return legacy.NewECDSAVerifier(alg), nil + }) + }(alg)); err != nil { + panic(fmt.Sprintf("RegisterVerifier failed: %v", err)) + } + } + + if err := RegisterSigner(jwa.EdDSA(), SignerFactoryFn(func() (Signer, error) { + return legacy.NewEdDSASigner(), nil + })); err != nil { + panic(fmt.Sprintf("RegisterSigner failed: %v", err)) + } + if err := RegisterVerifier(jwa.EdDSA(), VerifierFactoryFn(func() (Verifier, error) { + return legacy.NewEdDSAVerifier(), nil + })); err != nil { + panic(fmt.Sprintf("RegisterVerifier failed: %v", err)) + } +} + +func legacySignerFor(alg jwa.SignatureAlgorithm) (Signer, error) { + muSigner.Lock() + s, ok := signers[alg] + if !ok { + v, err := newLegacySigner(alg) + if err != nil { + muSigner.Unlock() + return nil, fmt.Errorf(`failed to create payload signer: %w`, err) + } + signers[alg] = v + s = v + } + muSigner.Unlock() + + return s, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/BUILD.bazel new file mode 100644 index 0000000000..8e77cece46 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/BUILD.bazel @@ -0,0 +1,21 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "legacy", + srcs = [ + "ecdsa.go", + "eddsa.go", + "hmac.go", + "legacy.go", + "rsa.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jws/legacy", + visibility = ["//visibility:public"], + deps = [ + "//internal/ecutil", + "//internal/keyconv", + "//internal/pool", + "//jwa", 
+ "//jws/internal/keytype", + ], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/ecdsa.go new file mode 100644 index 0000000000..0b714a44b8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/ecdsa.go @@ -0,0 +1,204 @@ +package legacy + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "encoding/asn1" + "fmt" + "math/big" + + "github.com/lestrrat-go/jwx/v3/internal/ecutil" + "github.com/lestrrat-go/jwx/v3/internal/keyconv" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" +) + +var ecdsaSigners = make(map[jwa.SignatureAlgorithm]*ecdsaSigner) +var ecdsaVerifiers = make(map[jwa.SignatureAlgorithm]*ecdsaVerifier) + +func init() { + algs := map[jwa.SignatureAlgorithm]crypto.Hash{ + jwa.ES256(): crypto.SHA256, + jwa.ES384(): crypto.SHA384, + jwa.ES512(): crypto.SHA512, + jwa.ES256K(): crypto.SHA256, + } + for alg, hash := range algs { + ecdsaSigners[alg] = &ecdsaSigner{ + alg: alg, + hash: hash, + } + ecdsaVerifiers[alg] = &ecdsaVerifier{ + alg: alg, + hash: hash, + } + } +} + +func NewECDSASigner(alg jwa.SignatureAlgorithm) Signer { + return ecdsaSigners[alg] +} + +// ecdsaSigners are immutable. 
+type ecdsaSigner struct { + alg jwa.SignatureAlgorithm + hash crypto.Hash +} + +func (es ecdsaSigner) Algorithm() jwa.SignatureAlgorithm { + return es.alg +} + +func (es *ecdsaSigner) Sign(payload []byte, key any) ([]byte, error) { + if key == nil { + return nil, fmt.Errorf(`missing private key while signing payload`) + } + + h := es.hash.New() + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err) + } + + signer, ok := key.(crypto.Signer) + if ok { + if !keytype.IsValidECDSAKey(key) { + return nil, fmt.Errorf(`cannot use key of type %T to generate ECDSA based signatures`, key) + } + switch key.(type) { + case ecdsa.PrivateKey, *ecdsa.PrivateKey: + // if it's a ecdsa.PrivateKey, it's more efficient to + // go through the non-crypto.Signer route. Set ok to false + ok = false + } + } + + var r, s *big.Int + var curveBits int + if ok { + signed, err := signer.Sign(rand.Reader, h.Sum(nil), es.hash) + if err != nil { + return nil, err + } + + var p struct { + R *big.Int + S *big.Int + } + if _, err := asn1.Unmarshal(signed, &p); err != nil { + return nil, fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err) + } + + // Okay, this is silly, but hear me out. When we use the + // crypto.Signer interface, the PrivateKey is hidden. + // But we need some information about the key (its bit size). + // + // So while silly, we're going to have to make another call + // here and fetch the Public key. + // This probably means that this should be cached some where. 
+ cpub := signer.Public() + pubkey, ok := cpub.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, pubkey) + } + curveBits = pubkey.Curve.Params().BitSize + + r = p.R + s = p.S + } else { + var privkey ecdsa.PrivateKey + if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`failed to retrieve ecdsa.PrivateKey out of %T: %w`, key, err) + } + curveBits = privkey.Curve.Params().BitSize + rtmp, stmp, err := ecdsa.Sign(rand.Reader, &privkey, h.Sum(nil)) + if err != nil { + return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err) + } + r = rtmp + s = stmp + } + + keyBytes := curveBits / 8 + // Curve bits do not need to be a multiple of 8. + if curveBits%8 > 0 { + keyBytes++ + } + + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return out, nil +} + +// ecdsaVerifiers are immutable. 
+type ecdsaVerifier struct { + alg jwa.SignatureAlgorithm + hash crypto.Hash +} + +func NewECDSAVerifier(alg jwa.SignatureAlgorithm) Verifier { + return ecdsaVerifiers[alg] +} + +func (v ecdsaVerifier) Algorithm() jwa.SignatureAlgorithm { + return v.alg +} + +func (v *ecdsaVerifier) Verify(payload []byte, signature []byte, key any) error { + if key == nil { + return fmt.Errorf(`missing public key while verifying payload`) + } + + var pubkey ecdsa.PublicKey + if cs, ok := key.(crypto.Signer); ok { + cpub := cs.Public() + switch cpub := cpub.(type) { + case ecdsa.PublicKey: + pubkey = cpub + case *ecdsa.PublicKey: + pubkey = *cpub + default: + return fmt.Errorf(`failed to retrieve ecdsa.PublicKey out of crypto.Signer %T`, key) + } + } else { + if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`failed to retrieve ecdsa.PublicKey out of %T: %w`, key, err) + } + } + + if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) { + return fmt.Errorf(`public key used does not contain a point (X,Y) on the curve`) + } + + r := pool.BigInt().Get() + s := pool.BigInt().Get() + defer pool.BigInt().Put(r) + defer pool.BigInt().Put(s) + + keySize := ecutil.CalculateKeySize(pubkey.Curve) + if len(signature) != keySize*2 { + return fmt.Errorf(`invalid signature length for curve %q`, pubkey.Curve.Params().Name) + } + + r.SetBytes(signature[:keySize]) + s.SetBytes(signature[keySize:]) + + h := v.hash.New() + if _, err := h.Write(payload); err != nil { + return fmt.Errorf(`failed to write payload using ecdsa: %w`, err) + } + + if !ecdsa.Verify(&pubkey, h.Sum(nil), r, s) { + return fmt.Errorf(`failed to verify signature using ecdsa`) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/eddsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/eddsa.go new file mode 100644 index 0000000000..289e48e3b3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/eddsa.go @@ -0,0 +1,79 @@ +package legacy + +import ( + "crypto" + 
"crypto/ed25519" + "crypto/rand" + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/keyconv" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" +) + +type eddsaSigner struct{} + +func NewEdDSASigner() Signer { + return &eddsaSigner{} +} + +func (s eddsaSigner) Algorithm() jwa.SignatureAlgorithm { + return jwa.EdDSA() +} + +func (s eddsaSigner) Sign(payload []byte, key any) ([]byte, error) { + if key == nil { + return nil, fmt.Errorf(`missing private key while signing payload`) + } + + // The ed25519.PrivateKey object implements crypto.Signer, so we should + // simply accept a crypto.Signer here. + signer, ok := key.(crypto.Signer) + if ok { + if !keytype.IsValidEDDSAKey(key) { + return nil, fmt.Errorf(`cannot use key of type %T to generate EdDSA based signatures`, key) + } + } else { + // This fallback exists for cases when jwk.Key was passed, or + // users gave us a pointer instead of non-pointer, etc. + var privkey ed25519.PrivateKey + if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T: %w`, key, err) + } + signer = privkey + } + + return signer.Sign(rand.Reader, payload, crypto.Hash(0)) +} + +type eddsaVerifier struct{} + +func NewEdDSAVerifier() Verifier { + return &eddsaVerifier{} +} + +func (v eddsaVerifier) Verify(payload, signature []byte, key any) (err error) { + if key == nil { + return fmt.Errorf(`missing public key while verifying payload`) + } + + var pubkey ed25519.PublicKey + signer, ok := key.(crypto.Signer) + if ok { + v := signer.Public() + pubkey, ok = v.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v) + } + } else { + if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of %T: %w`, key, err) + } + } + + if !ed25519.Verify(pubkey, payload, signature) { + return 
fmt.Errorf(`failed to match EdDSA signature`) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/hmac.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/hmac.go new file mode 100644 index 0000000000..7a3c9d1896 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/hmac.go @@ -0,0 +1,90 @@ +package legacy + +import ( + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + "github.com/lestrrat-go/jwx/v3/internal/keyconv" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +func init() { + algs := map[jwa.SignatureAlgorithm]func() hash.Hash{ + jwa.HS256(): sha256.New, + jwa.HS384(): sha512.New384, + jwa.HS512(): sha512.New, + } + + for alg, h := range algs { + hmacSignFuncs[alg] = makeHMACSignFunc(h) + } +} + +// HMACSigner uses crypto/hmac to sign the payloads. +// This is for legacy support only. +type HMACSigner struct { + alg jwa.SignatureAlgorithm + sign hmacSignFunc +} + +type HMACVerifier struct { + signer Signer +} + +type hmacSignFunc func(payload []byte, key []byte) ([]byte, error) + +var hmacSignFuncs = make(map[jwa.SignatureAlgorithm]hmacSignFunc) + +func NewHMACSigner(alg jwa.SignatureAlgorithm) Signer { + return &HMACSigner{ + alg: alg, + sign: hmacSignFuncs[alg], // we know this will succeed + } +} + +func makeHMACSignFunc(hfunc func() hash.Hash) hmacSignFunc { + return func(payload []byte, key []byte) ([]byte, error) { + h := hmac.New(hfunc, key) + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err) + } + return h.Sum(nil), nil + } +} + +func (s HMACSigner) Algorithm() jwa.SignatureAlgorithm { + return s.alg +} + +func (s HMACSigner) Sign(payload []byte, key any) ([]byte, error) { + var hmackey []byte + if err := keyconv.ByteSliceKey(&hmackey, key); err != nil { + return nil, fmt.Errorf(`invalid key type %T. 
[]byte is required: %w`, key, err) + } + + if len(hmackey) == 0 { + return nil, fmt.Errorf(`missing key while signing payload`) + } + + return s.sign(payload, hmackey) +} + +func NewHMACVerifier(alg jwa.SignatureAlgorithm) Verifier { + s := NewHMACSigner(alg) + return &HMACVerifier{signer: s} +} + +func (v HMACVerifier) Verify(payload, signature []byte, key any) (err error) { + expected, err := v.signer.Sign(payload, key) + if err != nil { + return fmt.Errorf(`failed to generated signature: %w`, err) + } + + if !hmac.Equal(signature, expected) { + return fmt.Errorf(`failed to match hmac signature`) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go new file mode 100644 index 0000000000..fe69b55e05 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/legacy.go @@ -0,0 +1,36 @@ +// Package legacy provides support for legacy implementation of JWS signing and verification. +// Types, functions, and variables in this package are exported only for legacy support, +// and should not be relied upon for new code. +// +// This package will be available until v3 is sunset, but it will be removed in v4 +package legacy + +import ( + "github.com/lestrrat-go/jwx/v3/jwa" +) + +// Signer generates the signature for a given payload. +// This is for legacy support only. +type Signer interface { + // Sign creates a signature for the given payload. + // The second argument is the key used for signing the payload, and is usually + // the private key type associated with the signature method. For example, + // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the + // `*"crypto/rsa".PrivateKey` type. + // Check the documentation for each signer for details + Sign([]byte, any) ([]byte, error) + + Algorithm() jwa.SignatureAlgorithm +} + +// Verifier is for legacy support only. 
+type Verifier interface { + // Verify checks whether the payload and signature are valid for + // the given key. + // `key` is the key used for verifying the payload, and is usually + // the public key associated with the signature method. For example, + // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the + // `*"crypto/rsa".PublicKey` type. + // Check the documentation for each verifier for details + Verify(payload []byte, signature []byte, key any) error +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/rsa.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/rsa.go new file mode 100644 index 0000000000..aef110a5cf --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/legacy/rsa.go @@ -0,0 +1,145 @@ +package legacy + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/keyconv" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jws/internal/keytype" +) + +var rsaSigners = make(map[jwa.SignatureAlgorithm]*rsaSigner) +var rsaVerifiers = make(map[jwa.SignatureAlgorithm]*rsaVerifier) + +func init() { + data := map[jwa.SignatureAlgorithm]struct { + Hash crypto.Hash + PSS bool + }{ + jwa.RS256(): { + Hash: crypto.SHA256, + }, + jwa.RS384(): { + Hash: crypto.SHA384, + }, + jwa.RS512(): { + Hash: crypto.SHA512, + }, + jwa.PS256(): { + Hash: crypto.SHA256, + PSS: true, + }, + jwa.PS384(): { + Hash: crypto.SHA384, + PSS: true, + }, + jwa.PS512(): { + Hash: crypto.SHA512, + PSS: true, + }, + } + + for alg, item := range data { + rsaSigners[alg] = &rsaSigner{ + alg: alg, + hash: item.Hash, + pss: item.PSS, + } + rsaVerifiers[alg] = &rsaVerifier{ + alg: alg, + hash: item.Hash, + pss: item.PSS, + } + } +} + +type rsaSigner struct { + alg jwa.SignatureAlgorithm + hash crypto.Hash + pss bool +} + +func NewRSASigner(alg jwa.SignatureAlgorithm) Signer { + return rsaSigners[alg] +} + +func (rs *rsaSigner) Algorithm() jwa.SignatureAlgorithm { + return rs.alg +} + +func (rs 
*rsaSigner) Sign(payload []byte, key any) ([]byte, error) { + if key == nil { + return nil, fmt.Errorf(`missing private key while signing payload`) + } + + signer, ok := key.(crypto.Signer) + if ok { + if !keytype.IsValidRSAKey(key) { + return nil, fmt.Errorf(`cannot use key of type %T to generate RSA based signatures`, key) + } + } else { + var privkey rsa.PrivateKey + if err := keyconv.RSAPrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`failed to retrieve rsa.PrivateKey out of %T: %w`, key, err) + } + signer = &privkey + } + + h := rs.hash.New() + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload to hash: %w`, err) + } + if rs.pss { + return signer.Sign(rand.Reader, h.Sum(nil), &rsa.PSSOptions{ + Hash: rs.hash, + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + return signer.Sign(rand.Reader, h.Sum(nil), rs.hash) +} + +type rsaVerifier struct { + alg jwa.SignatureAlgorithm + hash crypto.Hash + pss bool +} + +func NewRSAVerifier(alg jwa.SignatureAlgorithm) Verifier { + return rsaVerifiers[alg] +} + +func (rv *rsaVerifier) Verify(payload, signature []byte, key any) error { + if key == nil { + return fmt.Errorf(`missing public key while verifying payload`) + } + + var pubkey rsa.PublicKey + if cs, ok := key.(crypto.Signer); ok { + cpub := cs.Public() + switch cpub := cpub.(type) { + case rsa.PublicKey: + pubkey = cpub + case *rsa.PublicKey: + pubkey = *cpub + default: + return fmt.Errorf(`failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key) + } + } else { + if err := keyconv.RSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`failed to retrieve rsa.PublicKey out of %T: %w`, key, err) + } + } + + h := rv.hash.New() + if _, err := h.Write(payload); err != nil { + return fmt.Errorf(`failed to write payload to hash: %w`, err) + } + + if rv.pss { + return rsa.VerifyPSS(&pubkey, rv.hash, h.Sum(nil), signature, nil) + } + return rsa.VerifyPKCS1v15(&pubkey, rv.hash, h.Sum(nil), signature) 
+} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/message.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/message.go new file mode 100644 index 0000000000..e113d1438c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/message.go @@ -0,0 +1,550 @@ +package jws + +import ( + "bytes" + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +func NewSignature() *Signature { + return &Signature{} +} + +func (s *Signature) DecodeCtx() DecodeCtx { + return s.dc +} + +func (s *Signature) SetDecodeCtx(dc DecodeCtx) { + s.dc = dc +} + +func (s Signature) PublicHeaders() Headers { + return s.headers +} + +func (s *Signature) SetPublicHeaders(v Headers) *Signature { + s.headers = v + return s +} + +func (s Signature) ProtectedHeaders() Headers { + return s.protected +} + +func (s *Signature) SetProtectedHeaders(v Headers) *Signature { + s.protected = v + return s +} + +func (s Signature) Signature() []byte { + return s.signature +} + +func (s *Signature) SetSignature(v []byte) *Signature { + s.signature = v + return s +} + +type signatureUnmarshalProbe struct { + Header Headers `json:"header,omitempty"` + Protected *string `json:"protected,omitempty"` + Signature *string `json:"signature,omitempty"` +} + +func (s *Signature) UnmarshalJSON(data []byte) error { + var sup signatureUnmarshalProbe + sup.Header = NewHeaders() + if err := json.Unmarshal(data, &sup); err != nil { + return fmt.Errorf(`failed to unmarshal signature into temporary struct: %w`, err) + } + + s.headers = sup.Header + if buf := sup.Protected; buf != nil { + src := []byte(*buf) + if !bytes.HasPrefix(src, []byte{tokens.OpenCurlyBracket}) { + decoded, err := base64.Decode(src) + if err != nil { + return fmt.Errorf(`failed to base64 decode protected headers: %w`, err) + } + src = decoded + } + + prt := 
NewHeaders() + //nolint:forcetypeassert + prt.(*stdHeaders).SetDecodeCtx(s.DecodeCtx()) + if err := json.Unmarshal(src, prt); err != nil { + return fmt.Errorf(`failed to unmarshal protected headers: %w`, err) + } + //nolint:forcetypeassert + prt.(*stdHeaders).SetDecodeCtx(nil) + s.protected = prt + } + + if sup.Signature != nil { + decoded, err := base64.DecodeString(*sup.Signature) + if err != nil { + return fmt.Errorf(`failed to base decode signature: %w`, err) + } + s.signature = decoded + } + return nil +} + +// Sign populates the signature field, with a signature generated by +// given the signer object and payload. +// +// The first return value is the raw signature in binary format. +// The second return value s the full three-segment signature +// (e.g. "eyXXXX.XXXXX.XXXX") +// +// This method is deprecated, and will be remove in a future release. +// Signature objects in the future will only be used as containers, +// and signing will be done using the `jws.Sign` function, or alternatively +// you could use jwsbb package to craft the signature manually. 
+func (s *Signature) Sign(payload []byte, signer Signer, key any) ([]byte, []byte, error) { + return s.sign2(payload, signer, key) +} + +func (s *Signature) sign2(payload []byte, signer interface{ Algorithm() jwa.SignatureAlgorithm }, key any) ([]byte, []byte, error) { + // Create a signatureBuilder to use the shared signing logic + sb := signatureBuilderPool.Get() + defer signatureBuilderPool.Put(sb) + + sb.alg = signer.Algorithm() + sb.key = key + sb.protected = s.protected + sb.public = s.headers + + // Set up the appropriate signer interface + switch typedSigner := signer.(type) { + case Signer2: + sb.signer2 = typedSigner + case Signer: + sb.signer = typedSigner + default: + return nil, nil, fmt.Errorf(`invalid signer type: %T`, signer) + } + + // Create a minimal sign context + sc := signContextPool.Get() + defer signContextPool.Put(sc) + + sc.detached = s.detached + + encoder := s.encoder + if encoder == nil { + encoder = base64.DefaultEncoder() + } + sc.encoder = encoder + + // Build the signature using signatureBuilder + sig, err := sb.Build(sc, payload) + if err != nil { + return nil, nil, fmt.Errorf(`failed to build signature: %w`, err) + } + + // Copy the signature result back to this signature instance + s.signature = sig.signature + s.protected = sig.protected + s.headers = sig.headers + + // Build the complete JWS token for the return value + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + + // Marshal the merged headers for the final output + hdrs, err := mergeHeaders(s.headers, s.protected) + if err != nil { + return nil, nil, fmt.Errorf(`failed to merge headers: %w`, err) + } + + hdrbuf, err := json.Marshal(hdrs) + if err != nil { + return nil, nil, fmt.Errorf(`failed to marshal headers: %w`, err) + } + + buf.WriteString(encoder.EncodeToString(hdrbuf)) + buf.WriteByte(tokens.Period) + + var plen int + b64 := getB64Value(hdrs) + if b64 { + encoded := encoder.EncodeToString(payload) + plen = len(encoded) + 
buf.WriteString(encoded) + } else { + if !s.detached { + if bytes.Contains(payload, []byte{tokens.Period}) { + return nil, nil, fmt.Errorf(`payload must not contain a "."`) + } + } + plen = len(payload) + buf.Write(payload) + } + + // Handle detached payload + if s.detached { + buf.Truncate(buf.Len() - plen) + } + + buf.WriteByte(tokens.Period) + buf.WriteString(encoder.EncodeToString(s.signature)) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + + return s.signature, ret, nil +} + +func NewMessage() *Message { + return &Message{} +} + +// Clears the internal raw buffer that was accumulated during +// the verify phase +func (m *Message) clearRaw() { + for _, sig := range m.signatures { + if protected := sig.protected; protected != nil { + if cr, ok := protected.(*stdHeaders); ok { + cr.raw = nil + } + } + } +} + +func (m *Message) SetDecodeCtx(dc DecodeCtx) { + m.dc = dc +} + +func (m *Message) DecodeCtx() DecodeCtx { + return m.dc +} + +// Payload returns the decoded payload +func (m Message) Payload() []byte { + return m.payload +} + +func (m *Message) SetPayload(v []byte) *Message { + m.payload = v + return m +} + +func (m Message) Signatures() []*Signature { + return m.signatures +} + +func (m *Message) AppendSignature(v *Signature) *Message { + m.signatures = append(m.signatures, v) + return m +} + +func (m *Message) ClearSignatures() *Message { + m.signatures = nil + return m +} + +// LookupSignature looks up a particular signature entry using +// the `kid` value +func (m Message) LookupSignature(kid string) []*Signature { + var sigs []*Signature + for _, sig := range m.signatures { + if hdr := sig.PublicHeaders(); hdr != nil { + hdrKeyID, ok := hdr.KeyID() + if ok && hdrKeyID == kid { + sigs = append(sigs, sig) + continue + } + } + + if hdr := sig.ProtectedHeaders(); hdr != nil { + hdrKeyID, ok := hdr.KeyID() + if ok && hdrKeyID == kid { + sigs = append(sigs, sig) + continue + } + } + } + return sigs +} + +// This struct is used to first probe for 
the structure of the +// incoming JSON object. We then decide how to parse it +// from the fields that are populated. +type messageUnmarshalProbe struct { + Payload *string `json:"payload"` + Signatures []json.RawMessage `json:"signatures,omitempty"` + Header Headers `json:"header,omitempty"` + Protected *string `json:"protected,omitempty"` + Signature *string `json:"signature,omitempty"` +} + +func (m *Message) UnmarshalJSON(buf []byte) error { + m.payload = nil + m.signatures = nil + m.b64 = true + + var mup messageUnmarshalProbe + mup.Header = NewHeaders() + if err := json.Unmarshal(buf, &mup); err != nil { + return fmt.Errorf(`failed to unmarshal into temporary structure: %w`, err) + } + + b64 := true + if mup.Signature == nil { // flattened signature is NOT present + if len(mup.Signatures) == 0 { + return fmt.Errorf(`required field "signatures" not present`) + } + + m.signatures = make([]*Signature, 0, len(mup.Signatures)) + for i, rawsig := range mup.Signatures { + var sig Signature + sig.SetDecodeCtx(m.DecodeCtx()) + if err := json.Unmarshal(rawsig, &sig); err != nil { + return fmt.Errorf(`failed to unmarshal signature #%d: %w`, i+1, err) + } + sig.SetDecodeCtx(nil) + + if sig.protected == nil { + // Instead of barfing on a nil protected header, use an empty header + sig.protected = NewHeaders() + } + + if i == 0 { + if !getB64Value(sig.protected) { + b64 = false + } + } else { + if b64 != getB64Value(sig.protected) { + return fmt.Errorf(`b64 value must be the same for all signatures`) + } + } + + m.signatures = append(m.signatures, &sig) + } + } else { // .signature is present, it's a flattened structure + if len(mup.Signatures) != 0 { + return fmt.Errorf(`invalid format ("signatures" and "signature" keys cannot both be present)`) + } + + var sig Signature + sig.headers = mup.Header + if src := mup.Protected; src != nil { + decoded, err := base64.DecodeString(*src) + if err != nil { + return fmt.Errorf(`failed to base64 decode flattened protected headers: 
%w`, err) + } + prt := NewHeaders() + //nolint:forcetypeassert + prt.(*stdHeaders).SetDecodeCtx(m.DecodeCtx()) + if err := json.Unmarshal(decoded, prt); err != nil { + return fmt.Errorf(`failed to unmarshal flattened protected headers: %w`, err) + } + //nolint:forcetypeassert + prt.(*stdHeaders).SetDecodeCtx(nil) + sig.protected = prt + } + + if sig.protected == nil { + // Instead of barfing on a nil protected header, use an empty header + sig.protected = NewHeaders() + } + + decoded, err := base64.DecodeString(*mup.Signature) + if err != nil { + return fmt.Errorf(`failed to base64 decode flattened signature: %w`, err) + } + sig.signature = decoded + + m.signatures = []*Signature{&sig} + b64 = getB64Value(sig.protected) + } + + if mup.Payload != nil { + if !b64 { // NOT base64 encoded + m.payload = []byte(*mup.Payload) + } else { + decoded, err := base64.DecodeString(*mup.Payload) + if err != nil { + return fmt.Errorf(`failed to base64 decode payload: %w`, err) + } + m.payload = decoded + } + } + m.b64 = b64 + return nil +} + +func (m Message) MarshalJSON() ([]byte, error) { + if len(m.signatures) == 1 { + return m.marshalFlattened() + } + return m.marshalFull() +} + +func (m Message) marshalFlattened() ([]byte, error) { + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + + sig := m.signatures[0] + + buf.WriteRune(tokens.OpenCurlyBracket) + var wrote bool + + if hdr := sig.headers; hdr != nil { + hdrjs, err := json.Marshal(hdr) + if err != nil { + return nil, fmt.Errorf(`failed to marshal "header" (flattened format): %w`, err) + } + buf.WriteString(`"header":`) + buf.Write(hdrjs) + wrote = true + } + + if wrote { + buf.WriteRune(tokens.Comma) + } + buf.WriteString(`"payload":"`) + buf.WriteString(base64.EncodeToString(m.payload)) + buf.WriteRune('"') + + if protected := sig.protected; protected != nil { + protectedbuf, err := json.Marshal(protected) + if err != nil { + return nil, fmt.Errorf(`failed to marshal "protected" (flattened format): 
%w`, err) + } + buf.WriteString(`,"protected":"`) + buf.WriteString(base64.EncodeToString(protectedbuf)) + buf.WriteRune('"') + } + + buf.WriteString(`,"signature":"`) + buf.WriteString(base64.EncodeToString(sig.signature)) + buf.WriteRune('"') + buf.WriteRune(tokens.CloseCurlyBracket) + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (m Message) marshalFull() ([]byte, error) { + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + + buf.WriteString(`{"payload":"`) + buf.WriteString(base64.EncodeToString(m.payload)) + buf.WriteString(`","signatures":[`) + for i, sig := range m.signatures { + if i > 0 { + buf.WriteRune(tokens.Comma) + } + + buf.WriteRune(tokens.OpenCurlyBracket) + var wrote bool + if hdr := sig.headers; hdr != nil { + hdrbuf, err := json.Marshal(hdr) + if err != nil { + return nil, fmt.Errorf(`failed to marshal "header" for signature #%d: %w`, i+1, err) + } + buf.WriteString(`"header":`) + buf.Write(hdrbuf) + wrote = true + } + + if protected := sig.protected; protected != nil { + protectedbuf, err := json.Marshal(protected) + if err != nil { + return nil, fmt.Errorf(`failed to marshal "protected" for signature #%d: %w`, i+1, err) + } + if wrote { + buf.WriteRune(tokens.Comma) + } + buf.WriteString(`"protected":"`) + buf.WriteString(base64.EncodeToString(protectedbuf)) + buf.WriteRune('"') + wrote = true + } + + if len(sig.signature) > 0 { + // If InsecureNoSignature is enabled, signature may not exist + if wrote { + buf.WriteRune(tokens.Comma) + } + buf.WriteString(`"signature":"`) + buf.WriteString(base64.EncodeToString(sig.signature)) + buf.WriteString(`"`) + } + buf.WriteString(`}`) + } + buf.WriteString(`]}`) + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +// Compact generates a JWS message in compact serialization format from +// `*jws.Message` object. The object contain exactly one signature, or +// an error is returned. 
+// +// If using a detached payload, the payload must already be stored in +// the `*jws.Message` object, and the `jws.WithDetached()` option +// must be passed to the function. +func Compact(msg *Message, options ...CompactOption) ([]byte, error) { + if l := len(msg.signatures); l != 1 { + return nil, fmt.Errorf(`jws.Compact: cannot serialize message with %d signatures (must be one)`, l) + } + + var detached bool + var encoder Base64Encoder = base64.DefaultEncoder() + for _, option := range options { + switch option.Ident() { + case identDetached{}: + if err := option.Value(&detached); err != nil { + return nil, fmt.Errorf(`jws.Compact: failed to retrieve detached option value: %w`, err) + } + case identBase64Encoder{}: + if err := option.Value(&encoder); err != nil { + return nil, fmt.Errorf(`jws.Compact: failed to retrieve base64 encoder option value: %w`, err) + } + } + } + + s := msg.signatures[0] + // XXX check if this is correct + hdrs := s.ProtectedHeaders() + + hdrbuf, err := json.Marshal(hdrs) + if err != nil { + return nil, fmt.Errorf(`jws.Compress: failed to marshal headers: %w`, err) + } + + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + + buf.WriteString(encoder.EncodeToString(hdrbuf)) + buf.WriteByte(tokens.Period) + + if !detached { + if getB64Value(hdrs) { + encoded := encoder.EncodeToString(msg.payload) + buf.WriteString(encoded) + } else { + if bytes.Contains(msg.payload, []byte{tokens.Period}) { + return nil, fmt.Errorf(`jws.Compress: payload must not contain a "."`) + } + buf.Write(msg.payload) + } + } + + buf.WriteByte(tokens.Period) + buf.WriteString(encoder.EncodeToString(s.signature)) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go new file mode 100644 index 0000000000..4c217c3483 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.go @@ -0,0 +1,259 @@ 
+package jws + +import ( + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/option/v2" +) + +type identInsecureNoSignature struct{} + +// WithJSON specifies that the result of `jws.Sign()` is serialized in +// JSON format. +// +// If you pass multiple keys to `jws.Sign()`, it will fail unless +// you also pass this option. +func WithJSON(options ...WithJSONSuboption) SignVerifyParseOption { + var pretty bool + for _, option := range options { + switch option.Ident() { + case identPretty{}: + if err := option.Value(&pretty); err != nil { + panic(`jws.WithJSON() option must be of type bool`) + } + } + } + + format := fmtJSON + if pretty { + format = fmtJSONPretty + } + return &signVerifyParseOption{option.New(identSerialization{}, format)} +} + +type withKey struct { + alg jwa.KeyAlgorithm + key any + protected Headers + public Headers +} + +// Protected exists as an escape hatch to modify the header values after the fact +func (w *withKey) Protected(v Headers) Headers { + if w.protected == nil && v != nil { + w.protected = v + } + return w.protected +} + +// WithKey is used to pass a static algorithm/key pair to either `jws.Sign()` or `jws.Verify()`. +// +// The `alg` parameter is the identifier for the signature algorithm that should be used. +// It is of type `jwa.KeyAlgorithm` but in reality you can only pass `jwa.SignatureAlgorithm` +// types. It is this way so that the value in `(jwk.Key).Algorithm()` can be directly +// passed to the option. If you specify other algorithm types such as `jwa.KeyEncryptionAlgorithm`, +// then you will get an error when `jws.Sign()` or `jws.Verify()` is executed. +// +// The `alg` parameter cannot be "none" (jwa.NoSignature) for security reasons. +// You will have to use a separate, more explicit option to allow the use of "none" +// algorithm (WithInsecureNoSignature). 
+// +// The algorithm specified in the `alg` parameter MUST be able to support +// the type of key you provided, otherwise an error is returned. +// +// Any of the following is accepted for the `key` parameter: +// * A "raw" key (e.g. rsa.PrivateKey, ecdsa.PrivateKey, etc) +// * A crypto.Signer +// * A jwk.Key +// +// Note that due to technical reasons, this library is NOT able to differentiate +// between a valid/invalid key for given algorithm if the key implements crypto.Signer +// and the key is from an external library. For example, while we can tell that it is +// invalid to use `jwk.WithKey(jwa.RSA256, ecdsaPrivateKey)` because the key is +// presumably from `crypto/ecdsa` or this library, if you use a KMS wrapper +// that implements crypto.Signer that is outside of the go standard library or this +// library, we will not be able to properly catch the misuse of such keys -- +// the output will happily generate an ECDSA signature even in the presence of +// `jwa.RSA256` +// +// A `crypto.Signer` is used when the private part of a key is +// kept in an inaccessible location, such as hardware. +// `crypto.Signer` is currently supported for RSA, ECDSA, and EdDSA +// family of algorithms. You may consider using `github.com/jwx-go/crypto-signer` +// if you would like to use keys stored in GCP/AWS KMS services. +// +// If the key is a jwk.Key and the key contains a key ID (`kid` field), +// then it is added to the protected header generated by the signature. +// +// `jws.WithKey()` can further accept suboptions to change signing behavior +// when used with `jws.Sign()`. `jws.WithProtected()` and `jws.WithPublic()` +// can be passed to specify JWS headers that should be used whe signing. +// +// If the protected headers contain "b64" field, then the boolean value for the field +// is respected when serializing. That is, if you specify a header with +// `{"b64": false}`, then the payload is not base64 encoded. 
+// +// These suboptions are ignored when the `jws.WithKey()` option is used with `jws.Verify()`. +func WithKey(alg jwa.KeyAlgorithm, key any, options ...WithKeySuboption) SignVerifyOption { + // Implementation note: this option is shared between Sign() and + // Verify(). As such we don't create a KeyProvider here because + // if used in Sign() we would be doing something else. + var protected, public Headers + for _, option := range options { + switch option.Ident() { + case identProtectedHeaders{}: + if err := option.Value(&protected); err != nil { + panic(`jws.WithKey() option must be of type Headers`) + } + case identPublicHeaders{}: + if err := option.Value(&public); err != nil { + panic(`jws.WithKey() option must be of type Headers`) + } + } + } + + return &signVerifyOption{ + option.New(identKey{}, &withKey{ + alg: alg, + key: key, + protected: protected, + public: public, + }), + } +} + +// WithKeySet specifies a JWKS (jwk.Set) to use for verification. +// +// Because a JWKS can contain multiple keys and this library cannot tell +// which one of the keys should be used for verification, we by default +// require that both `alg` and `kid` fields in the JWS _and_ the +// key match before a key is considered to be used. +// +// There are ways to override this behavior, but they must be explicitly +// specified by the caller. +// +// To work with keys/JWS messages not having a `kid` field, you may specify +// the suboption `WithKeySetRequired` via `jws.WithKey(key, jws.WithRequireKid(false))`. +// This will allow the library to proceed without having to match the `kid` field. +// +// However, it will still check if the `alg` fields in the JWS message and the key(s) +// match. If you must work with JWS messages that do not have an `alg` field, +// you will need to use `jws.WithKeySet(key, jws.WithInferAlgorithm(true))`. +// +// See the documentation for `WithInferAlgorithm()` for more details. 
+func WithKeySet(set jwk.Set, options ...WithKeySetSuboption) VerifyOption { + requireKid := true + var useDefault, inferAlgorithm, multipleKeysPerKeyID bool + for _, option := range options { + switch option.Ident() { + case identRequireKid{}: + if err := option.Value(&requireKid); err != nil { + panic(`jws.WithKeySet() option must be of type bool`) + } + case identUseDefault{}: + if err := option.Value(&useDefault); err != nil { + panic(`jws.WithKeySet() option must be of type bool`) + } + case identMultipleKeysPerKeyID{}: + if err := option.Value(&multipleKeysPerKeyID); err != nil { + panic(`jws.WithKeySet() option must be of type bool`) + } + case identInferAlgorithmFromKey{}: + if err := option.Value(&inferAlgorithm); err != nil { + panic(`jws.WithKeySet() option must be of type bool`) + } + } + } + + return WithKeyProvider(&keySetProvider{ + set: set, + requireKid: requireKid, + useDefault: useDefault, + multipleKeysPerKeyID: multipleKeysPerKeyID, + inferAlgorithm: inferAlgorithm, + }) +} + +// WithVerifyAuto enables automatic verification of the signature using the JWKS specified in +// the `jku` header. Note that by default this option will _reject_ any jku +// provided by the JWS message. Read on for details. +// +// The JWKS is retrieved by the `jwk.Fetcher` specified in the first argument. +// If the fetcher object is nil, the default fetcher, which is the `jwk.Fetch()` +// function (wrapped in the `jwk.FetchFunc` type) is used. +// +// The remaining arguments are passed to the `(jwk.Fetcher).Fetch` method +// when the JWKS is retrieved. +// +// jws.WithVerifyAuto(nil) // uses jwk.Fetch +// jws.WithVerifyAuto(jwk.NewCachedFetcher(...)) // uses cached fetcher +// jws.WithVerifyAuto(myFetcher) // use your custom fetcher +// +// By default a whitelist that disallows all URLs is added to the options +// passed to the fetcher. You must explicitly specify a whitelist that allows +// the URLs you trust. 
This default behavior is provided because by design +// of the JWS specification it is the/ caller's responsibility to verify if +// the URL specified in the `jku` header can be trusted -- thus by default +// we trust nothing. +// +// Users are free to specify an open whitelist if they so choose, but this must +// be explicitly done: +// +// jws.WithVerifyAuto(nil, jwk.WithFetchWhitelist(jwk.InsecureWhitelist())) +// +// You can also use `jwk.CachedFetcher` to use cached JWKS objects, but do note +// that this object is not really designed to accommodate a large set of +// arbitrary URLs. Use `jwk.CachedFetcher` as the first argument if you only +// have a small set of URLs that you trust. For anything more complex, you should +// implement your own `jwk.Fetcher` object. +func WithVerifyAuto(f jwk.Fetcher, options ...jwk.FetchOption) VerifyOption { + // the option MUST start with a "disallow no whitelist" to force + // users provide a whitelist + options = append(append([]jwk.FetchOption(nil), jwk.WithFetchWhitelist(allowNoneWhitelist)), options...) + + return WithKeyProvider(jkuProvider{ + fetcher: f, + options: options, + }) +} + +type withInsecureNoSignature struct { + protected Headers +} + +// Protected exists as an escape hatch to modify the header values after the fact +func (w *withInsecureNoSignature) Protected(v Headers) Headers { + if w.protected == nil && v != nil { + w.protected = v + } + return w.protected +} + +// WithInsecureNoSignature creates an option that allows the user to use the +// "none" signature algorithm. 
+// +// Please note that this is insecure, and should never be used in production +// (this is exactly why specifying "none"/jwa.NoSignature to `jws.WithKey()` +// results in an error when `jws.Sign()` is called -- we do not allow using +// "none" by accident) +// +// TODO: create specific suboption set for this option +func WithInsecureNoSignature(options ...WithKeySuboption) SignOption { + var protected Headers + for _, option := range options { + switch option.Ident() { + case identProtectedHeaders{}: + if err := option.Value(&protected); err != nil { + panic(`jws.WithInsecureNoSignature() option must be of type Headers`) + } + } + } + + return &signOption{ + option.New(identInsecureNoSignature{}, + &withInsecureNoSignature{ + protected: protected, + }, + ), + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml new file mode 100644 index 0000000000..79dbb72500 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options.yaml @@ -0,0 +1,230 @@ +package_name: jws +output: jws/options_gen.go +interfaces: + - name: CompactOption + comment: | + CompactOption describes options that can be passed to `jws.Compact` + - name: VerifyOption + comment: | + VerifyOption describes options that can be passed to `jws.Verify` + methods: + - verifyOption + - parseOption + - name: SignOption + comment: | + SignOption describes options that can be passed to `jws.Sign` + - name: SignVerifyOption + methods: + - signOption + - verifyOption + - parseOption + comment: | + SignVerifyOption describes options that can be passed to either `jws.Verify` or `jws.Sign` + - name: SignVerifyCompactOption + methods: + - signOption + - verifyOption + - compactOption + - parseOption + comment: | + SignVerifyCompactOption describes options that can be passed to either `jws.Verify`, + `jws.Sign`, or `jws.Compact` + - name: WithJSONSuboption + concrete_type: withJSONSuboption + comment: | + JSONSuboption describes 
suboptions that can be passed to the `jws.WithJSON()` option. + - name: WithKeySuboption + comment: | + WithKeySuboption describes option types that can be passed to the `jws.WithKey()` + option. + - name: WithKeySetSuboption + comment: | + WithKeySetSuboption is a suboption passed to the `jws.WithKeySet()` option + - name: ParseOption + methods: + - readFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jwe.Parse` + - name: ReadFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile` + - name: SignVerifyParseOption + methods: + - signOption + - verifyOption + - parseOption + - readFileOption + - name: GlobalOption + comment: | + GlobalOption can be passed to `jws.Settings()` to set global options for the JWS package. +options: + - ident: Key + skip_option: true + - ident: Serialization + skip_option: true + - ident: Serialization + option_name: WithCompact + interface: SignVerifyParseOption + constant_value: fmtCompact + comment: | + WithCompact specifies that the result of `jws.Sign()` is serialized in + compact format. + + By default `jws.Sign()` will opt to use compact format, so you usually + do not need to specify this option other than to be explicit about it + - ident: Detached + interface: CompactOption + argument_type: bool + comment: | + WithDetached specifies that the `jws.Message` should be serialized in + JWS compact serialization with detached payload. The resulting octet + sequence will not contain the payload section. + + - ident: DetachedPayload + interface: SignVerifyOption + argument_type: '[]byte' + comment: | + WithDetachedPayload can be used to both sign or verify a JWS message with a + detached payload. + Note that this option does NOT populate the `b64` header, which is sometimes + required by other JWS implementations. + + + When this option is used for `jws.Sign()`, the first parameter (normally the payload) + must be set to `nil`. 
+ + If you have to verify using this option, you should know exactly how and why this works. + - ident: Base64Encoder + interface: SignVerifyCompactOption + argument_type: Base64Encoder + comment: | + WithBase64Encoder specifies the base64 encoder to be used while signing or + verifying the JWS message. By default, the raw URL base64 encoding (no padding) + is used. + - ident: Message + interface: VerifyOption + argument_type: '*Message' + comment: | + WithMessage can be passed to Verify() to obtain the jws.Message upon + a successful verification. + - ident: KeyUsed + interface: VerifyOption + argument_type: 'any' + comment: | + WithKeyUsed allows you to specify the `jws.Verify()` function to + return the key used for verification. This may be useful when + you specify multiple key sources or if you pass a `jwk.Set` + and you want to know which key was successful at verifying the + signature. + + `v` must be a pointer to an empty `any`. Do not use + `jwk.Key` here unless you are 100% sure that all keys that you + have provided are instances of `jwk.Key` (remember that the + jwx API allows users to specify a raw key such as *rsa.PublicKey) + - ident: ValidateKey + interface: SignVerifyOption + argument_type: bool + comment: | + WithValidateKey specifies whether the key used for signing or verification + should be validated before using. Note that this means calling + `key.Validate()` on the key, which in turn means that your key + must be a `jwk.Key` instance, or a key that can be converted to + a `jwk.Key` by calling `jwk.Import()`. This means that your + custom hardware-backed keys will probably not work. + + You can directly call `key.Validate()` yourself if you need to + mix keys that cannot be converted to `jwk.Key`. + + Please also note that use of this option will also result in + one extra conversion of raw keys to a `jwk.Key` instance. 
If you + care about shaving off as much as possible, consider using a + pre-validated key instead of using this option to validate + the key on-demand each time. + + By default, the key is not validated. + - ident: InferAlgorithmFromKey + interface: WithKeySetSuboption + argument_type: bool + comment: | + WithInferAlgorithmFromKey specifies whether the JWS signing algorithm name + should be inferred by looking at the provided key, in case the JWS + message or the key does not have a proper `alg` header. + + When this option is set to true, a list of algorithm(s) that is compatible + with the key type will be enumerated, and _ALL_ of them will be tried + against the key/message pair. If any of them succeeds, the verification + will be considered successful. + + Compared to providing explicit `alg` from the key this is slower, and + verification may fail to verify if somehow our heuristics are wrong + or outdated. + + Also, automatic detection of signature verification methods are always + more vulnerable for potential attack vectors. + + It is highly recommended that you fix your key to contain a proper `alg` + header field instead of resorting to using this option, but sometimes + it just needs to happen. + - ident: UseDefault + interface: WithKeySetSuboption + argument_type: bool + comment: | + WithUseDefault specifies that if and only if a jwk.Key contains + exactly one jwk.Key, that key should be used. + - ident: RequireKid + interface: WithKeySetSuboption + argument_type: bool + comment: | + WithRequiredKid specifies whether the keys in the jwk.Set should + only be matched if the target JWS message's Key ID and the Key ID + in the given key matches. + - ident: MultipleKeysPerKeyID + interface: WithKeySetSuboption + argument_type: bool + comment: | + WithMultipleKeysPerKeyID specifies if we should expect multiple keys + to match against a key ID. By default it is assumed that key IDs are + unique, i.e. 
for a given key ID, the key set only contains a single + key that has the matching ID. When this option is set to true, + multiple keys that match the same key ID in the set can be tried. + - ident: Pretty + interface: WithJSONSuboption + argument_type: bool + comment: | + WithPretty specifies whether the JSON output should be formatted and + indented + - ident: KeyProvider + interface: VerifyOption + argument_type: KeyProvider + - ident: Context + interface: VerifyOption + argument_type: context.Context + - ident: ProtectedHeaders + interface: WithKeySuboption + argument_type: Headers + comment: | + WithProtected is used with `jws.WithKey()` option when used with `jws.Sign()` + to specify a protected header to be attached to the JWS signature. + + It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()` + - ident: PublicHeaders + interface: WithKeySuboption + argument_type: Headers + comment: | + WithPublic is used with `jws.WithKey()` option when used with `jws.Sign()` + to specify a public header to be attached to the JWS signature. + + It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()` + + `jws.Sign()` will result in an error if `jws.WithPublic()` is used + and the serialization format is compact serialization. + - ident: FS + interface: ReadFileOption + argument_type: fs.FS + comment: | + WithFS specifies the source `fs.FS` object to read the file from. + - ident: LegacySigners + interface: GlobalOption + constant_value: true + comment: | + WithLegacySigners is a no-op option that exists only for backwards compatibility. diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go new file mode 100644 index 0000000000..7013e86bd7 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/options_gen.go @@ -0,0 +1,445 @@ +// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT. 
+ +package jws + +import ( + "context" + "io/fs" + + "github.com/lestrrat-go/option/v2" +) + +type Option = option.Interface + +// CompactOption describes options that can be passed to `jws.Compact` +type CompactOption interface { + Option + compactOption() +} + +type compactOption struct { + Option +} + +func (*compactOption) compactOption() {} + +// GlobalOption can be passed to `jws.Settings()` to set global options for the JWS package. +type GlobalOption interface { + Option + globalOption() +} + +type globalOption struct { + Option +} + +func (*globalOption) globalOption() {} + +// ReadFileOption is a type of `Option` that can be passed to `jwe.Parse` +type ParseOption interface { + Option + readFileOption() +} + +type parseOption struct { + Option +} + +func (*parseOption) readFileOption() {} + +// ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile` +type ReadFileOption interface { + Option + readFileOption() +} + +type readFileOption struct { + Option +} + +func (*readFileOption) readFileOption() {} + +// SignOption describes options that can be passed to `jws.Sign` +type SignOption interface { + Option + signOption() +} + +type signOption struct { + Option +} + +func (*signOption) signOption() {} + +// SignVerifyCompactOption describes options that can be passed to either `jws.Verify`, +// `jws.Sign`, or `jws.Compact` +type SignVerifyCompactOption interface { + Option + signOption() + verifyOption() + compactOption() + parseOption() +} + +type signVerifyCompactOption struct { + Option +} + +func (*signVerifyCompactOption) signOption() {} + +func (*signVerifyCompactOption) verifyOption() {} + +func (*signVerifyCompactOption) compactOption() {} + +func (*signVerifyCompactOption) parseOption() {} + +// SignVerifyOption describes options that can be passed to either `jws.Verify` or `jws.Sign` +type SignVerifyOption interface { + Option + signOption() + verifyOption() + parseOption() +} + +type signVerifyOption struct { + Option +} + +func 
(*signVerifyOption) signOption() {} + +func (*signVerifyOption) verifyOption() {} + +func (*signVerifyOption) parseOption() {} + +type SignVerifyParseOption interface { + Option + signOption() + verifyOption() + parseOption() + readFileOption() +} + +type signVerifyParseOption struct { + Option +} + +func (*signVerifyParseOption) signOption() {} + +func (*signVerifyParseOption) verifyOption() {} + +func (*signVerifyParseOption) parseOption() {} + +func (*signVerifyParseOption) readFileOption() {} + +// VerifyOption describes options that can be passed to `jws.Verify` +type VerifyOption interface { + Option + verifyOption() + parseOption() +} + +type verifyOption struct { + Option +} + +func (*verifyOption) verifyOption() {} + +func (*verifyOption) parseOption() {} + +// JSONSuboption describes suboptions that can be passed to the `jws.WithJSON()` option. +type WithJSONSuboption interface { + Option + withJSONSuboption() +} + +type withJSONSuboption struct { + Option +} + +func (*withJSONSuboption) withJSONSuboption() {} + +// WithKeySetSuboption is a suboption passed to the `jws.WithKeySet()` option +type WithKeySetSuboption interface { + Option + withKeySetSuboption() +} + +type withKeySetSuboption struct { + Option +} + +func (*withKeySetSuboption) withKeySetSuboption() {} + +// WithKeySuboption describes option types that can be passed to the `jws.WithKey()` +// option. 
+type WithKeySuboption interface { + Option + withKeySuboption() +} + +type withKeySuboption struct { + Option +} + +func (*withKeySuboption) withKeySuboption() {} + +type identBase64Encoder struct{} +type identContext struct{} +type identDetached struct{} +type identDetachedPayload struct{} +type identFS struct{} +type identInferAlgorithmFromKey struct{} +type identKey struct{} +type identKeyProvider struct{} +type identKeyUsed struct{} +type identLegacySigners struct{} +type identMessage struct{} +type identMultipleKeysPerKeyID struct{} +type identPretty struct{} +type identProtectedHeaders struct{} +type identPublicHeaders struct{} +type identRequireKid struct{} +type identSerialization struct{} +type identUseDefault struct{} +type identValidateKey struct{} + +func (identBase64Encoder) String() string { + return "WithBase64Encoder" +} + +func (identContext) String() string { + return "WithContext" +} + +func (identDetached) String() string { + return "WithDetached" +} + +func (identDetachedPayload) String() string { + return "WithDetachedPayload" +} + +func (identFS) String() string { + return "WithFS" +} + +func (identInferAlgorithmFromKey) String() string { + return "WithInferAlgorithmFromKey" +} + +func (identKey) String() string { + return "WithKey" +} + +func (identKeyProvider) String() string { + return "WithKeyProvider" +} + +func (identKeyUsed) String() string { + return "WithKeyUsed" +} + +func (identLegacySigners) String() string { + return "WithLegacySigners" +} + +func (identMessage) String() string { + return "WithMessage" +} + +func (identMultipleKeysPerKeyID) String() string { + return "WithMultipleKeysPerKeyID" +} + +func (identPretty) String() string { + return "WithPretty" +} + +func (identProtectedHeaders) String() string { + return "WithProtectedHeaders" +} + +func (identPublicHeaders) String() string { + return "WithPublicHeaders" +} + +func (identRequireKid) String() string { + return "WithRequireKid" +} + +func (identSerialization) 
String() string { + return "WithSerialization" +} + +func (identUseDefault) String() string { + return "WithUseDefault" +} + +func (identValidateKey) String() string { + return "WithValidateKey" +} + +// WithBase64Encoder specifies the base64 encoder to be used while signing or +// verifying the JWS message. By default, the raw URL base64 encoding (no padding) +// is used. +func WithBase64Encoder(v Base64Encoder) SignVerifyCompactOption { + return &signVerifyCompactOption{option.New(identBase64Encoder{}, v)} +} + +func WithContext(v context.Context) VerifyOption { + return &verifyOption{option.New(identContext{}, v)} +} + +// WithDetached specifies that the `jws.Message` should be serialized in +// JWS compact serialization with detached payload. The resulting octet +// sequence will not contain the payload section. +func WithDetached(v bool) CompactOption { + return &compactOption{option.New(identDetached{}, v)} +} + +// WithDetachedPayload can be used to both sign or verify a JWS message with a +// detached payload. +// Note that this option does NOT populate the `b64` header, which is sometimes +// required by other JWS implementations. +// +// When this option is used for `jws.Sign()`, the first parameter (normally the payload) +// must be set to `nil`. +// +// If you have to verify using this option, you should know exactly how and why this works. +func WithDetachedPayload(v []byte) SignVerifyOption { + return &signVerifyOption{option.New(identDetachedPayload{}, v)} +} + +// WithFS specifies the source `fs.FS` object to read the file from. +func WithFS(v fs.FS) ReadFileOption { + return &readFileOption{option.New(identFS{}, v)} +} + +// WithInferAlgorithmFromKey specifies whether the JWS signing algorithm name +// should be inferred by looking at the provided key, in case the JWS +// message or the key does not have a proper `alg` header. 
+// +// When this option is set to true, a list of algorithm(s) that is compatible +// with the key type will be enumerated, and _ALL_ of them will be tried +// against the key/message pair. If any of them succeeds, the verification +// will be considered successful. +// +// Compared to providing explicit `alg` from the key this is slower, and +// verification may fail to verify if somehow our heuristics are wrong +// or outdated. +// +// Also, automatic detection of signature verification methods are always +// more vulnerable for potential attack vectors. +// +// It is highly recommended that you fix your key to contain a proper `alg` +// header field instead of resorting to using this option, but sometimes +// it just needs to happen. +func WithInferAlgorithmFromKey(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identInferAlgorithmFromKey{}, v)} +} + +func WithKeyProvider(v KeyProvider) VerifyOption { + return &verifyOption{option.New(identKeyProvider{}, v)} +} + +// WithKeyUsed allows you to specify the `jws.Verify()` function to +// return the key used for verification. This may be useful when +// you specify multiple key sources or if you pass a `jwk.Set` +// and you want to know which key was successful at verifying the +// signature. +// +// `v` must be a pointer to an empty `any`. Do not use +// `jwk.Key` here unless you are 100% sure that all keys that you +// have provided are instances of `jwk.Key` (remember that the +// jwx API allows users to specify a raw key such as *rsa.PublicKey) +func WithKeyUsed(v any) VerifyOption { + return &verifyOption{option.New(identKeyUsed{}, v)} +} + +// WithLegacySigners is a no-op option that exists only for backwards compatibility. +func WithLegacySigners() GlobalOption { + return &globalOption{option.New(identLegacySigners{}, true)} +} + +// WithMessage can be passed to Verify() to obtain the jws.Message upon +// a successful verification. 
+func WithMessage(v *Message) VerifyOption { + return &verifyOption{option.New(identMessage{}, v)} +} + +// WithMultipleKeysPerKeyID specifies if we should expect multiple keys +// to match against a key ID. By default it is assumed that key IDs are +// unique, i.e. for a given key ID, the key set only contains a single +// key that has the matching ID. When this option is set to true, +// multiple keys that match the same key ID in the set can be tried. +func WithMultipleKeysPerKeyID(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identMultipleKeysPerKeyID{}, v)} +} + +// WithPretty specifies whether the JSON output should be formatted and +// indented +func WithPretty(v bool) WithJSONSuboption { + return &withJSONSuboption{option.New(identPretty{}, v)} +} + +// WithProtected is used with `jws.WithKey()` option when used with `jws.Sign()` +// to specify a protected header to be attached to the JWS signature. +// +// It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()` +func WithProtectedHeaders(v Headers) WithKeySuboption { + return &withKeySuboption{option.New(identProtectedHeaders{}, v)} +} + +// WithPublic is used with `jws.WithKey()` option when used with `jws.Sign()` +// to specify a public header to be attached to the JWS signature. +// +// It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()` +// +// `jws.Sign()` will result in an error if `jws.WithPublic()` is used +// and the serialization format is compact serialization. +func WithPublicHeaders(v Headers) WithKeySuboption { + return &withKeySuboption{option.New(identPublicHeaders{}, v)} +} + +// WithRequiredKid specifies whether the keys in the jwk.Set should +// only be matched if the target JWS message's Key ID and the Key ID +// in the given key matches. 
+func WithRequireKid(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identRequireKid{}, v)} +} + +// WithCompact specifies that the result of `jws.Sign()` is serialized in +// compact format. +// +// By default `jws.Sign()` will opt to use compact format, so you usually +// do not need to specify this option other than to be explicit about it +func WithCompact() SignVerifyParseOption { + return &signVerifyParseOption{option.New(identSerialization{}, fmtCompact)} +} + +// WithUseDefault specifies that if and only if a jwk.Key contains +// exactly one jwk.Key, that key should be used. +func WithUseDefault(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identUseDefault{}, v)} +} + +// WithValidateKey specifies whether the key used for signing or verification +// should be validated before using. Note that this means calling +// `key.Validate()` on the key, which in turn means that your key +// must be a `jwk.Key` instance, or a key that can be converted to +// a `jwk.Key` by calling `jwk.Import()`. This means that your +// custom hardware-backed keys will probably not work. +// +// You can directly call `key.Validate()` yourself if you need to +// mix keys that cannot be converted to `jwk.Key`. +// +// Please also note that use of this option will also result in +// one extra conversion of raw keys to a `jwk.Key` instance. If you +// care about shaving off as much as possible, consider using a +// pre-validated key instead of using this option to validate +// the key on-demand each time. +// +// By default, the key is not validated. 
+func WithValidateKey(v bool) SignVerifyOption { + return &signVerifyOption{option.New(identValidateKey{}, v)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/sign_context.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/sign_context.go new file mode 100644 index 0000000000..49abe0abca --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/sign_context.go @@ -0,0 +1,141 @@ +package jws + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/jwa" +) + +type signContext struct { + format int + detached bool + validateKey bool + payload []byte + encoder Base64Encoder + none *signatureBuilder // special signature builder + sigbuilders []*signatureBuilder +} + +var signContextPool = pool.New[*signContext](allocSignContext, freeSignContext) + +func allocSignContext() *signContext { + return &signContext{ + format: fmtCompact, + sigbuilders: make([]*signatureBuilder, 0, 1), + encoder: base64.DefaultEncoder(), + } +} + +func freeSignContext(ctx *signContext) *signContext { + ctx.format = fmtCompact + for _, sb := range ctx.sigbuilders { + signatureBuilderPool.Put(sb) + } + ctx.sigbuilders = ctx.sigbuilders[:0] + ctx.detached = false + ctx.validateKey = false + ctx.encoder = base64.DefaultEncoder() + ctx.none = nil + ctx.payload = nil + + return ctx +} + +func (sc *signContext) ProcessOptions(options []SignOption) error { + for _, option := range options { + switch option.Ident() { + case identSerialization{}: + if err := option.Value(&sc.format); err != nil { + return signerr(`failed to retrieve serialization option value: %w`, err) + } + case identInsecureNoSignature{}: + var data withInsecureNoSignature + if err := option.Value(&data); err != nil { + return signerr(`failed to retrieve insecure-no-signature option value: %w`, err) + } + sb := signatureBuilderPool.Get() + sb.alg = jwa.NoSignature() + sb.protected = data.protected + sb.signer = noneSigner{} + sc.none 
= sb + sc.sigbuilders = append(sc.sigbuilders, sb) + case identKey{}: + var data *withKey + if err := option.Value(&data); err != nil { + return signerr(`jws.Sign: invalid value for WithKey option: %w`, err) + } + + alg, ok := data.alg.(jwa.SignatureAlgorithm) + if !ok { + return signerr(`expected algorithm to be of type jwa.SignatureAlgorithm but got (%[1]q, %[1]T)`, data.alg) + } + + // No, we don't accept "none" here. + if alg == jwa.NoSignature() { + return signerr(`"none" (jwa.NoSignature) cannot be used with jws.WithKey`) + } + + sb := signatureBuilderPool.Get() + sb.alg = alg + sb.protected = data.protected + sb.key = data.key + sb.public = data.public + + s2, err := SignerFor(alg) + if err == nil { + sb.signer2 = s2 + } else { + s1, err := legacySignerFor(alg) + if err != nil { + sb.signer2 = defaultSigner{alg: alg} + } else { + sb.signer = s1 + } + } + + sc.sigbuilders = append(sc.sigbuilders, sb) + case identDetachedPayload{}: + if sc.payload != nil { + return signerr(`payload must be nil when jws.WithDetachedPayload() is specified`) + } + if err := option.Value(&sc.payload); err != nil { + return signerr(`failed to retrieve detached payload option value: %w`, err) + } + sc.detached = true + case identValidateKey{}: + if err := option.Value(&sc.validateKey); err != nil { + return signerr(`failed to retrieve validate-key option value: %w`, err) + } + case identBase64Encoder{}: + if err := option.Value(&sc.encoder); err != nil { + return signerr(`failed to retrieve base64-encoder option value: %w`, err) + } + } + } + return nil +} + +func (sc *signContext) PopulateMessage(m *Message) error { + m.payload = sc.payload + m.signatures = make([]*Signature, 0, len(sc.sigbuilders)) + + for i, sb := range sc.sigbuilders { + // Create signature for each builders + if sc.validateKey { + if err := validateKeyBeforeUse(sb.key); err != nil { + return fmt.Errorf(`failed to validate key for signature %d: %w`, i, err) + } + } + + sig, err := sb.Build(sc, m.payload) + if 
err != nil { + return fmt.Errorf(`failed to build signature %d: %w`, i, err) + } + + m.signatures = append(m.signatures, sig) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/signature_builder.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/signature_builder.go new file mode 100644 index 0000000000..fc09a69367 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/signature_builder.go @@ -0,0 +1,118 @@ +package jws + +import ( + "bytes" + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/jwx/v3/jws/jwsbb" +) + +var signatureBuilderPool = pool.New[*signatureBuilder](allocSignatureBuilder, freeSignatureBuilder) + +// signatureBuilder is a transient object that is used to build +// a single JWS signature. +// +// In a multi-signature JWS message, each message is paired with +// the following: +// - a signer (the object that takes a buffer and key and generates a signature) +// - a key (the key that is used to sign the payload) +// - protected headers (the headers that are protected by the signature) +// - public headers (the headers that are not protected by the signature) +// +// This object stores all of this information in one place. +// +// This object does NOT take care of any synchronization, because it is +// meant to be used in a single-threaded context. 
+type signatureBuilder struct { + alg jwa.SignatureAlgorithm + signer Signer + signer2 Signer2 + key any + protected Headers + public Headers +} + +func allocSignatureBuilder() *signatureBuilder { + return &signatureBuilder{} +} + +func freeSignatureBuilder(sb *signatureBuilder) *signatureBuilder { + sb.alg = jwa.EmptySignatureAlgorithm() + sb.signer = nil + sb.signer2 = nil + sb.key = nil + sb.protected = nil + sb.public = nil + return sb +} + +func (sb *signatureBuilder) Build(sc *signContext, payload []byte) (*Signature, error) { + protected := sb.protected + if protected == nil { + protected = NewHeaders() + } + + if err := protected.Set(AlgorithmKey, sb.alg); err != nil { + return nil, signerr(`failed to set "alg" header: %w`, err) + } + + if key, ok := sb.key.(jwk.Key); ok { + if kid, ok := key.KeyID(); ok && kid != "" { + if err := protected.Set(KeyIDKey, kid); err != nil { + return nil, signerr(`failed to set "kid" header: %w`, err) + } + } + } + + hdrs, err := mergeHeaders(sb.public, protected) + if err != nil { + return nil, signerr(`failed to merge headers: %w`, err) + } + + // raw, json format headers + hdrbuf, err := json.Marshal(hdrs) + if err != nil { + return nil, fmt.Errorf(`failed to marshal headers: %w`, err) + } + + // check if we need to base64 encode the payload + b64 := getB64Value(hdrs) + if !b64 && !sc.detached { + if bytes.IndexByte(payload, tokens.Period) != -1 { + return nil, fmt.Errorf(`payload must not contain a "."`) + } + } + + combined := jwsbb.SignBuffer(nil, hdrbuf, payload, sc.encoder, b64) + + var sig Signature + sig.protected = protected + sig.headers = sb.public + + if sb.signer2 != nil { + signature, err := sb.signer2.Sign(sb.key, combined) + if err != nil { + return nil, fmt.Errorf(`failed to sign payload: %w`, err) + } + sig.signature = signature + return &sig, nil + } + + if sb.signer == nil { + panic("can't get here") + } + + signature, err := sb.signer.Sign(combined, sb.key) + if err != nil { + return nil, 
fmt.Errorf(`failed to sign payload: %w`, err) + } + + sig.signature = signature + + return &sig, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go new file mode 100644 index 0000000000..99005e859a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/signer.go @@ -0,0 +1,185 @@ +package jws + +import ( + "fmt" + "strings" + "sync" + + "github.com/lestrrat-go/jwx/v3/jwa" +) + +// Signer2 is an interface that represents a per-signature algorithm signing +// operation. +type Signer2 interface { + Algorithm() jwa.SignatureAlgorithm + + // Sign takes a key and a payload, and returns the signature for the payload. + // The key type is restricted by the signature algorithm that this + // signer is associated with. + // + // (Note to users of legacy Signer interface: the method signature + // is different from the legacy Signer interface) + Sign(key any, payload []byte) ([]byte, error) +} + +var muSigner2DB sync.RWMutex +var signer2DB = make(map[jwa.SignatureAlgorithm]Signer2) + +type SignerFactory interface { + Create() (Signer, error) +} +type SignerFactoryFn func() (Signer, error) + +func (fn SignerFactoryFn) Create() (Signer, error) { + return fn() +} + +func init() { + // register the signers using jwsbb. These will be used by default. + for _, alg := range jwa.SignatureAlgorithms() { + if alg == jwa.NoSignature() { + continue + } + + if err := RegisterSigner(alg, defaultSigner{alg: alg}); err != nil { + panic(fmt.Sprintf("RegisterSigner failed: %v", err)) + } + } +} + +// SignerFor returns a Signer2 for the given signature algorithm. +// +// Currently, this function will never fail. It will always return a +// valid Signer2 object. The heuristic is as follows: +// 1. If a Signer2 is registered for the given algorithm, it will return that. +// 2. If a legacy Signer(Factory) is registered for the given algorithm, it will +// return a Signer2 that wraps the legacy Signer. +// 3. 
If no Signer2 or legacy Signer(Factory) is registered, it will return a +// default signer that uses jwsbb.Sign. +// +// 1 and 2 will take care of 99% of the cases. The only time 3 will happen is +// when you are using a custom algorithm that is not supported out of the box. +// +// jwsbb.Sign knows how to handle a static set of algorithms, so if the +// algorithm is not supported, it will return an error when you call +// `Sign` on the default signer. +func SignerFor(alg jwa.SignatureAlgorithm) (Signer2, error) { + muSigner2DB.RLock() + defer muSigner2DB.RUnlock() + + signer, ok := signer2DB[alg] + if ok { + return signer, nil + } + + s1, err := legacySignerFor(alg) + if err == nil { + return signerAdapter{signer: s1}, nil + } + + return defaultSigner{alg: alg}, nil +} + +var muSignerDB sync.RWMutex +var signerDB = make(map[jwa.SignatureAlgorithm]SignerFactory) + +// RegisterSigner is used to register a signer for the given +// algorithm. +// +// Please note that this function is intended to be passed a +// signer object as its second argument, but due to historical +// reasons the function signature is defined as taking `any` type. +// +// You should create a signer object that implements the `Signer2` +// interface to register a signer, unless you have legacy code that +// plugged into the `SignerFactory` interface. +// +// Unlike the `UnregisterSigner` function, this function automatically +// calls `jwa.RegisterSignatureAlgorithm` to register the algorithm +// in this module's algorithm database. +// +// For backwards compatibility, this function also accepts +// `SignerFactory` implementations, but this usage is deprecated. +// You should use `Signer2` implementations instead. +// +// If you want to completely remove an algorithm, you must call +// `jwa.UnregisterSignatureAlgorithm` yourself after calling +// `UnregisterSigner`. 
+func RegisterSigner(alg jwa.SignatureAlgorithm, f any) error { + jwa.RegisterSignatureAlgorithm(alg) + switch s := f.(type) { + case Signer2: + muSigner2DB.Lock() + signer2DB[alg] = s + muSigner2DB.Unlock() + case SignerFactory: + muSignerDB.Lock() + signerDB[alg] = s + muSignerDB.Unlock() + default: + return fmt.Errorf(`jws.RegisterSigner: unsupported type %T for algorithm %q`, f, alg) + } + return nil +} + +// UnregisterSigner removes the signer factory associated with +// the given algorithm, as well as the signer instance created +// by the factory. +// +// Note that when you call this function, the algorithm itself is +// not automatically unregistered from this module's algorithm database. +// This is because the algorithm may still be required for verification or +// some other operation (however unlikely, it is still possible). +// Therefore, in order to completely remove the algorithm, you must +// call `jwa.UnregisterSignatureAlgorithm` yourself. +func UnregisterSigner(alg jwa.SignatureAlgorithm) { + muSigner2DB.Lock() + delete(signer2DB, alg) + muSigner2DB.Unlock() + + muSignerDB.Lock() + delete(signerDB, alg) + muSignerDB.Unlock() + // Remove previous signer + removeSigner(alg) +} + +// NewSigner creates a signer that signs payloads using the given signature algorithm. +// This function is deprecated, and will either be removed or re-purposed using +// a different signature. +// +// When you want to load a Signer object, you should use `SignerFor()` instead. 
+func NewSigner(alg jwa.SignatureAlgorithm) (Signer, error) { + s, err := newLegacySigner(alg) + if err == nil { + return s, nil + } + + if strings.HasPrefix(err.Error(), `jws.NewSigner: unsupported signature algorithm`) { + // When newLegacySigner fails, automatically trigger to enable signers + enableLegacySignersOnce.Do(enableLegacySigners) + return newLegacySigner(alg) + } + return nil, err +} + +func newLegacySigner(alg jwa.SignatureAlgorithm) (Signer, error) { + muSignerDB.RLock() + f, ok := signerDB[alg] + muSignerDB.RUnlock() + + if ok { + return f.Create() + } + return nil, fmt.Errorf(`jws.NewSigner: unsupported signature algorithm "%s"`, alg) +} + +type noneSigner struct{} + +func (noneSigner) Algorithm() jwa.SignatureAlgorithm { + return jwa.NoSignature() +} + +func (noneSigner) Sign([]byte, any) ([]byte, error) { + return nil, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/verifier.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/verifier.go new file mode 100644 index 0000000000..70b91c2938 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/verifier.go @@ -0,0 +1,154 @@ +package jws + +import ( + "fmt" + "sync" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jws/jwsbb" +) + +type defaultVerifier struct { + alg jwa.SignatureAlgorithm +} + +func (v defaultVerifier) Algorithm() jwa.SignatureAlgorithm { + return v.alg +} + +func (v defaultVerifier) Verify(key any, payload, signature []byte) error { + if err := jwsbb.Verify(key, v.alg.String(), payload, signature); err != nil { + return verifyError{verificationError{err}} + } + return nil +} + +type Verifier2 interface { + Verify(key any, payload, signature []byte) error +} + +var muVerifier2DB sync.RWMutex +var verifier2DB = make(map[jwa.SignatureAlgorithm]Verifier2) + +type verifierAdapter struct { + v Verifier +} + +func (v verifierAdapter) Verify(key any, payload, signature []byte) error { + if err := v.v.Verify(payload, signature, key); err != nil { + return 
verifyError{verificationError{err}} + } + return nil +} + +// VerifierFor returns a Verifier2 for the given signature algorithm. +// +// Currently, this function will never fail. It will always return a +// valid Verifier2 object. The heuristic is as follows: +// 1. If a Verifier2 is registered for the given algorithm, it will return that. +// 2. If a legacy Verifier(Factory) is registered for the given algorithm, it will +// return a Verifier2 that wraps the legacy Verifier. +// 3. If no Verifier2 or legacy Verifier(Factory) is registered, it will return a +// default verifier that uses jwsbb.Verify. +// +// jwsbb.Verify knows how to handle a static set of algorithms, so if the +// algorithm is not supported, it will return an error when you call +// `Verify` on the default verifier. +func VerifierFor(alg jwa.SignatureAlgorithm) (Verifier2, error) { + muVerifier2DB.RLock() + defer muVerifier2DB.RUnlock() + + v2, ok := verifier2DB[alg] + if ok { + return v2, nil + } + + v1, err := NewVerifier(alg) + if err == nil { + return verifierAdapter{v: v1}, nil + } + + return defaultVerifier{alg: alg}, nil +} + +type VerifierFactory interface { + Create() (Verifier, error) +} +type VerifierFactoryFn func() (Verifier, error) + +func (fn VerifierFactoryFn) Create() (Verifier, error) { + return fn() +} + +var muVerifierDB sync.RWMutex +var verifierDB = make(map[jwa.SignatureAlgorithm]VerifierFactory) + +// RegisterVerifier is used to register a verifier for the given +// algorithm. +// +// Please note that this function is intended to be passed a +// verifier object as its second argument, but due to historical +// reasons the function signature is defined as taking `any` type. +// +// You should create a signer object that implements the `Verifier2` +// interface to register a signer, unless you have legacy code that +// plugged into the `SignerFactory` interface. 
+// +// Unlike the `UnregisterVerifier` function, this function automatically +// calls `jwa.RegisterSignatureAlgorithm` to register the algorithm +// in this module's algorithm database. +func RegisterVerifier(alg jwa.SignatureAlgorithm, f any) error { + jwa.RegisterSignatureAlgorithm(alg) + switch v := f.(type) { + case Verifier2: + muVerifier2DB.Lock() + verifier2DB[alg] = v + muVerifier2DB.Unlock() + + muVerifierDB.Lock() + delete(verifierDB, alg) + muVerifierDB.Unlock() + case VerifierFactory: + muVerifierDB.Lock() + verifierDB[alg] = v + muVerifierDB.Unlock() + + muVerifier2DB.Lock() + delete(verifier2DB, alg) + muVerifier2DB.Unlock() + default: + return fmt.Errorf(`jws.RegisterVerifier: unsupported type %T for algorithm %q`, f, alg) + } + return nil +} + +// UnregisterVerifier removes the verifier factory associated with +// the given algorithm. +// +// Note that when you call this function, the algorithm itself is +// not automatically unregistered from this module's algorithm database. +// This is because the algorithm may still be required for signing or +// some other operation (however unlikely, it is still possible). +// Therefore, in order to completely remove the algorithm, you must +// call `jwa.UnregisterSignatureAlgorithm` yourself. +func UnregisterVerifier(alg jwa.SignatureAlgorithm) { + muVerifier2DB.Lock() + delete(verifier2DB, alg) + muVerifier2DB.Unlock() + + muVerifierDB.Lock() + delete(verifierDB, alg) + muVerifierDB.Unlock() +} + +// NewVerifier creates a verifier that verifies payloads using the given signature algorithm. 
+func NewVerifier(alg jwa.SignatureAlgorithm) (Verifier, error) { + muVerifierDB.RLock() + f, ok := verifierDB[alg] + muVerifierDB.RUnlock() + + if ok { + return f.Create() + } + return nil, fmt.Errorf(`jws.NewVerifier: unsupported signature algorithm "%s"`, alg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jws/verify_context.go b/vendor/github.com/lestrrat-go/jwx/v3/jws/verify_context.go new file mode 100644 index 0000000000..b4807d569c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jws/verify_context.go @@ -0,0 +1,211 @@ +package jws + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jws/jwsbb" +) + +// verifyContext holds the state during JWS verification +type verifyContext struct { + parseOptions []ParseOption + dst *Message + detachedPayload []byte + keyProviders []KeyProvider + keyUsed any + validateKey bool + encoder Base64Encoder + //nolint:containedctx + ctx context.Context +} + +var verifyContextPool = pool.New[*verifyContext](allocVerifyContext, freeVerifyContext) + +func allocVerifyContext() *verifyContext { + return &verifyContext{ + encoder: base64.DefaultEncoder(), + ctx: context.Background(), + } +} + +func freeVerifyContext(vc *verifyContext) *verifyContext { + vc.parseOptions = vc.parseOptions[:0] + vc.dst = nil + vc.detachedPayload = nil + vc.keyProviders = vc.keyProviders[:0] + vc.keyUsed = nil + vc.validateKey = false + vc.encoder = base64.DefaultEncoder() + vc.ctx = context.Background() + return vc +} + +func (vc *verifyContext) ProcessOptions(options []VerifyOption) error { + //nolint:forcetypeassert + for _, option := range options { + switch option.Ident() { + case identMessage{}: + if err := option.Value(&vc.dst); err != nil { + return verifyerr(`invalid value for 
option WithMessage: %w`, err) + } + case identDetachedPayload{}: + if err := option.Value(&vc.detachedPayload); err != nil { + return verifyerr(`invalid value for option WithDetachedPayload: %w`, err) + } + case identKey{}: + var pair *withKey + if err := option.Value(&pair); err != nil { + return verifyerr(`invalid value for option WithKey: %w`, err) + } + vc.keyProviders = append(vc.keyProviders, &staticKeyProvider{ + alg: pair.alg.(jwa.SignatureAlgorithm), + key: pair.key, + }) + case identKeyProvider{}: + var kp KeyProvider + if err := option.Value(&kp); err != nil { + return verifyerr(`failed to retrieve key-provider option value: %w`, err) + } + vc.keyProviders = append(vc.keyProviders, kp) + case identKeyUsed{}: + if err := option.Value(&vc.keyUsed); err != nil { + return verifyerr(`failed to retrieve key-used option value: %w`, err) + } + case identContext{}: + if err := option.Value(&vc.ctx); err != nil { + return verifyerr(`failed to retrieve context option value: %w`, err) + } + case identValidateKey{}: + if err := option.Value(&vc.validateKey); err != nil { + return verifyerr(`failed to retrieve validate-key option value: %w`, err) + } + case identSerialization{}: + vc.parseOptions = append(vc.parseOptions, option.(ParseOption)) + case identBase64Encoder{}: + if err := option.Value(&vc.encoder); err != nil { + return verifyerr(`failed to retrieve base64-encoder option value: %w`, err) + } + default: + return verifyerr(`invalid jws.VerifyOption %q passed`, `With`+strings.TrimPrefix(fmt.Sprintf(`%T`, option.Ident()), `jws.ident`)) + } + } + + if len(vc.keyProviders) < 1 { + return verifyerr(`no key providers have been provided (see jws.WithKey(), jws.WithKeySet(), jws.WithVerifyAuto(), and jws.WithKeyProvider()`) + } + + return nil +} + +func (vc *verifyContext) VerifyMessage(buf []byte) ([]byte, error) { + msg, err := Parse(buf, vc.parseOptions...) 
+ if err != nil { + return nil, verifyerr(`failed to parse jws: %w`, err) + } + defer msg.clearRaw() + + if vc.detachedPayload != nil { + if len(msg.payload) != 0 { + return nil, verifyerr(`can't specify detached payload for JWS with payload`) + } + + msg.payload = vc.detachedPayload + } + + verifyBuf := pool.ByteSlice().Get() + + // Because deferred functions bind to the current value of the variable, + // we can't just use `defer pool.ByteSlice().Put(verifyBuf)` here. + // Instead, we use a closure to reference the _variable_. + // it would be better if we could call it directly, but there are + // too many place we may return from this function + defer func() { + pool.ByteSlice().Put(verifyBuf) + }() + + errs := pool.ErrorSlice().Get() + defer func() { + pool.ErrorSlice().Put(errs) + }() + for idx, sig := range msg.signatures { + var rawHeaders []byte + if rbp, ok := sig.protected.(interface{ rawBuffer() []byte }); ok { + if raw := rbp.rawBuffer(); raw != nil { + rawHeaders = raw + } + } + + if rawHeaders == nil { + protected, err := json.Marshal(sig.protected) + if err != nil { + return nil, verifyerr(`failed to marshal "protected" for signature #%d: %w`, idx+1, err) + } + rawHeaders = protected + } + + verifyBuf = verifyBuf[:0] + verifyBuf = jwsbb.SignBuffer(verifyBuf, rawHeaders, msg.payload, vc.encoder, msg.b64) + for i, kp := range vc.keyProviders { + var sink algKeySink + if err := kp.FetchKeys(vc.ctx, &sink, sig, msg); err != nil { + return nil, verifyerr(`key provider %d failed: %w`, i, err) + } + + for _, pair := range sink.list { + // alg is converted here because pair.alg is of type jwa.KeyAlgorithm. 
+ // this may seem ugly, but we're trying to avoid declaring separate + // structs for `alg jwa.KeyEncryptionAlgorithm` and `alg jwa.SignatureAlgorithm` + //nolint:forcetypeassert + alg := pair.alg.(jwa.SignatureAlgorithm) + key := pair.key + + if err := vc.tryKey(verifyBuf, alg, key, msg, sig); err != nil { + errs = append(errs, verifyerr(`failed to verify signature #%d with key %T: %w`, idx+1, key, err)) + continue + } + + return msg.payload, nil + } + } + errs = append(errs, verifyerr(`signature #%d could not be verified with any of the keys`, idx+1)) + } + return nil, verifyerr(`could not verify message using any of the signatures or keys: %w`, errors.Join(errs...)) +} + +func (vc *verifyContext) tryKey(verifyBuf []byte, alg jwa.SignatureAlgorithm, key any, msg *Message, sig *Signature) error { + if vc.validateKey { + if err := validateKeyBeforeUse(key); err != nil { + return fmt.Errorf(`failed to validate key before signing: %w`, err) + } + } + + verifier, err := VerifierFor(alg) + if err != nil { + return fmt.Errorf(`failed to get verifier for algorithm %q: %w`, alg, err) + } + + if err := verifier.Verify(key, verifyBuf, sig.signature); err != nil { + return verificationError{err} + } + + // Verification succeeded + if vc.keyUsed != nil { + if err := blackmagic.AssignIfCompatible(vc.keyUsed, key); err != nil { + return fmt.Errorf(`failed to assign used key (%T) to %T: %w`, key, vc.keyUsed, err) + } + } + + if vc.dst != nil { + *(vc.dst) = *msg + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwt/BUILD.bazel new file mode 100644 index 0000000000..86197d348a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/BUILD.bazel @@ -0,0 +1,72 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "jwt", + srcs = [ + "builder_gen.go", + "errors.go", + "filter.go", + "fastpath.go", + "http.go", + "interface.go", + "io.go", + "jwt.go", + "options.go", + 
"options_gen.go", + "serialize.go", + "token_gen.go", + "token_options.go", + "token_options_gen.go", + "validate.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwt", + visibility = ["//visibility:public"], + deps = [ + "//:jwx", + "//internal/base64", + "//transform", + "//internal/json", + "//internal/tokens", + "//internal/pool", + "//jwa", + "//jwe", + "//jwk", + "//jws", + "//jws/jwsbb", + "//jwt/internal/types", + "//jwt/internal/errors", + "@com_github_lestrrat_go_blackmagic//:blackmagic", + "@com_github_lestrrat_go_option_v2//:option", + ], +) + +go_test( + name = "jwt_test", + srcs = [ + "jwt_test.go", + "options_gen_test.go", + "token_options_test.go", + "token_test.go", + "validate_test.go", + "verify_test.go", + ], + embed = [":jwt"], + deps = [ + "//internal/json", + "//internal/jwxtest", + "//jwa", + "//jwe", + "//jwk", + "//jwk/ecdsa", + "//jws", + "//jwt/internal/types", + "@com_github_lestrrat_go_httprc_v3//:httprc", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":jwt", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/README.md b/vendor/github.com/lestrrat-go/jwx/v3/jwt/README.md new file mode 100644 index 0000000000..a6b5664c94 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/README.md @@ -0,0 +1,224 @@ +# JWT [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v3/jwt.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v3/jwt) + +Package jwt implements JSON Web Tokens as described in [RFC7519](https://tools.ietf.org/html/rfc7519). 
+ +* Convenience methods for oft-used keys ("aud", "sub", "iss", etc) +* Convenience functions to extract/parse from http.Request, http.Header, url.Values +* Ability to Get/Set arbitrary keys +* Conversion to and from JSON +* Generate signed tokens +* Verify signed tokens +* Extra support for OpenID tokens via [github.com/lestrrat-go/jwx/v3/jwt/openid](./jwt/openid) + +How-to style documentation can be found in the [docs directory](../docs). + +More examples are located in the examples directory ([jwt_example_test.go](../examples/jwt_example_test.go)) + +# SYNOPSIS + +## Verify a signed JWT + +```go + token, err := jwt.Parse(payload, jwt.WithKey(alg, key)) + if err != nil { + fmt.Printf("failed to parse payload: %s\n", err) + } +``` + +## Token Usage + +```go +func ExampleJWT() { + const aLongLongTimeAgo = 233431200 + + t := jwt.New() + t.Set(jwt.SubjectKey, `https://github.com/lestrrat-go/jwx/v3/jwt`) + t.Set(jwt.AudienceKey, `Golang Users`) + t.Set(jwt.IssuedAtKey, time.Unix(aLongLongTimeAgo, 0)) + t.Set(`privateClaimKey`, `Hello, World!`) + + buf, err := json.MarshalIndent(t, "", " ") + if err != nil { + fmt.Printf("failed to generate JSON: %s\n", err) + return + } + + fmt.Printf("%s\n", buf) + fmt.Printf("aud -> '%s'\n", t.Audience()) + fmt.Printf("iat -> '%s'\n", t.IssuedAt().Format(time.RFC3339)) + if v, ok := t.Get(`privateClaimKey`); ok { + fmt.Printf("privateClaimKey -> '%s'\n", v) + } + fmt.Printf("sub -> '%s'\n", t.Subject()) + + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Printf("failed to generate private key: %s", err) + return + } + + { + // Signing a token (using raw rsa.PrivateKey) + signed, err := jwt.Sign(t, jwt.WithKey(jwa.RS256, key)) + if err != nil { + log.Printf("failed to sign token: %s", err) + return + } + _ = signed + } + + { + // Signing a token (using JWK) + jwkKey, err := jwk.New(key) + if err != nil { + log.Printf("failed to create JWK key: %s", err) + return + } + + signed, err := jwt.Sign(t, 
jwt.WithKey(jwa.RS256, jwkKey)) + if err != nil { + log.Printf("failed to sign token: %s", err) + return + } + _ = signed + } +} +``` + +## OpenID Claims + +`jwt` package can work with token types other than the default one. +For OpenID claims, use the token created by `openid.New()`, or +use the `jwt.WithToken(openid.New())`. If you need to use other specialized +claims, use `jwt.WithToken()` to specify the exact token type + +```go +func Example_openid() { + const aLongLongTimeAgo = 233431200 + + t := openid.New() + t.Set(jwt.SubjectKey, `https://github.com/lestrrat-go/jwx/v3/jwt`) + t.Set(jwt.AudienceKey, `Golang Users`) + t.Set(jwt.IssuedAtKey, time.Unix(aLongLongTimeAgo, 0)) + t.Set(`privateClaimKey`, `Hello, World!`) + + addr := openid.NewAddress() + addr.Set(openid.AddressPostalCodeKey, `105-0011`) + addr.Set(openid.AddressCountryKey, `日本`) + addr.Set(openid.AddressRegionKey, `東京都`) + addr.Set(openid.AddressLocalityKey, `港区`) + addr.Set(openid.AddressStreetAddressKey, `芝公園 4-2-8`) + t.Set(openid.AddressKey, addr) + + buf, err := json.MarshalIndent(t, "", " ") + if err != nil { + fmt.Printf("failed to generate JSON: %s\n", err) + return + } + fmt.Printf("%s\n", buf) + + t2, err := jwt.Parse(buf, jwt.WithToken(openid.New())) + if err != nil { + fmt.Printf("failed to parse JSON: %s\n", err) + return + } + if _, ok := t2.(openid.Token); !ok { + fmt.Printf("using jwt.WithToken(openid.New()) creates an openid.Token instance") + return + } +} +``` + +# FAQ + +## Why is `jwt.Token` an interface? + +In this package, `jwt.Token` is an interface. This is not an arbitrary choice: there are actual reason for the type being an interface. + +We understand that if you are migrating from another library this may be a deal breaker, but we hope you can at least appreciate the fact that this was not done arbitrarily, and that there were real technical trade offs that were evaluated. 
+ +### No uninitialized tokens + +First and foremost, by making it an interface, you cannot use an uninitialized token: + +```go +var token1 jwt.Token // this is nil, you can't just start using this +if err := json.Unmarshal(data, &token1); err != nil { // so you can't do this + ... +} + +// But you _can_ do this, and we _want_ you to do this so the object is properly initialized +token2 = jwt.New() +if err := json.Unmarshal(data, &token2); err != nil { // actually, in practice you should use jwt.Parse() + .... +} +``` + +### But why does it need to be initialized? + +There are several reasons, but one of the reasons is that I'm using a sync.Mutex to avoid races. We want this to be properly initialized. + +The other reason is that we support custom claims out of the box. The `map[string]interface{}` container is initialized during new. This is important when checking for equality using reflect-y methods (akin to `reflect.DeepEqual`), because if you allowed zero values, you could end up with "empty" tokens, that actually differ. Consider the following: + +```go +// assume jwt.Token was s struct, not an interface +token1 := jwt.Token{ privateClaims: make(map[string]interface{}) } +token2 := jwt.Token{ privateClaims: nil } +``` + +These are semantically equivalent, but users would need to be aware of this difference when comparing values. By forcing the user to use a constructor, we can force a uniform empty state. + +### Standard way to store values + +Unlike some other libraries, this library allows you to store standard claims and non-standard claims in the same token. + +You _want_ to store standard claims in a properly typed field, which we do for fields like "iss", "nbf", etc. 
+But for non-standard claims, there is just no way of doing this, so we _have_ to use a container like `map[string]interface{}` + +This means that if you allow direct access to these fields via a struct, you will have two different ways to access the claims, which is confusing: + +```go +tok.Issuer = ... +tok.PrivateClaims["foo"] = ... +``` + +So we want to hide where this data is stored, and use a standard method like `Set()` and `Get()` to store all the values. +At this point you are effectively going to hide the implementation detail from the user, so you end up with a struct like below, which is fundamentally not so different from providing just an interface{}: + +```go +type Token struct { + // unexported fields +} + +func (tok *Token) Set(...) { ... } +``` + +### Use of pointers to store values + +We wanted to differentiate the state between a claim being uninitialized, and a claim being initialized to empty. + +So we use pointers to store values: + +```go +type stdToken struct { + .... + issuer *string // if nil, uninitialized. if &(""), initialized to empty +} +``` + +This is fine for us, but we doubt that this would be something users would want to do. +This is a subtle difference, but cluttering up the API with slight variations of the same type (i.e. pointers vs non-pointers) seemed like a bad idea to us. + +```go +token.Issuer = &issuer // want to avoid this + +token.Set(jwt.IssuerKey, "foobar") // so this is what we picked +``` + +This way users no longer need to care how the data is internally stored. + +### Allow more than one type of token through the same interface + +`dgrijalva/jwt-go` does this in a different way, but we felt that it would be more intuitive for all tokens to follow a single interface so there is fewer type conversions required. + +See the `openid` token for an example. 
diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/builder_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/builder_gen.go new file mode 100644 index 0000000000..48d0375a28 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/builder_gen.go @@ -0,0 +1,88 @@ +// Code generated by tools/cmd/genjwt/main.go. DO NOT EDIT. + +package jwt + +import ( + "fmt" + "sync" + "time" +) + +// Builder is a convenience wrapper around the New() constructor +// and the Set() methods to assign values to Token claims. +// Users can successively call Claim() on the Builder, and have it +// construct the Token when Build() is called. This alleviates the +// need for the user to check for the return value of every single +// Set() method call. +// Note that each call to Claim() overwrites the value set from the +// previous call. +type Builder struct { + mu sync.Mutex + claims map[string]any +} + +func NewBuilder() *Builder { + return &Builder{} +} + +func (b *Builder) init() { + if b.claims == nil { + b.claims = make(map[string]any) + } +} + +func (b *Builder) Claim(name string, value any) *Builder { + b.mu.Lock() + defer b.mu.Unlock() + b.init() + b.claims[name] = value + return b +} + +func (b *Builder) Audience(v []string) *Builder { + return b.Claim(AudienceKey, v) +} + +func (b *Builder) Expiration(v time.Time) *Builder { + return b.Claim(ExpirationKey, v) +} + +func (b *Builder) IssuedAt(v time.Time) *Builder { + return b.Claim(IssuedAtKey, v) +} + +func (b *Builder) Issuer(v string) *Builder { + return b.Claim(IssuerKey, v) +} + +func (b *Builder) JwtID(v string) *Builder { + return b.Claim(JwtIDKey, v) +} + +func (b *Builder) NotBefore(v time.Time) *Builder { + return b.Claim(NotBeforeKey, v) +} + +func (b *Builder) Subject(v string) *Builder { + return b.Claim(SubjectKey, v) +} + +// Build creates a new token based on the claims that the builder has received +// so far. 
If a claim cannot be set, then the method returns a nil Token with +// a en error as a second return value +// +// Once `Build()` is called, all claims are cleared from the Builder, and the +// Builder can be reused to build another token +func (b *Builder) Build() (Token, error) { + b.mu.Lock() + claims := b.claims + b.claims = nil + b.mu.Unlock() + tok := New() + for k, v := range claims { + if err := tok.Set(k, v); err != nil { + return nil, fmt.Errorf(`failed to set claim %q: %w`, k, err) + } + } + return tok, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/errors.go new file mode 100644 index 0000000000..c5b529c1ae --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/errors.go @@ -0,0 +1,95 @@ +package jwt + +import ( + jwterrs "github.com/lestrrat-go/jwx/v3/jwt/internal/errors" +) + +// ClaimNotFoundError returns the opaque error value that is returned when +// `jwt.Get` fails to find the requested claim. +// +// This value should only be used for comparison using `errors.Is()`. +func ClaimNotFoundError() error { + return jwterrs.ErrClaimNotFound +} + +// ClaimAssignmentFailedError returns the opaque error value that is returned +// when `jwt.Get` fails to assign the value to the destination. For example, +// this can happen when the value is a string, but you passed a &int as the +// destination. +// +// This value should only be used for comparison using `errors.Is()`. +func ClaimAssignmentFailedError() error { + return jwterrs.ErrClaimAssignmentFailed +} + +// UnknownPayloadTypeError returns the opaque error value that is returned when +// `jwt.Parse` fails due to not being able to deduce the format of +// the incoming buffer. +// +// This value should only be used for comparison using `errors.Is()`. 
+func UnknownPayloadTypeError() error { + return jwterrs.ErrUnknownPayloadType +} + +// ParseError returns the opaque error that is returned from jwt.Parse when +// the input is not a valid JWT. +// +// This value should only be used for comparison using `errors.Is()`. +func ParseError() error { + return jwterrs.ErrParse +} + +// ValidateError returns the immutable error used for validation errors +// +// This value should only be used for comparison using `errors.Is()`. +func ValidateError() error { + return jwterrs.ErrValidateDefault +} + +// InvalidIssuerError returns the immutable error used when `iss` claim +// is not satisfied +// +// This value should only be used for comparison using `errors.Is()`. +func InvalidIssuerError() error { + return jwterrs.ErrInvalidIssuerDefault +} + +// TokenExpiredError returns the immutable error used when `exp` claim +// is not satisfied. +// +// This value should only be used for comparison using `errors.Is()`. +func TokenExpiredError() error { + return jwterrs.ErrTokenExpiredDefault +} + +// InvalidIssuedAtError returns the immutable error used when `iat` claim +// is not satisfied +// +// This value should only be used for comparison using `errors.Is()`. +func InvalidIssuedAtError() error { + return jwterrs.ErrInvalidIssuedAtDefault +} + +// TokenNotYetValidError returns the immutable error used when `nbf` claim +// is not satisfied +// +// This value should only be used for comparison using `errors.Is()`. +func TokenNotYetValidError() error { + return jwterrs.ErrTokenNotYetValidDefault +} + +// InvalidAudienceError returns the immutable error used when `aud` claim +// is not satisfied +// +// This value should only be used for comparison using `errors.Is()`. +func InvalidAudienceError() error { + return jwterrs.ErrInvalidAudienceDefault +} + +// MissingRequiredClaimError returns the immutable error used when the claim +// specified by `jwt.IsRequired()` is not present. 
+// +// This value should only be used for comparison using `errors.Is()`. +func MissingRequiredClaimError() error { + return jwterrs.ErrMissingRequiredClaimDefault +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/fastpath.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/fastpath.go new file mode 100644 index 0000000000..43f7c966da --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/fastpath.go @@ -0,0 +1,71 @@ +package jwt + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/base64" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/jwx/v3/jws" + "github.com/lestrrat-go/jwx/v3/jws/jwsbb" +) + +// signFast reinvents the wheel a bit to avoid the overhead of +// going through the entire jws.Sign() machinery. +func signFast(t Token, alg jwa.SignatureAlgorithm, key any) ([]byte, error) { + algstr := alg.String() + + var kid string + if jwkKey, ok := key.(jwk.Key); ok { + if v, ok := jwkKey.KeyID(); ok && v != "" { + kid = v + } + } + + // Setup headers + // {"alg":"","typ":"JWT"} + // 1234567890123456789012 + want := len(algstr) + 22 + // also, if kid != "", we need to add "kid":"$kid" + if kid != "" { + // "kid":"" + // 12345689 + want += len(kid) + 9 + } + hdr := pool.ByteSlice().GetCapacity(want) + hdr = append(hdr, '{', '"', 'a', 'l', 'g', '"', ':', '"') + hdr = append(hdr, algstr...) + hdr = append(hdr, '"') + if kid != "" { + hdr = append(hdr, ',', '"', 'k', 'i', 'd', '"', ':', '"') + hdr = append(hdr, kid...) 
+ hdr = append(hdr, '"') + } + hdr = append(hdr, ',', '"', 't', 'y', 'p', '"', ':', '"', 'J', 'W', 'T', '"', '}') + defer pool.ByteSlice().Put(hdr) + + // setup the buffer to sign with + payload, err := json.Marshal(t) + if err != nil { + return nil, fmt.Errorf(`jwt.signFast: failed to marshal token payload: %w`, err) + } + + combined := jwsbb.SignBuffer(nil, hdr, payload, base64.DefaultEncoder(), true) + signer, err := jws.SignerFor(alg) + if err != nil { + return nil, fmt.Errorf(`jwt.signFast: failed to get signer for %s: %w`, alg, err) + } + + signature, err := signer.Sign(key, combined) + if err != nil { + return nil, fmt.Errorf(`jwt.signFast: failed to sign payload with %s: %w`, alg, err) + } + + serialized, err := jwsbb.JoinCompact(nil, hdr, payload, signature, base64.DefaultEncoder(), true) + if err != nil { + return nil, fmt.Errorf("jwt.signFast: failed to join compact: %w", err) + } + return serialized, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/filter.go new file mode 100644 index 0000000000..31471dbdfb --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/filter.go @@ -0,0 +1,34 @@ +package jwt + +import ( + "github.com/lestrrat-go/jwx/v3/transform" +) + +// TokenFilter is an interface that allows users to filter JWT claims. +// It provides two methods: Filter and Reject; Filter returns a new token with only +// the claims that match the filter criteria, while Reject returns a new token with +// only the claims that DO NOT match the filter. +// +// EXPERIMENTAL: This API is experimental and its interface and behavior is +// subject to change in future releases. This API is not subject to semver +// compatibility guarantees. +type TokenFilter interface { + Filter(token Token) (Token, error) + Reject(token Token) (Token, error) +} + +// StandardClaimsFilter returns a TokenFilter that filters out standard JWT claims. 
+// +// You can use this filter to create tokens that either only has standard claims +// or only custom claims. If you need to configure the filter more precisely, consider +// using the ClaimNameFilter directly. +func StandardClaimsFilter() TokenFilter { + return stdClaimsFilter +} + +var stdClaimsFilter = NewClaimNameFilter(stdClaimNames...) + +// NewClaimNameFilter creates a new ClaimNameFilter with the specified claim names. +func NewClaimNameFilter(names ...string) TokenFilter { + return transform.NewNameBasedFilter[Token](names...) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/http.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/http.go new file mode 100644 index 0000000000..691c5a0df4 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/http.go @@ -0,0 +1,295 @@ +package jwt + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +// ParseCookie parses a JWT stored in a http.Cookie with the given name. +// If the specified cookie is not found, http.ErrNoCookie is returned. +func ParseCookie(req *http.Request, name string, options ...ParseOption) (Token, error) { + var dst **http.Cookie + for _, option := range options { + switch option.Ident() { + case identCookie{}: + if err := option.Value(&dst); err != nil { + return nil, fmt.Errorf(`jws.ParseCookie: value to option WithCookie must be **http.Cookie: %w`, err) + } + } + } + + cookie, err := req.Cookie(name) + if err != nil { + return nil, err + } + tok, err := ParseString(cookie.Value, options...) + if err != nil { + return nil, fmt.Errorf(`jws.ParseCookie: failed to parse token stored in cookie: %w`, err) + } + + if dst != nil { + *dst = cookie + } + return tok, nil +} + +// ParseHeader parses a JWT stored in a http.Header. +// +// For the header "Authorization", it will strip the prefix "Bearer " and will +// treat the remaining value as a JWT. 
+func ParseHeader(hdr http.Header, name string, options ...ParseOption) (Token, error) { + key := http.CanonicalHeaderKey(name) + v := strings.TrimSpace(hdr.Get(key)) + if v == "" { + return nil, fmt.Errorf(`empty header (%s)`, key) + } + + if key == "Authorization" { + // Authorization header is an exception. We strip the "Bearer " from + // the prefix + v = strings.TrimSpace(strings.TrimPrefix(v, "Bearer")) + } + + tok, err := ParseString(v, options...) + if err != nil { + return nil, fmt.Errorf(`failed to parse token stored in header (%s): %w`, key, err) + } + return tok, nil +} + +// ParseForm parses a JWT stored in a url.Value. +func ParseForm(values url.Values, name string, options ...ParseOption) (Token, error) { + v := strings.TrimSpace(values.Get(name)) + if v == "" { + return nil, fmt.Errorf(`empty value (%s)`, name) + } + + return ParseString(v, options...) +} + +// ParseRequest searches a http.Request object for a JWT token. +// +// Specifying WithHeaderKey() will tell it to search under a specific +// header key. Specifying WithFormKey() will tell it to search under +// a specific form field. +// +// If none of jwt.WithHeaderKey()/jwt.WithCookieKey()/jwt.WithFormKey() is +// used, "Authorization" header will be searched. If any of these options +// are specified, you must explicitly re-enable searching for "Authorization" header +// if you also want to search for it. +// +// # searches for "Authorization" +// jwt.ParseRequest(req) +// +// # searches for "x-my-token" ONLY. +// jwt.ParseRequest(req, jwt.WithHeaderKey("x-my-token")) +// +// # searches for "Authorization" AND "x-my-token" +// jwt.ParseRequest(req, jwt.WithHeaderKey("Authorization"), jwt.WithHeaderKey("x-my-token")) +// +// Cookies are searched using (http.Request).Cookie(). 
If you have multiple +// cookies with the same name, and you want to search for a specific one that +// (http.Request).Cookie() would not return, you will need to implement your +// own logic to extract the cookie and use jwt.ParseString(). +func ParseRequest(req *http.Request, options ...ParseOption) (Token, error) { + var hdrkeys []string + var formkeys []string + var cookiekeys []string + var parseOptions []ParseOption + for _, option := range options { + switch option.Ident() { + case identHeaderKey{}: + var v string + if err := option.Value(&v); err != nil { + return nil, fmt.Errorf(`jws.ParseRequest: value to option WithHeaderKey must be string: %w`, err) + } + hdrkeys = append(hdrkeys, v) + case identFormKey{}: + var v string + if err := option.Value(&v); err != nil { + return nil, fmt.Errorf(`jws.ParseRequest: value to option WithFormKey must be string: %w`, err) + } + formkeys = append(formkeys, v) + case identCookieKey{}: + var v string + if err := option.Value(&v); err != nil { + return nil, fmt.Errorf(`jws.ParseRequest: value to option WithCookieKey must be string: %w`, err) + } + cookiekeys = append(cookiekeys, v) + default: + parseOptions = append(parseOptions, option) + } + } + + if len(hdrkeys) == 0 && len(formkeys) == 0 && len(cookiekeys) == 0 { + hdrkeys = append(hdrkeys, "Authorization") + } + + mhdrs := pool.KeyToErrorMap().Get() + defer pool.KeyToErrorMap().Put(mhdrs) + mfrms := pool.KeyToErrorMap().Get() + defer pool.KeyToErrorMap().Put(mfrms) + mcookies := pool.KeyToErrorMap().Get() + defer pool.KeyToErrorMap().Put(mcookies) + + for _, hdrkey := range hdrkeys { + // Check presence via a direct map lookup + if _, ok := req.Header[http.CanonicalHeaderKey(hdrkey)]; !ok { + // if non-existent, not error + continue + } + + tok, err := ParseHeader(req.Header, hdrkey, parseOptions...) 
+ if err != nil { + mhdrs[hdrkey] = err + continue + } + return tok, nil + } + + for _, name := range cookiekeys { + tok, err := ParseCookie(req, name, parseOptions...) + if err != nil { + if err == http.ErrNoCookie { + // not fatal + mcookies[name] = err + } + continue + } + return tok, nil + } + + if cl := req.ContentLength; cl > 0 { + if err := req.ParseForm(); err != nil { + return nil, fmt.Errorf(`failed to parse form: %w`, err) + } + } + + for _, formkey := range formkeys { + // Check presence via a direct map lookup + if _, ok := req.Form[formkey]; !ok { + // if non-existent, not error + continue + } + + tok, err := ParseForm(req.Form, formkey, parseOptions...) + if err != nil { + mfrms[formkey] = err + continue + } + return tok, nil + } + + // Everything below is a prelude to error reporting. + var triedHdrs strings.Builder + for i, hdrkey := range hdrkeys { + if i > 0 { + triedHdrs.WriteString(", ") + } + triedHdrs.WriteString(strconv.Quote(hdrkey)) + } + + var triedForms strings.Builder + for i, formkey := range formkeys { + if i > 0 { + triedForms.WriteString(", ") + } + triedForms.WriteString(strconv.Quote(formkey)) + } + + var triedCookies strings.Builder + for i, cookiekey := range cookiekeys { + if i > 0 { + triedCookies.WriteString(", ") + } + triedCookies.WriteString(strconv.Quote(cookiekey)) + } + + var b strings.Builder + b.WriteString(`failed to find a valid token in any location of the request (tried: `) + olen := b.Len() + if triedHdrs.Len() > 0 { + b.WriteString(`header keys: [`) + b.WriteString(triedHdrs.String()) + b.WriteByte(tokens.CloseSquareBracket) + } + if triedForms.Len() > 0 { + if b.Len() > olen { + b.WriteString(", ") + } + b.WriteString("form keys: [") + b.WriteString(triedForms.String()) + b.WriteByte(tokens.CloseSquareBracket) + } + + if triedCookies.Len() > 0 { + if b.Len() > olen { + b.WriteString(", ") + } + b.WriteString("cookie keys: [") + b.WriteString(triedCookies.String()) + b.WriteByte(tokens.CloseSquareBracket) + } + 
b.WriteByte(')') + + lmhdrs := len(mhdrs) + lmfrms := len(mfrms) + lmcookies := len(mcookies) + var errors []any + if lmhdrs > 0 || lmfrms > 0 || lmcookies > 0 { + b.WriteString(". Additionally, errors were encountered during attempts to verify using:") + + if lmhdrs > 0 { + b.WriteString(" headers: (") + count := 0 + for hdrkey, err := range mhdrs { + if count > 0 { + b.WriteString(", ") + } + b.WriteString("[header key: ") + b.WriteString(strconv.Quote(hdrkey)) + b.WriteString(", error: %w]") + errors = append(errors, err) + count++ + } + b.WriteString(")") + } + + if lmcookies > 0 { + count := 0 + b.WriteString(" cookies: (") + for cookiekey, err := range mcookies { + if count > 0 { + b.WriteString(", ") + } + b.WriteString("[cookie key: ") + b.WriteString(strconv.Quote(cookiekey)) + b.WriteString(", error: %w]") + errors = append(errors, err) + count++ + } + } + + if lmfrms > 0 { + count := 0 + b.WriteString(" forms: (") + for formkey, err := range mfrms { + if count > 0 { + b.WriteString(", ") + } + b.WriteString("[form key: ") + b.WriteString(strconv.Quote(formkey)) + b.WriteString(", error: %w]") + errors = append(errors, err) + count++ + } + } + } + return nil, fmt.Errorf(b.String(), errors...) 
+} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/interface.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/interface.go new file mode 100644 index 0000000000..f9a9d971ef --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/interface.go @@ -0,0 +1,8 @@ +package jwt + +import ( + "github.com/lestrrat-go/jwx/v3/internal/json" +) + +type DecodeCtx = json.DecodeCtx +type TokenWithDecodeCtx = json.DecodeCtxContainer diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/BUILD.bazel new file mode 100644 index 0000000000..a053e8c0aa --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/BUILD.bazel @@ -0,0 +1,16 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "errors", + srcs = [ + "errors.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwt/internal/errors", + visibility = ["//jwt:__subpackages__"], +) + +alias( + name = "go_default_library", + actual = ":errors", + visibility = ["//jwt:__subpackages__"], +) \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go new file mode 100644 index 0000000000..179763a50d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/errors/errors.go @@ -0,0 +1,185 @@ +// Package errors exist to store errors for jwt and openid packages. +// +// It's internal because we don't want to expose _anything_ about these errors +// so users absolutely cannot do anything other than use them as opaque errors. 
+// +//nolint:revive +package errors + +import ( + "errors" + "fmt" +) + +var ( + ErrClaimNotFound = ClaimNotFoundError{} + ErrClaimAssignmentFailed = ClaimAssignmentFailedError{Err: errors.New(`claim assignment failed`)} + ErrUnknownPayloadType = errors.New(`unknown payload type (payload is not JWT?)`) + ErrParse = ParseError{error: errors.New(`jwt.Parse: unknown error`)} + ErrValidateDefault = ValidationError{errors.New(`unknown error`)} + ErrInvalidIssuerDefault = InvalidIssuerError{errors.New(`"iss" not satisfied`)} + ErrTokenExpiredDefault = TokenExpiredError{errors.New(`"exp" not satisfied: token is expired`)} + ErrInvalidIssuedAtDefault = InvalidIssuedAtError{errors.New(`"iat" not satisfied`)} + ErrTokenNotYetValidDefault = TokenNotYetValidError{errors.New(`"nbf" not satisfied: token is not yet valid`)} + ErrInvalidAudienceDefault = InvalidAudienceError{errors.New(`"aud" not satisfied`)} + ErrMissingRequiredClaimDefault = &MissingRequiredClaimError{error: errors.New(`required claim is missing`)} +) + +type ClaimNotFoundError struct { + Name string +} + +func (e ClaimNotFoundError) Error() string { + // This error message uses "field" instead of "claim" for backwards compatibility, + // but it shuold really be "claim" since it refers to a JWT claim. + return fmt.Sprintf(`field "%s" not found`, e.Name) +} + +func (e ClaimNotFoundError) Is(target error) bool { + _, ok := target.(ClaimNotFoundError) + return ok +} + +type ClaimAssignmentFailedError struct { + Err error +} + +func (e ClaimAssignmentFailedError) Error() string { + // This error message probably should be tweaked, but it is this way + // for backwards compatibility. 
+ return fmt.Sprintf(`failed to assign value to dst: %s`, e.Err.Error()) +} + +func (e ClaimAssignmentFailedError) Unwrap() error { + return e.Err +} + +func (e ClaimAssignmentFailedError) Is(target error) bool { + _, ok := target.(ClaimAssignmentFailedError) + return ok +} + +type ParseError struct { + error +} + +func (e ParseError) Unwrap() error { + return e.error +} + +func (ParseError) Is(err error) bool { + _, ok := err.(ParseError) + return ok +} + +func ParseErrorf(prefix, f string, args ...any) error { + return ParseError{fmt.Errorf(prefix+": "+f, args...)} +} + +type ValidationError struct { + error +} + +func (ValidationError) Is(err error) bool { + _, ok := err.(ValidationError) + return ok +} + +func (err ValidationError) Unwrap() error { + return err.error +} + +func ValidateErrorf(f string, args ...any) error { + return ValidationError{fmt.Errorf(`jwt.Validate: `+f, args...)} +} + +type InvalidIssuerError struct { + error +} + +func (err InvalidIssuerError) Is(target error) bool { + _, ok := target.(InvalidIssuerError) + return ok +} + +func (err InvalidIssuerError) Unwrap() error { + return err.error +} + +func IssuerErrorf(f string, args ...any) error { + return InvalidIssuerError{fmt.Errorf(`"iss" not satisfied: `+f, args...)} +} + +type TokenExpiredError struct { + error +} + +func (err TokenExpiredError) Is(target error) bool { + _, ok := target.(TokenExpiredError) + return ok +} + +func (err TokenExpiredError) Unwrap() error { + return err.error +} + +type InvalidIssuedAtError struct { + error +} + +func (err InvalidIssuedAtError) Is(target error) bool { + _, ok := target.(InvalidIssuedAtError) + return ok +} + +func (err InvalidIssuedAtError) Unwrap() error { + return err.error +} + +type TokenNotYetValidError struct { + error +} + +func (err TokenNotYetValidError) Is(target error) bool { + _, ok := target.(TokenNotYetValidError) + return ok +} + +func (err TokenNotYetValidError) Unwrap() error { + return err.error +} + +type 
InvalidAudienceError struct { + error +} + +func (err InvalidAudienceError) Is(target error) bool { + _, ok := target.(InvalidAudienceError) + return ok +} + +func (err InvalidAudienceError) Unwrap() error { + return err.error +} + +func AudienceErrorf(f string, args ...any) error { + return InvalidAudienceError{fmt.Errorf(`"aud" not satisfied: `+f, args...)} +} + +type MissingRequiredClaimError struct { + error + + claim string +} + +func (err *MissingRequiredClaimError) Is(target error) bool { + err1, ok := target.(*MissingRequiredClaimError) + if !ok { + return false + } + return err1 == ErrMissingRequiredClaimDefault || err1.claim == err.claim +} + +func MissingRequiredClaimErrorf(name string) error { + return &MissingRequiredClaimError{claim: name, error: fmt.Errorf(`required claim "%s" is missing`, name)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/BUILD.bazel new file mode 100644 index 0000000000..1d046a3ecc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/BUILD.bazel @@ -0,0 +1,35 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "types", + srcs = [ + "date.go", + "string.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/jwt/internal/types", + visibility = ["//jwt:__subpackages__"], + deps = [ + "//internal/json", + "//internal/tokens", + ], +) + +go_test( + name = "types_test", + srcs = [ + "date_test.go", + "string_test.go", + ], + deps = [ + ":types", + "//internal/json", + "//jwt", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":types", + visibility = ["//jwt:__subpackages__"], +) diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/date.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/date.go new file mode 100644 index 0000000000..3d40a9ed97 --- /dev/null +++ 
b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/date.go @@ -0,0 +1,192 @@ +package types + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/tokens" +) + +const ( + DefaultPrecision uint32 = 0 // second level + MaxPrecision uint32 = 9 // nanosecond level +) + +var Pedantic uint32 +var ParsePrecision = DefaultPrecision +var FormatPrecision = DefaultPrecision + +// NumericDate represents the date format used in the 'nbf' claim +type NumericDate struct { + time.Time +} + +func (n *NumericDate) Get() time.Time { + if n == nil { + return (time.Time{}).UTC() + } + return n.Time +} + +func intToTime(v any, t *time.Time) bool { + var n int64 + switch x := v.(type) { + case int64: + n = x + case int32: + n = int64(x) + case int16: + n = int64(x) + case int8: + n = int64(x) + case int: + n = int64(x) + default: + return false + } + + *t = time.Unix(n, 0) + return true +} + +func parseNumericString(x string) (time.Time, error) { + var t time.Time // empty time for empty return value + + // Only check for the escape hatch if it's the pedantic + // flag is off + if Pedantic != 1 { + // This is an escape hatch for non-conformant providers + // that gives us RFC3339 instead of epoch time + for _, r := range x { + // 0x30 = '0', 0x39 = '9', 0x2E = tokens.Period + if (r >= 0x30 && r <= 0x39) || r == 0x2E { + continue + } + + // if it got here, then it probably isn't epoch time + tv, err := time.Parse(time.RFC3339, x) + if err != nil { + return t, fmt.Errorf(`value is not number of seconds since the epoch, and attempt to parse it as RFC3339 timestamp failed: %w`, err) + } + return tv, nil + } + } + + var fractional string + whole := x + if i := strings.IndexRune(x, tokens.Period); i > 0 { + if ParsePrecision > 0 && len(x) > i+1 { + fractional = x[i+1:] // everything after the tokens.Period + if int(ParsePrecision) < len(fractional) { + // Remove insignificant digits + fractional = 
fractional[:int(ParsePrecision)] + } + // Replace missing fractional diits with zeros + for len(fractional) < int(MaxPrecision) { + fractional = fractional + "0" + } + } + whole = x[:i] + } + n, err := strconv.ParseInt(whole, 10, 64) + if err != nil { + return t, fmt.Errorf(`failed to parse whole value %q: %w`, whole, err) + } + var nsecs int64 + if fractional != "" { + v, err := strconv.ParseInt(fractional, 10, 64) + if err != nil { + return t, fmt.Errorf(`failed to parse fractional value %q: %w`, fractional, err) + } + nsecs = v + } + + return time.Unix(n, nsecs).UTC(), nil +} + +func (n *NumericDate) Accept(v any) error { + var t time.Time + switch x := v.(type) { + case float32: + tv, err := parseNumericString(fmt.Sprintf(`%.9f`, x)) + if err != nil { + return fmt.Errorf(`failed to accept float32 %.9f: %w`, x, err) + } + t = tv + case float64: + tv, err := parseNumericString(fmt.Sprintf(`%.9f`, x)) + if err != nil { + return fmt.Errorf(`failed to accept float32 %.9f: %w`, x, err) + } + t = tv + case json.Number: + tv, err := parseNumericString(x.String()) + if err != nil { + return fmt.Errorf(`failed to accept json.Number %q: %w`, x.String(), err) + } + t = tv + case string: + tv, err := parseNumericString(x) + if err != nil { + return fmt.Errorf(`failed to accept string %q: %w`, x, err) + } + t = tv + case time.Time: + t = x + default: + if !intToTime(v, &t) { + return fmt.Errorf(`invalid type %T`, v) + } + } + n.Time = t.UTC() + return nil +} + +func (n NumericDate) String() string { + if FormatPrecision == 0 { + return strconv.FormatInt(n.Unix(), 10) + } + + // This is cheating, but it's better (easier) than doing floating point math + // We basically munge with strings after formatting an integer value + // for nanoseconds since epoch + s := strconv.FormatInt(n.UnixNano(), 10) + for len(s) < int(MaxPrecision) { + s = "0" + s + } + + slwhole := len(s) - int(MaxPrecision) + s = s[:slwhole] + "." 
+ s[slwhole:slwhole+int(FormatPrecision)] + if s[0] == tokens.Period { + s = "0" + s + } + + return s +} + +// MarshalJSON translates from internal representation to JSON NumericDate +// See https://tools.ietf.org/html/rfc7519#page-6 +func (n *NumericDate) MarshalJSON() ([]byte, error) { + if n.IsZero() { + return json.Marshal(nil) + } + + return json.Marshal(n.String()) +} + +func (n *NumericDate) UnmarshalJSON(data []byte) error { + var v any + if err := json.Unmarshal(data, &v); err != nil { + return fmt.Errorf(`failed to unmarshal date: %w`, err) + } + + var n2 NumericDate + if err := n2.Accept(v); err != nil { + return fmt.Errorf(`invalid value for NumericDate: %w`, err) + } + *n = n2 + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/string.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/string.go new file mode 100644 index 0000000000..8117bea358 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/internal/types/string.go @@ -0,0 +1,43 @@ +package types + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/json" +) + +type StringList []string + +func (l StringList) Get() []string { + return []string(l) +} + +func (l *StringList) Accept(v any) error { + switch x := v.(type) { + case string: + *l = StringList([]string{x}) + case []string: + *l = StringList(x) + case []any: + list := make(StringList, len(x)) + for i, e := range x { + if s, ok := e.(string); ok { + list[i] = s + continue + } + return fmt.Errorf(`invalid list element type %T`, e) + } + *l = list + default: + return fmt.Errorf(`invalid type: %T`, v) + } + return nil +} + +func (l *StringList) UnmarshalJSON(data []byte) error { + var v any + if err := json.Unmarshal(data, &v); err != nil { + return fmt.Errorf(`failed to unmarshal data: %w`, err) + } + return l.Accept(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/io.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/io.go new file mode 100644 index 0000000000..812cda775e --- 
/dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/io.go @@ -0,0 +1,42 @@ +// Code generated by tools/cmd/genreadfile/main.go. DO NOT EDIT. + +package jwt + +import ( + "fmt" + "io/fs" + "os" +) + +type sysFS struct{} + +func (sysFS) Open(path string) (fs.File, error) { + return os.Open(path) +} + +func ReadFile(path string, options ...ReadFileOption) (Token, error) { + var parseOptions []ParseOption + for _, option := range options { + if po, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, po) + } + } + + var srcFS fs.FS = sysFS{} + for _, option := range options { + switch option.Ident() { + case identFS{}: + if err := option.Value(&srcFS); err != nil { + return nil, fmt.Errorf("failed to set fs.FS: %w", err) + } + } + } + + f, err := srcFS.Open(path) + if err != nil { + return nil, err + } + + defer f.Close() + return ParseReader(f, parseOptions...) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go new file mode 100644 index 0000000000..99b5ef37a6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/jwt.go @@ -0,0 +1,597 @@ +//go:generate ../tools/cmd/genjwt.sh +//go:generate stringer -type=TokenOption -output=token_options_gen.go + +// Package jwt implements JSON Web Tokens as described in https://tools.ietf.org/html/rfc7519 +package jwt + +import ( + "bytes" + "fmt" + "io" + "sync/atomic" + "time" + + "github.com/lestrrat-go/jwx/v3" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jws" + jwterrs "github.com/lestrrat-go/jwx/v3/jwt/internal/errors" + "github.com/lestrrat-go/jwx/v3/jwt/internal/types" +) + +var defaultTruncation atomic.Int64 + +// Settings controls global settings that are specific to JWTs. 
+func Settings(options ...GlobalOption) { + var flattenAudience bool + var parsePedantic bool + var parsePrecision = types.MaxPrecision + 1 // illegal value, so we can detect nothing was set + var formatPrecision = types.MaxPrecision + 1 // illegal value, so we can detect nothing was set + truncation := time.Duration(-1) + for _, option := range options { + switch option.Ident() { + case identTruncation{}: + if err := option.Value(&truncation); err != nil { + panic(fmt.Sprintf("jwt.Settings: value for WithTruncation must be time.Duration: %s", err)) + } + case identFlattenAudience{}: + if err := option.Value(&flattenAudience); err != nil { + panic(fmt.Sprintf("jwt.Settings: value for WithFlattenAudience must be bool: %s", err)) + } + case identNumericDateParsePedantic{}: + if err := option.Value(&parsePedantic); err != nil { + panic(fmt.Sprintf("jwt.Settings: value for WithNumericDateParsePedantic must be bool: %s", err)) + } + case identNumericDateParsePrecision{}: + var v int + if err := option.Value(&v); err != nil { + panic(fmt.Sprintf("jwt.Settings: value for WithNumericDateParsePrecision must be int: %s", err)) + } + // only accept this value if it's in our desired range + if v >= 0 && v <= int(types.MaxPrecision) { + parsePrecision = uint32(v) + } + case identNumericDateFormatPrecision{}: + var v int + if err := option.Value(&v); err != nil { + panic(fmt.Sprintf("jwt.Settings: value for WithNumericDateFormatPrecision must be int: %s", err)) + } + // only accept this value if it's in our desired range + if v >= 0 && v <= int(types.MaxPrecision) { + formatPrecision = uint32(v) + } + } + } + + if parsePrecision <= types.MaxPrecision { // remember we set default to max + 1 + v := atomic.LoadUint32(&types.ParsePrecision) + if v != parsePrecision { + atomic.CompareAndSwapUint32(&types.ParsePrecision, v, parsePrecision) + } + } + + if formatPrecision <= types.MaxPrecision { // remember we set default to max + 1 + v := atomic.LoadUint32(&types.FormatPrecision) + if 
v != formatPrecision { + atomic.CompareAndSwapUint32(&types.FormatPrecision, v, formatPrecision) + } + } + + { + v := atomic.LoadUint32(&types.Pedantic) + if (v == 1) != parsePedantic { + var newVal uint32 + if parsePedantic { + newVal = 1 + } + atomic.CompareAndSwapUint32(&types.Pedantic, v, newVal) + } + } + + { + defaultOptionsMu.Lock() + if flattenAudience { + defaultOptions.Enable(FlattenAudience) + } else { + defaultOptions.Disable(FlattenAudience) + } + defaultOptionsMu.Unlock() + } + + if truncation >= 0 { + defaultTruncation.Store(int64(truncation)) + } +} + +var registry = json.NewRegistry() + +// ParseString calls Parse against a string +func ParseString(s string, options ...ParseOption) (Token, error) { + tok, err := parseBytes([]byte(s), options...) + if err != nil { + return nil, jwterrs.ParseErrorf(`jwt.ParseString`, `failed to parse string: %w`, err) + } + return tok, nil +} + +// Parse parses the JWT token payload and creates a new `jwt.Token` object. +// The token must be encoded in JWS compact format, or a raw JSON form of JWT +// without any signatures. +// +// If you need JWE support on top of JWS, you will need to rollout your +// own workaround. +// +// If the token is signed, and you want to verify the payload matches the signature, +// you must pass the jwt.WithKey(alg, key) or jwt.WithKeySet(jwk.Set) option. +// If you do not specify these parameters, no verification will be performed. +// +// During verification, if the JWS headers specify a key ID (`kid`), the +// key used for verification must match the specified ID. If you are somehow +// using a key without a `kid` (which is highly unlikely if you are working +// with a JWT from a well-know provider), you can work around this by modifying +// the `jwk.Key` and setting the `kid` header. +// +// If you also want to assert the validity of the JWT itself (i.e. expiration +// and such), use the `Validate()` function on the returned token, or pass the +// `WithValidate(true)` option. 
Validate options can also be passed to +// `Parse` +// +// This function takes both ParseOption and ValidateOption types: +// ParseOptions control the parsing behavior, and ValidateOptions are +// passed to `Validate()` when `jwt.WithValidate` is specified. +func Parse(s []byte, options ...ParseOption) (Token, error) { + tok, err := parseBytes(s, options...) + if err != nil { + return nil, jwterrs.ParseErrorf(`jwt.Parse`, `failed to parse token: %w`, err) + } + return tok, nil +} + +// ParseInsecure is exactly the same as Parse(), but it disables +// signature verification and token validation. +// +// You cannot override `jwt.WithVerify()` or `jwt.WithValidate()` +// using this function. Providing these options would result in +// an error +func ParseInsecure(s []byte, options ...ParseOption) (Token, error) { + for _, option := range options { + switch option.Ident() { + case identVerify{}, identValidate{}: + return nil, jwterrs.ParseErrorf(`jwt.ParseInsecure`, `jwt.WithVerify() and jwt.WithValidate() may not be specified`) + } + } + + options = append(options, WithVerify(false), WithValidate(false)) + tok, err := Parse(s, options...) + if err != nil { + return nil, jwterrs.ParseErrorf(`jwt.ParseInsecure`, `failed to parse token: %w`, err) + } + return tok, nil +} + +// ParseReader calls Parse against an io.Reader +func ParseReader(src io.Reader, options ...ParseOption) (Token, error) { + // We're going to need the raw bytes regardless. Read it. + data, err := io.ReadAll(src) + if err != nil { + return nil, jwterrs.ParseErrorf(`jwt.ParseReader`, `failed to read from token data source: %w`, err) + } + tok, err := parseBytes(data, options...) 
+ if err != nil { + return nil, jwterrs.ParseErrorf(`jwt.ParseReader`, `failed to parse token: %w`, err) + } + return tok, nil +} + +type parseCtx struct { + token Token + validateOpts []ValidateOption + verifyOpts []jws.VerifyOption + localReg *json.Registry + pedantic bool + skipVerification bool + validate bool + withKeyCount int + withKey *withKey // this is used to detect if we have a WithKey option +} + +func parseBytes(data []byte, options ...ParseOption) (Token, error) { + var ctx parseCtx + + // Validation is turned on by default. You need to specify + // jwt.WithValidate(false) if you want to disable it + ctx.validate = true + + // Verification is required (i.e., it is assumed that the incoming + // data is in JWS format) unless the user explicitly asks for + // it to be skipped. + verification := true + + var verifyOpts []Option + for _, o := range options { + if v, ok := o.(ValidateOption); ok { + ctx.validateOpts = append(ctx.validateOpts, v) + // context is used for both verification and validation, so we can't just continue + switch o.Ident() { + case identContext{}: + default: + continue + } + } + + switch o.Ident() { + case identKey{}: + // it would be nice to be able to detect if ctx.verifyOpts[0] + // is a WithKey option, but unfortunately at that point we have + // already converted the options to a jws option, which means + // we can no longer compare its Ident() to jwt.identKey{}. 
+ // So let's just count this here + ctx.withKeyCount++ + if ctx.withKeyCount == 1 { + if err := o.Value(&ctx.withKey); err != nil { + return nil, fmt.Errorf("jws.parseBytes: value for WithKey option must be a *jwt.withKey: %w", err) + } + } + verifyOpts = append(verifyOpts, o) + case identKeySet{}, identVerifyAuto{}, identKeyProvider{}, identBase64Encoder{}, identContext{}: + verifyOpts = append(verifyOpts, o) + case identToken{}: + var token Token + if err := o.Value(&token); err != nil { + return nil, fmt.Errorf("jws.parseBytes: value for WithToken option must be a jwt.Token: %w", err) + } + ctx.token = token + case identPedantic{}: + if err := o.Value(&ctx.pedantic); err != nil { + return nil, fmt.Errorf("jws.parseBytes: value for WithPedantic option must be a bool: %w", err) + } + case identValidate{}: + if err := o.Value(&ctx.validate); err != nil { + return nil, fmt.Errorf("jws.parseBytes: value for WithValidate option must be a bool: %w", err) + } + case identVerify{}: + if err := o.Value(&verification); err != nil { + return nil, fmt.Errorf("jws.parseBytes: value for WithVerify option must be a bool: %w", err) + } + case identTypedClaim{}: + var pair claimPair + if err := o.Value(&pair); err != nil { + return nil, fmt.Errorf("jws.parseBytes: value for WithTypedClaim option must be claimPair: %w", err) + } + if ctx.localReg == nil { + ctx.localReg = json.NewRegistry() + } + ctx.localReg.Register(pair.Name, pair.Value) + } + } + + if !verification { + ctx.skipVerification = true + } + + lvo := len(verifyOpts) + if lvo == 0 && verification { + return nil, fmt.Errorf(`jwt.Parse: no keys for verification are provided (use jwt.WithVerify(false) to explicitly skip)`) + } + + if lvo > 0 { + converted, err := toVerifyOptions(verifyOpts...) 
+ if err != nil { + return nil, fmt.Errorf(`jwt.Parse: failed to convert options into jws.VerifyOption: %w`, err) + } + ctx.verifyOpts = converted + } + + data = bytes.TrimSpace(data) + return parse(&ctx, data) +} + +const ( + _JwsVerifyInvalid = iota + _JwsVerifyDone + _JwsVerifyExpectNested + _JwsVerifySkipped +) + +var _ = _JwsVerifyInvalid + +func verifyJWS(ctx *parseCtx, payload []byte) ([]byte, int, error) { + lvo := len(ctx.verifyOpts) + if lvo == 0 { + return nil, _JwsVerifySkipped, nil + } + + if lvo == 1 && ctx.withKeyCount == 1 { + wk := ctx.withKey + alg, ok := wk.alg.(jwa.SignatureAlgorithm) + if ok && len(wk.options) == 0 { + verified, err := jws.VerifyCompactFast(wk.key, payload, alg) + if err != nil { + return nil, _JwsVerifyDone, err + } + return verified, _JwsVerifyDone, nil + } + } + + verifyOpts := append(ctx.verifyOpts, jws.WithCompact()) + verified, err := jws.Verify(payload, verifyOpts...) + return verified, _JwsVerifyDone, err +} + +// verify parameter exists to make sure that we don't accidentally skip +// over verification just because alg == "" or key == nil or something. +func parse(ctx *parseCtx, data []byte) (Token, error) { + payload := data + const maxDecodeLevels = 2 + + // If cty = `JWT`, we expect this to be a nested structure + var expectNested bool + +OUTER: + for i := range maxDecodeLevels { + switch kind := jwx.GuessFormat(payload); kind { + case jwx.JWT: + if ctx.pedantic { + if expectNested { + return nil, fmt.Errorf(`expected nested encrypted/signed payload, got raw JWT`) + } + } + + if i == 0 { + // We were NOT enveloped in other formats + if !ctx.skipVerification { + if _, _, err := verifyJWS(ctx, payload); err != nil { + return nil, err + } + } + } + + break OUTER + case jwx.InvalidFormat: + return nil, UnknownPayloadTypeError() + case jwx.UnknownFormat: + // "Unknown" may include invalid JWTs, for example, those who lack "aud" + // claim. 
We could be pedantic and reject these + if ctx.pedantic { + return nil, fmt.Errorf(`unknown JWT format (pedantic)`) + } + + if i == 0 { + // We were NOT enveloped in other formats + if !ctx.skipVerification { + if _, _, err := verifyJWS(ctx, payload); err != nil { + return nil, err + } + } + } + break OUTER + case jwx.JWS: + // Food for thought: This is going to break if you have multiple layers of + // JWS enveloping using different keys. It is highly unlikely use case, + // but it might happen. + + // skipVerification should only be set to true by us. It's used + // when we just want to parse the JWT out of a payload + if !ctx.skipVerification { + // nested return value means: + // false (next envelope _may_ need to be processed) + // true (next envelope MUST be processed) + v, state, err := verifyJWS(ctx, payload) + if err != nil { + return nil, err + } + + if state != _JwsVerifySkipped { + payload = v + + // We only check for cty and typ if the pedantic flag is enabled + if !ctx.pedantic { + continue + } + + if state == _JwsVerifyExpectNested { + expectNested = true + continue OUTER + } + + // if we're not nested, we found our target. bail out of this loop + break OUTER + } + } + + // No verification. 
+ m, err := jws.Parse(data, jws.WithCompact()) + if err != nil { + return nil, fmt.Errorf(`invalid jws message: %w`, err) + } + payload = m.Payload() + default: + return nil, fmt.Errorf(`unsupported format (layer: #%d)`, i+1) + } + expectNested = false + } + + if ctx.token == nil { + ctx.token = New() + } + + if ctx.localReg != nil { + dcToken, ok := ctx.token.(TokenWithDecodeCtx) + if !ok { + return nil, fmt.Errorf(`typed claim was requested, but the token (%T) does not support DecodeCtx`, ctx.token) + } + dc := json.NewDecodeCtx(ctx.localReg) + dcToken.SetDecodeCtx(dc) + defer func() { dcToken.SetDecodeCtx(nil) }() + } + + if err := json.Unmarshal(payload, ctx.token); err != nil { + return nil, fmt.Errorf(`failed to parse token: %w`, err) + } + + if ctx.validate { + if err := Validate(ctx.token, ctx.validateOpts...); err != nil { + return nil, err + } + } + return ctx.token, nil +} + +// Sign is a convenience function to create a signed JWT token serialized in +// compact form. +// +// It accepts either a raw key (e.g. rsa.PrivateKey, ecdsa.PrivateKey, etc) +// or a jwk.Key, and the name of the algorithm that should be used to sign +// the token. +// +// For well-known algorithms with no special considerations (e.g. detached +// payloads, extra protected heders, etc), this function will automatically +// take the fast path and bypass the jws.Sign() machinery, which improves +// performance significantly. +// +// If the key is a jwk.Key and the key contains a key ID (`kid` field), +// then it is added to the protected header generated by the signature +// +// The algorithm specified in the `alg` parameter must be able to support +// the type of key you provided, otherwise an error is returned. +// For convenience `alg` is of type jwa.KeyAlgorithm so you can pass +// the return value of `(jwk.Key).Algorithm()` directly, but in practice +// it must be an instance of jwa.SignatureAlgorithm, otherwise an error +// is returned. 
+// +// The protected header will also automatically have the `typ` field set +// to the literal value `JWT`, unless you provide a custom value for it +// by jws.WithProtectedHeaders option, that can be passed to `jwt.WithKey“. +func Sign(t Token, options ...SignOption) ([]byte, error) { + // fast path; can only happen if there is exactly one option + if len(options) == 1 && (options[0].Ident() == identKey{}) { + // The option must be a withKey option. + var wk *withKey + if err := options[0].Value(&wk); err == nil { + alg, ok := wk.alg.(jwa.SignatureAlgorithm) + if !ok { + return nil, fmt.Errorf(`jwt.Sign: invalid algorithm type %T. jwa.SignatureAlgorithm is required`, wk.alg) + } + + // Check if option contains anything other than alg/key + if len(wk.options) == 0 { + // yay, we have something we can put in the FAST PATH! + return signFast(t, alg, wk.key) + } + // fallthrough + } + // fallthrough + } + + var soptions []jws.SignOption + if l := len(options); l > 0 { + // we need to from SignOption to Option because ... reasons + // (todo: when go1.18 prevails, use type parameters + rawoptions := make([]Option, l) + for i, option := range options { + rawoptions[i] = option + } + + converted, err := toSignOptions(rawoptions...) + if err != nil { + return nil, fmt.Errorf(`jwt.Sign: failed to convert options into jws.SignOption: %w`, err) + } + soptions = converted + } + return NewSerializer().sign(soptions...).Serialize(t) +} + +// Equal compares two JWT tokens. Do not use `reflect.Equal` or the like +// to compare tokens as they will also compare extra detail such as +// sync.Mutex objects used to control concurrent access. +// +// The comparison for values is currently done using a simple equality ("=="), +// except for time.Time, which uses time.Equal after dropping the monotonic +// clock and truncating the values to 1 second accuracy. 
+// +// if both t1 and t2 are nil, returns true +func Equal(t1, t2 Token) bool { + if t1 == nil && t2 == nil { + return true + } + + // we already checked for t1 == t2 == nil, so safe to do this + if t1 == nil || t2 == nil { + return false + } + + j1, err := json.Marshal(t1) + if err != nil { + return false + } + + j2, err := json.Marshal(t2) + if err != nil { + return false + } + + return bytes.Equal(j1, j2) +} + +func (t *stdToken) Clone() (Token, error) { + dst := New() + + dst.Options().Set(*(t.Options())) + for _, k := range t.Keys() { + var v any + if err := t.Get(k, &v); err != nil { + return nil, fmt.Errorf(`jwt.Clone: failed to get %s: %w`, k, err) + } + if err := dst.Set(k, v); err != nil { + return nil, fmt.Errorf(`jwt.Clone failed to set %s: %w`, k, err) + } + } + return dst, nil +} + +type CustomDecoder = json.CustomDecoder +type CustomDecodeFunc = json.CustomDecodeFunc + +// RegisterCustomField allows users to specify that a private field +// be decoded as an instance of the specified type. This option has +// a global effect. +// +// For example, suppose you have a custom field `x-birthday`, which +// you want to represent as a string formatted in RFC3339 in JSON, +// but want it back as `time.Time`. +// +// In such case you would register a custom field as follows +// +// jwt.RegisterCustomField(`x-birthday`, time.Time{}) +// +// Then you can use a `time.Time` variable to extract the value +// of `x-birthday` field, instead of having to use `any` +// and later convert it to `time.Time` +// +// var bday time.Time +// _ = token.Get(`x-birthday`, &bday) +// +// If you need a more fine-tuned control over the decoding process, +// you can register a `CustomDecoder`. 
For example, below shows +// how to register a decoder that can parse RFC822 format string: +// +// jwt.RegisterCustomField(`x-birthday`, jwt.CustomDecodeFunc(func(data []byte) (any, error) { +// return time.Parse(time.RFC822, string(data)) +// })) +// +// Please note that use of custom fields can be problematic if you +// are using a library that does not implement MarshalJSON/UnmarshalJSON +// and you try to roundtrip from an object to JSON, and then back to an object. +// For example, in the above example, you can _parse_ time values formatted +// in the format specified in RFC822, but when you convert an object into +// JSON, it will be formatted in RFC3339, because that's what `time.Time` +// likes to do. To avoid this, it's always better to use a custom type +// that wraps your desired type (in this case `time.Time`) and implement +// MarshalJSON and UnmashalJSON. +func RegisterCustomField(name string, object any) { + registry.Register(name, object) +} + +func getDefaultTruncation() time.Duration { + return time.Duration(defaultTruncation.Load()) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go new file mode 100644 index 0000000000..4a7cfd3e5d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.go @@ -0,0 +1,332 @@ +package jwt + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwe" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/jwx/v3/jws" + "github.com/lestrrat-go/option/v2" +) + +type identInsecureNoSignature struct{} +type identKey struct{} +type identKeySet struct{} +type identTypedClaim struct{} +type identVerifyAuto struct{} + +func toSignOptions(options ...Option) ([]jws.SignOption, error) { + soptions := make([]jws.SignOption, 0, len(options)) + for _, option := range options { + switch option.Ident() { + case identInsecureNoSignature{}: + soptions = append(soptions, 
jws.WithInsecureNoSignature()) + case identKey{}: + var wk withKey + if err := option.Value(&wk); err != nil { + return nil, fmt.Errorf(`toSignOtpions: failed to convert option value to withKey: %w`, err) + } + var wksoptions []jws.WithKeySuboption + for _, subopt := range wk.options { + wksopt, ok := subopt.(jws.WithKeySuboption) + if !ok { + return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySuboption, but got %T`, subopt) + } + wksoptions = append(wksoptions, wksopt) + } + + soptions = append(soptions, jws.WithKey(wk.alg, wk.key, wksoptions...)) + case identSignOption{}: + var sigOpt jws.SignOption + if err := option.Value(&sigOpt); err != nil { + return nil, fmt.Errorf(`failed to decode SignOption: %w`, err) + } + soptions = append(soptions, sigOpt) + case identBase64Encoder{}: + var enc jws.Base64Encoder + if err := option.Value(&enc); err != nil { + return nil, fmt.Errorf(`failed to decode Base64Encoder: %w`, err) + } + soptions = append(soptions, jws.WithBase64Encoder(enc)) + } + } + return soptions, nil +} + +func toEncryptOptions(options ...Option) ([]jwe.EncryptOption, error) { + soptions := make([]jwe.EncryptOption, 0, len(options)) + for _, option := range options { + switch option.Ident() { + case identKey{}: + var wk withKey + if err := option.Value(&wk); err != nil { + return nil, fmt.Errorf(`toEncryptOptions: failed to convert option value to withKey: %w`, err) + } + var wksoptions []jwe.WithKeySuboption + for _, subopt := range wk.options { + wksopt, ok := subopt.(jwe.WithKeySuboption) + if !ok { + return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jwe.WithKeySuboption, but got %T`, subopt) + } + wksoptions = append(wksoptions, wksopt) + } + + soptions = append(soptions, jwe.WithKey(wk.alg, wk.key, wksoptions...)) + case identEncryptOption{}: + var encOpt jwe.EncryptOption + if err := option.Value(&encOpt); err != nil { + return nil, fmt.Errorf(`failed to decode EncryptOption: %w`, err) + } + 
soptions = append(soptions, encOpt) + } + } + return soptions, nil +} + +func toVerifyOptions(options ...Option) ([]jws.VerifyOption, error) { + voptions := make([]jws.VerifyOption, 0, len(options)) + for _, option := range options { + switch option.Ident() { + case identKey{}: + var wk withKey + if err := option.Value(&wk); err != nil { + return nil, fmt.Errorf(`toVerifyOptions: failed to convert option value to withKey: %w`, err) + } + var wksoptions []jws.WithKeySuboption + for _, subopt := range wk.options { + wksopt, ok := subopt.(jws.WithKeySuboption) + if !ok { + return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySuboption, but got %T`, subopt) + } + wksoptions = append(wksoptions, wksopt) + } + + voptions = append(voptions, jws.WithKey(wk.alg, wk.key, wksoptions...)) + case identKeySet{}: + var wks withKeySet + if err := option.Value(&wks); err != nil { + return nil, fmt.Errorf(`failed to convert option value to withKeySet: %w`, err) + } + var wkssoptions []jws.WithKeySetSuboption + for _, subopt := range wks.options { + wkssopt, ok := subopt.(jws.WithKeySetSuboption) + if !ok { + return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySetSuboption, but got %T`, subopt) + } + wkssoptions = append(wkssoptions, wkssopt) + } + + voptions = append(voptions, jws.WithKeySet(wks.set, wkssoptions...)) + case identVerifyAuto{}: + var vo jws.VerifyOption + if err := option.Value(&vo); err != nil { + return nil, fmt.Errorf(`failed to decode VerifyOption: %w`, err) + } + voptions = append(voptions, vo) + case identKeyProvider{}: + var kp jws.KeyProvider + if err := option.Value(&kp); err != nil { + return nil, fmt.Errorf(`failed to decode KeyProvider: %w`, err) + } + voptions = append(voptions, jws.WithKeyProvider(kp)) + case identBase64Encoder{}: + var enc jws.Base64Encoder + if err := option.Value(&enc); err != nil { + return nil, fmt.Errorf(`failed to decode Base64Encoder: %w`, err) + } + voptions = 
append(voptions, jws.WithBase64Encoder(enc)) + case identContext{}: + var ctx context.Context + if err := option.Value(&ctx); err != nil { + return nil, fmt.Errorf(`failed to decode Context: %w`, err) + } + voptions = append(voptions, jws.WithContext(ctx)) + default: + return nil, fmt.Errorf(`invalid jws.VerifyOption %q passed`, `With`+strings.TrimPrefix(fmt.Sprintf(`%T`, option.Ident()), `jws.ident`)) + } + } + return voptions, nil +} + +type withKey struct { + alg jwa.KeyAlgorithm + key any + options []Option +} + +// WithKey is a multipurpose option. It can be used for either jwt.Sign, jwt.Parse (and +// its siblings), and jwt.Serializer methods. For signatures, please see the documentation +// for `jws.WithKey` for more details. For encryption, please see the documentation +// for `jwe.WithKey`. +// +// It is the caller's responsibility to match the suboptions to the operation that they +// are performing. For example, you are not allowed to do this, because the operation +// is to generate a signature, and yet you are passing options for jwe: +// +// jwt.Sign(token, jwt.WithKey(alg, key, jweOptions...)) +// +// In the above example, the creation of the option via `jwt.WithKey()` will work, but +// when `jwt.Sign()` is called, the fact that you passed JWE suboptions will be +// detected, and an error will occur. +func WithKey(alg jwa.KeyAlgorithm, key any, suboptions ...Option) SignEncryptParseOption { + return &signEncryptParseOption{option.New(identKey{}, &withKey{ + alg: alg, + key: key, + options: suboptions, + })} +} + +type withKeySet struct { + set jwk.Set + options []any +} + +// WithKeySet forces the Parse method to verify the JWT message +// using one of the keys in the given key set. +// +// Key IDs (`kid`) in the JWS message and the JWK in the given `jwk.Set` +// must match in order for the key to be a candidate to be used for +// verification. +// +// This is for security reasons. 
If you must disable it, you can do so by +// specifying `jws.WithRequireKid(false)` in the suboptions. But we don't +// recommend it unless you know exactly what the security implications are +// +// When using this option, keys MUST have a proper 'alg' field +// set. This is because we need to know the exact algorithm that +// you (the user) wants to use to verify the token. We do NOT +// trust the token's headers, because they can easily be tampered with. +// +// However, there _is_ a workaround if you do understand the risks +// of allowing a library to automatically choose a signature verification strategy, +// and you do not mind the verification process having to possibly +// attempt using multiple times before succeeding to verify. See +// `jws.InferAlgorithmFromKey` option +// +// If you have only one key in the set, and are sure you want to +// use that key, you can use the `jwt.WithDefaultKey` option. +func WithKeySet(set jwk.Set, options ...any) ParseOption { + return &parseOption{option.New(identKeySet{}, &withKeySet{ + set: set, + options: options, + })} +} + +// WithIssuer specifies that expected issuer value. If not specified, +// the value of issuer is not verified at all. +func WithIssuer(s string) ValidateOption { + return WithValidator(issuerClaimValueIs(s)) +} + +// WithSubject specifies that expected subject value. If not specified, +// the value of subject is not verified at all. +func WithSubject(s string) ValidateOption { + return WithValidator(ClaimValueIs(SubjectKey, s)) +} + +// WithJwtID specifies that expected jti value. If not specified, +// the value of jti is not verified at all. +func WithJwtID(s string) ValidateOption { + return WithValidator(ClaimValueIs(JwtIDKey, s)) +} + +// WithAudience specifies that expected audience value. +// `Validate()` will return true if one of the values in the `aud` element +// matches this value. If not specified, the value of `aud` is not +// verified at all. 
+func WithAudience(s string) ValidateOption { + return WithValidator(audienceClaimContainsString(s)) +} + +// WithClaimValue specifies the expected value for a given claim +func WithClaimValue(name string, v any) ValidateOption { + return WithValidator(ClaimValueIs(name, v)) +} + +// WithTypedClaim allows a private claim to be parsed into the object type of +// your choice. It works much like the RegisterCustomField, but the effect +// is only applicable to the jwt.Parse function call which receives this option. +// +// While this can be extremely useful, this option should be used with caution: +// There are many caveats that your entire team/user-base needs to be aware of, +// and therefore in general its use is discouraged. Only use it when you know +// what you are doing, and you document its use clearly for others. +// +// First and foremost, this is a "per-object" option. Meaning that given the same +// serialized format, it is possible to generate two objects whose internal +// representations may differ. That is, if you parse one _WITH_ the option, +// and the other _WITHOUT_, their internal representation may completely differ. +// This could potentially lead to problems. +// +// Second, specifying this option will slightly slow down the decoding process +// as it needs to consult multiple definitions sources (global and local), so +// be careful if you are decoding a large number of tokens, as the effects will stack up. +// +// Finally, this option will also NOT work unless the tokens themselves support such +// parsing mechanism. For example, while tokens obtained from `jwt.New()` and +// `openid.New()` will respect this option, if you provide your own custom +// token type, it will need to implement the TokenWithDecodeCtx interface. 
+func WithTypedClaim(name string, object any) ParseOption { + return &parseOption{option.New(identTypedClaim{}, claimPair{Name: name, Value: object})} +} + +// WithRequiredClaim specifies that the claim identified the given name +// must exist in the token. Only the existence of the claim is checked: +// the actual value associated with that field is not checked. +func WithRequiredClaim(name string) ValidateOption { + return WithValidator(IsRequired(name)) +} + +// WithMaxDelta specifies that given two claims `c1` and `c2` that represent time, the difference in +// time.Duration must be less than equal to the value specified by `d`. If `c1` or `c2` is the +// empty string, the current time (as computed by `time.Now` or the object passed via +// `WithClock()`) is used for the comparison. +// +// `c1` and `c2` are also assumed to be required, therefore not providing either claim in the +// token will result in an error. +// +// Because there is no way of reliably knowing how to parse private claims, we currently only +// support `iat`, `exp`, and `nbf` claims. +// +// If the empty string is passed to c1 or c2, then the current time (as calculated by time.Now() or +// the clock object provided via WithClock()) is used. +// +// For example, in order to specify that `exp` - `iat` should be less than 10*time.Second, you would write +// +// jwt.Validate(token, jwt.WithMaxDelta(10*time.Second, jwt.ExpirationKey, jwt.IssuedAtKey)) +// +// If AcceptableSkew of 2 second is specified, the above will return valid for any value of +// `exp` - `iat` between 8 (10-2) and 12 (10+2). +func WithMaxDelta(dur time.Duration, c1, c2 string) ValidateOption { + return WithValidator(MaxDeltaIs(c1, c2, dur)) +} + +// WithMinDelta is almost exactly the same as WithMaxDelta, but force validation to fail if +// the difference between time claims are less than dur. 
+// +// For example, in order to specify that `exp` - `iat` should be greater than 10*time.Second, you would write +// +// jwt.Validate(token, jwt.WithMinDelta(10*time.Second, jwt.ExpirationKey, jwt.IssuedAtKey)) +// +// The validation would fail if the difference is less than 10 seconds. +func WithMinDelta(dur time.Duration, c1, c2 string) ValidateOption { + return WithValidator(MinDeltaIs(c1, c2, dur)) +} + +// WithVerifyAuto specifies that the JWS verification should be attempted +// by using the data available in the JWS message. Currently only verification +// method available is to use the keys available in the JWKS URL pointed +// in the `jku` field. +// +// Please read the documentation for `jws.VerifyAuto` for more details. +func WithVerifyAuto(f jwk.Fetcher, options ...jwk.FetchOption) ParseOption { + return &parseOption{option.New(identVerifyAuto{}, jws.WithVerifyAuto(f, options...))} +} + +func WithInsecureNoSignature() SignOption { + return &signEncryptParseOption{option.New(identInsecureNoSignature{}, (any)(nil))} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.yaml b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.yaml new file mode 100644 index 0000000000..bfcadfac25 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options.yaml @@ -0,0 +1,274 @@ +package_name: jwt +output: jwt/options_gen.go +interfaces: + - name: GlobalOption + comment: | + GlobalOption describes an Option that can be passed to `Settings()`. + - name: EncryptOption + comment: | + EncryptOption describes an Option that can be passed to (jwt.Serializer).Encrypt + - name: ParseOption + methods: + - parseOption + - readFileOption + comment: | + ParseOption describes an Option that can be passed to `jwt.Parse()`. 
+ ParseOption also implements ReadFileOption, therefore it may be + safely pass them to `jwt.ReadFile()` + - name: SignOption + comment: | + SignOption describes an Option that can be passed to `jwt.Sign()` or + (jwt.Serializer).Sign + - name: SignParseOption + methods: + - signOption + - parseOption + - readFileOption + comment: | + SignParseOption describes an Option that can be passed to both `jwt.Sign()` or + `jwt.Parse()` + - name: SignEncryptParseOption + methods: + - parseOption + - encryptOption + - readFileOption + - signOption + comment: | + SignEncryptParseOption describes an Option that can be passed to both `jwt.Sign()` or + `jwt.Parse()` + - name: ValidateOption + methods: + - parseOption + - readFileOption + - validateOption + comment: | + ValidateOption describes an Option that can be passed to Validate(). + ValidateOption also implements ParseOption, therefore it may be + safely passed to `Parse()` (and thus `jwt.ReadFile()`) + - name: ReadFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile` + - name: GlobalValidateOption + methods: + - globalOption + - parseOption + - readFileOption + - validateOption + comment: | + GlobalValidateOption describes an Option that can be passed to `jwt.Settings()` and `jwt.Validate()` +options: + - ident: AcceptableSkew + interface: ValidateOption + argument_type: time.Duration + comment: | + WithAcceptableSkew specifies the duration in which exp, iat and nbf + claims may differ by. This value should be positive + - ident: Truncation + interface: GlobalValidateOption + argument_type: time.Duration + comment: | + WithTruncation specifies the amount that should be used when + truncating time values used during time-based validation routines, + and by default this is disabled. + + In v2 of this library, time values were truncated down to second accuracy, i.e. + 1.0000001 seconds is truncated to 1 second. 
To restore this behavior, set + this value to `time.Second` + + Since v3, this option can be passed to `jwt.Settings()` to set the truncation + value globally, as well as per invocation of `jwt.Validate()` + - ident: Clock + interface: ValidateOption + argument_type: Clock + comment: | + WithClock specifies the `Clock` to be used when verifying + exp, iat and nbf claims. + - ident: Context + interface: ValidateOption + argument_type: context.Context + comment: | + WithContext allows you to specify a context.Context object to be used + with `jwt.Validate()` option. + + Please be aware that in the next major release of this library, + `jwt.Validate()`'s signature will change to include an explicit + `context.Context` object. + - ident: ResetValidators + interface: ValidateOption + argument_type: bool + comment: | + WithResetValidators specifies that the default validators should be + reset before applying the custom validators. By default `jwt.Validate()` + checks for the validity of JWT by checking `exp`, `nbf`, and `iat`, even + when you specify more validators through other options. + + You SHOULD NOT use this option unless you know exactly what you are doing, + as this will pose significant security issues when used incorrectly. + + Using this option with the value `true` will remove all default checks, + and will expect you to specify validators as options. This is useful when you + want to skip the default validators and only use specific validators, such as + for https://openid.net/specs/openid-connect-rpinitiated-1_0.html, where + the token could be accepted even if the token is expired. + + If you set this option to true and you do not specify any validators, + `jwt.Validate()` will return an error. + + The default value is `false` (`iat`, `exp`, and `nbf` are automatically checked). 
+ - ident: FlattenAudience + interface: GlobalOption + argument_type: bool + comment: | + WithFlattenAudience specifies the the `jwt.FlattenAudience` option on + every token defaults to enabled. You can still disable this on a per-object + basis using the `jwt.Options().Disable(jwt.FlattenAudience)` method call. + + See the documentation for `jwt.TokenOptionSet`, `(jwt.Token).Options`, and + `jwt.FlattenAudience` for more details + - ident: FormKey + interface: ParseOption + argument_type: string + comment: | + WithFormKey is used to specify header keys to search for tokens. + + While the type system allows this option to be passed to jwt.Parse() directly, + doing so will have no effect. Only use it for HTTP request parsing functions + - ident: HeaderKey + interface: ParseOption + argument_type: string + comment: | + WithHeaderKey is used to specify header keys to search for tokens. + + While the type system allows this option to be passed to `jwt.Parse()` directly, + doing so will have no effect. Only use it for HTTP request parsing functions + - ident: Cookie + interface: ParseOption + argument_type: '**http.Cookie' + comment: | + WithCookie is used to specify a variable to store the cookie used when `jwt.ParseCookie()` + is called. This allows you to inspect the cookie for additional information after a successful + parsing of the JWT token stored in the cookie. + + While the type system allows this option to be passed to `jwt.Parse()` directly, + doing so will have no effect. Only use it for HTTP request parsing functions + - ident: CookieKey + interface: ParseOption + argument_type: string + comment: | + WithCookieKey is used to specify cookie keys to search for tokens. + + While the type system allows this option to be passed to `jwt.Parse()` directly, + doing so will have no effect. 
Only use it for HTTP request parsing functions + - ident: Token + interface: ParseOption + argument_type: Token + comment: | + WithToken specifies the token instance in which the resulting JWT is stored + when parsing JWT tokens + - ident: Validate + interface: ParseOption + argument_type: bool + comment: | + WithValidate is passed to `Parse()` method to denote that the + validation of the JWT token should be performed (or not) after + a successful parsing of the incoming payload. + + This option is enabled by default. + + If you would like disable validation, + you must use `jwt.WithValidate(false)` or use `jwt.ParseInsecure()` + - ident: Verify + interface: ParseOption + argument_type: bool + comment: | + WithVerify is passed to `Parse()` method to denote that the + signature verification should be performed after a successful + deserialization of the incoming payload. + + This option is enabled by default. + + If you do not provide any verification key sources, `jwt.Parse()` + would return an error. + + If you would like to only parse the JWT payload and not verify it, + you must use `jwt.WithVerify(false)` or use `jwt.ParseInsecure()` + - ident: KeyProvider + interface: ParseOption + argument_type: jws.KeyProvider + comment: | + WithKeyProvider allows users to specify an object to provide keys to + sign/verify tokens using arbitrary code. Please read the documentation + for `jws.KeyProvider` in the `jws` package for details on how this works. + - ident: Pedantic + interface: ParseOption + argument_type: bool + comment: | + WithPedantic enables pedantic mode for parsing JWTs. Currently this only + applies to checking for the correct `typ` and/or `cty` when necessary. + - ident: EncryptOption + interface: EncryptOption + argument_type: jwe.EncryptOption + comment: | + WithEncryptOption provides an escape hatch for cases where extra options to + `(jws.Serializer).Encrypt()` must be specified when using `jwt.Sign()`. Normally you do not + need to use this. 
+ - ident: SignOption + interface: SignOption + argument_type: jws.SignOption + comment: | + WithSignOption provides an escape hatch for cases where extra options to + `jws.Sign()` must be specified when using `jwt.Sign()`. Normally you do not + need to use this. + - ident: Validator + interface: ValidateOption + argument_type: Validator + comment: | + WithValidator validates the token with the given Validator. + + For example, in order to validate tokens that are only valid during August, you would write + + validator := jwt.ValidatorFunc(func(_ context.Context, t jwt.Token) error { + if time.Now().Month() != 8 { + return fmt.Errorf(`tokens are only valid during August!`) + } + return nil + }) + err := jwt.Validate(token, jwt.WithValidator(validator)) + - ident: FS + interface: ReadFileOption + argument_type: fs.FS + comment: | + WithFS specifies the source `fs.FS` object to read the file from. + - ident: NumericDateParsePrecision + interface: GlobalOption + argument_type: int + comment: | + WithNumericDateParsePrecision sets the precision up to which the + library uses to parse fractional dates found in the numeric date + fields. Default is 0 (second, no fractions), max is 9 (nanosecond) + - ident: NumericDateFormatPrecision + interface: GlobalOption + argument_type: int + comment: | + WithNumericDateFormatPrecision sets the precision up to which the + library uses to format fractional dates found in the numeric date + fields. Default is 0 (second, no fractions), max is 9 (nanosecond) + - ident: NumericDateParsePedantic + interface: GlobalOption + argument_type: bool + comment: | + WithNumericDateParsePedantic specifies if the parser should behave + in a pedantic manner when parsing numeric dates. Normally this library + attempts to interpret timestamps as a numeric value representing + number of seconds (with an optional fractional part), but if that fails + it tries to parse using a RFC3339 parser. This allows us to parse + payloads from non-conforming servers. 
+ + However, when you set WithNumericDateParePedantic to `true`, the + RFC3339 parser is not tried, and we expect a numeric value strictly + - ident: Base64Encoder + interface: SignParseOption + argument_type: jws.Base64Encoder + comment: | + WithBase64Encoder specifies the base64 encoder to use for signing + tokens and verifying JWS signatures. \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options_gen.go new file mode 100644 index 0000000000..3a644a6e4c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/options_gen.go @@ -0,0 +1,495 @@ +// Code generated by tools/cmd/genoptions/main.go. DO NOT EDIT. + +package jwt + +import ( + "context" + "io/fs" + "net/http" + "time" + + "github.com/lestrrat-go/jwx/v3/jwe" + "github.com/lestrrat-go/jwx/v3/jws" + "github.com/lestrrat-go/option/v2" +) + +type Option = option.Interface + +// EncryptOption describes an Option that can be passed to (jwt.Serializer).Encrypt +type EncryptOption interface { + Option + encryptOption() +} + +type encryptOption struct { + Option +} + +func (*encryptOption) encryptOption() {} + +// GlobalOption describes an Option that can be passed to `Settings()`. +type GlobalOption interface { + Option + globalOption() +} + +type globalOption struct { + Option +} + +func (*globalOption) globalOption() {} + +// GlobalValidateOption describes an Option that can be passed to `jwt.Settings()` and `jwt.Validate()` +type GlobalValidateOption interface { + Option + globalOption() + parseOption() + readFileOption() + validateOption() +} + +type globalValidateOption struct { + Option +} + +func (*globalValidateOption) globalOption() {} + +func (*globalValidateOption) parseOption() {} + +func (*globalValidateOption) readFileOption() {} + +func (*globalValidateOption) validateOption() {} + +// ParseOption describes an Option that can be passed to `jwt.Parse()`. 
+// ParseOption also implements ReadFileOption, therefore it may be +// safely pass them to `jwt.ReadFile()` +type ParseOption interface { + Option + parseOption() + readFileOption() +} + +type parseOption struct { + Option +} + +func (*parseOption) parseOption() {} + +func (*parseOption) readFileOption() {} + +// ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile` +type ReadFileOption interface { + Option + readFileOption() +} + +type readFileOption struct { + Option +} + +func (*readFileOption) readFileOption() {} + +// SignEncryptParseOption describes an Option that can be passed to both `jwt.Sign()` or +// `jwt.Parse()` +type SignEncryptParseOption interface { + Option + parseOption() + encryptOption() + readFileOption() + signOption() +} + +type signEncryptParseOption struct { + Option +} + +func (*signEncryptParseOption) parseOption() {} + +func (*signEncryptParseOption) encryptOption() {} + +func (*signEncryptParseOption) readFileOption() {} + +func (*signEncryptParseOption) signOption() {} + +// SignOption describes an Option that can be passed to `jwt.Sign()` or +// (jwt.Serializer).Sign +type SignOption interface { + Option + signOption() +} + +type signOption struct { + Option +} + +func (*signOption) signOption() {} + +// SignParseOption describes an Option that can be passed to both `jwt.Sign()` or +// `jwt.Parse()` +type SignParseOption interface { + Option + signOption() + parseOption() + readFileOption() +} + +type signParseOption struct { + Option +} + +func (*signParseOption) signOption() {} + +func (*signParseOption) parseOption() {} + +func (*signParseOption) readFileOption() {} + +// ValidateOption describes an Option that can be passed to Validate(). 
+// ValidateOption also implements ParseOption, therefore it may be +// safely passed to `Parse()` (and thus `jwt.ReadFile()`) +type ValidateOption interface { + Option + parseOption() + readFileOption() + validateOption() +} + +type validateOption struct { + Option +} + +func (*validateOption) parseOption() {} + +func (*validateOption) readFileOption() {} + +func (*validateOption) validateOption() {} + +type identAcceptableSkew struct{} +type identBase64Encoder struct{} +type identClock struct{} +type identContext struct{} +type identCookie struct{} +type identCookieKey struct{} +type identEncryptOption struct{} +type identFS struct{} +type identFlattenAudience struct{} +type identFormKey struct{} +type identHeaderKey struct{} +type identKeyProvider struct{} +type identNumericDateFormatPrecision struct{} +type identNumericDateParsePedantic struct{} +type identNumericDateParsePrecision struct{} +type identPedantic struct{} +type identResetValidators struct{} +type identSignOption struct{} +type identToken struct{} +type identTruncation struct{} +type identValidate struct{} +type identValidator struct{} +type identVerify struct{} + +func (identAcceptableSkew) String() string { + return "WithAcceptableSkew" +} + +func (identBase64Encoder) String() string { + return "WithBase64Encoder" +} + +func (identClock) String() string { + return "WithClock" +} + +func (identContext) String() string { + return "WithContext" +} + +func (identCookie) String() string { + return "WithCookie" +} + +func (identCookieKey) String() string { + return "WithCookieKey" +} + +func (identEncryptOption) String() string { + return "WithEncryptOption" +} + +func (identFS) String() string { + return "WithFS" +} + +func (identFlattenAudience) String() string { + return "WithFlattenAudience" +} + +func (identFormKey) String() string { + return "WithFormKey" +} + +func (identHeaderKey) String() string { + return "WithHeaderKey" +} + +func (identKeyProvider) String() string { + return 
"WithKeyProvider" +} + +func (identNumericDateFormatPrecision) String() string { + return "WithNumericDateFormatPrecision" +} + +func (identNumericDateParsePedantic) String() string { + return "WithNumericDateParsePedantic" +} + +func (identNumericDateParsePrecision) String() string { + return "WithNumericDateParsePrecision" +} + +func (identPedantic) String() string { + return "WithPedantic" +} + +func (identResetValidators) String() string { + return "WithResetValidators" +} + +func (identSignOption) String() string { + return "WithSignOption" +} + +func (identToken) String() string { + return "WithToken" +} + +func (identTruncation) String() string { + return "WithTruncation" +} + +func (identValidate) String() string { + return "WithValidate" +} + +func (identValidator) String() string { + return "WithValidator" +} + +func (identVerify) String() string { + return "WithVerify" +} + +// WithAcceptableSkew specifies the duration in which exp, iat and nbf +// claims may differ by. This value should be positive +func WithAcceptableSkew(v time.Duration) ValidateOption { + return &validateOption{option.New(identAcceptableSkew{}, v)} +} + +// WithBase64Encoder specifies the base64 encoder to use for signing +// tokens and verifying JWS signatures. +func WithBase64Encoder(v jws.Base64Encoder) SignParseOption { + return &signParseOption{option.New(identBase64Encoder{}, v)} +} + +// WithClock specifies the `Clock` to be used when verifying +// exp, iat and nbf claims. +func WithClock(v Clock) ValidateOption { + return &validateOption{option.New(identClock{}, v)} +} + +// WithContext allows you to specify a context.Context object to be used +// with `jwt.Validate()` option. +// +// Please be aware that in the next major release of this library, +// `jwt.Validate()`'s signature will change to include an explicit +// `context.Context` object. 
+func WithContext(v context.Context) ValidateOption { + return &validateOption{option.New(identContext{}, v)} +} + +// WithCookie is used to specify a variable to store the cookie used when `jwt.ParseCookie()` +// is called. This allows you to inspect the cookie for additional information after a successful +// parsing of the JWT token stored in the cookie. +// +// While the type system allows this option to be passed to `jwt.Parse()` directly, +// doing so will have no effect. Only use it for HTTP request parsing functions +func WithCookie(v **http.Cookie) ParseOption { + return &parseOption{option.New(identCookie{}, v)} +} + +// WithCookieKey is used to specify cookie keys to search for tokens. +// +// While the type system allows this option to be passed to `jwt.Parse()` directly, +// doing so will have no effect. Only use it for HTTP request parsing functions +func WithCookieKey(v string) ParseOption { + return &parseOption{option.New(identCookieKey{}, v)} +} + +// WithEncryptOption provides an escape hatch for cases where extra options to +// `(jws.Serializer).Encrypt()` must be specified when using `jwt.Sign()`. Normally you do not +// need to use this. +func WithEncryptOption(v jwe.EncryptOption) EncryptOption { + return &encryptOption{option.New(identEncryptOption{}, v)} +} + +// WithFS specifies the source `fs.FS` object to read the file from. +func WithFS(v fs.FS) ReadFileOption { + return &readFileOption{option.New(identFS{}, v)} +} + +// WithFlattenAudience specifies the the `jwt.FlattenAudience` option on +// every token defaults to enabled. You can still disable this on a per-object +// basis using the `jwt.Options().Disable(jwt.FlattenAudience)` method call. 
+// +// See the documentation for `jwt.TokenOptionSet`, `(jwt.Token).Options`, and +// `jwt.FlattenAudience` for more details +func WithFlattenAudience(v bool) GlobalOption { + return &globalOption{option.New(identFlattenAudience{}, v)} +} + +// WithFormKey is used to specify header keys to search for tokens. +// +// While the type system allows this option to be passed to jwt.Parse() directly, +// doing so will have no effect. Only use it for HTTP request parsing functions +func WithFormKey(v string) ParseOption { + return &parseOption{option.New(identFormKey{}, v)} +} + +// WithHeaderKey is used to specify header keys to search for tokens. +// +// While the type system allows this option to be passed to `jwt.Parse()` directly, +// doing so will have no effect. Only use it for HTTP request parsing functions +func WithHeaderKey(v string) ParseOption { + return &parseOption{option.New(identHeaderKey{}, v)} +} + +// WithKeyProvider allows users to specify an object to provide keys to +// sign/verify tokens using arbitrary code. Please read the documentation +// for `jws.KeyProvider` in the `jws` package for details on how this works. +func WithKeyProvider(v jws.KeyProvider) ParseOption { + return &parseOption{option.New(identKeyProvider{}, v)} +} + +// WithNumericDateFormatPrecision sets the precision up to which the +// library uses to format fractional dates found in the numeric date +// fields. Default is 0 (second, no fractions), max is 9 (nanosecond) +func WithNumericDateFormatPrecision(v int) GlobalOption { + return &globalOption{option.New(identNumericDateFormatPrecision{}, v)} +} + +// WithNumericDateParsePedantic specifies if the parser should behave +// in a pedantic manner when parsing numeric dates. Normally this library +// attempts to interpret timestamps as a numeric value representing +// number of seconds (with an optional fractional part), but if that fails +// it tries to parse using a RFC3339 parser. 
This allows us to parse +// payloads from non-conforming servers. +// +// However, when you set WithNumericDateParePedantic to `true`, the +// RFC3339 parser is not tried, and we expect a numeric value strictly +func WithNumericDateParsePedantic(v bool) GlobalOption { + return &globalOption{option.New(identNumericDateParsePedantic{}, v)} +} + +// WithNumericDateParsePrecision sets the precision up to which the +// library uses to parse fractional dates found in the numeric date +// fields. Default is 0 (second, no fractions), max is 9 (nanosecond) +func WithNumericDateParsePrecision(v int) GlobalOption { + return &globalOption{option.New(identNumericDateParsePrecision{}, v)} +} + +// WithPedantic enables pedantic mode for parsing JWTs. Currently this only +// applies to checking for the correct `typ` and/or `cty` when necessary. +func WithPedantic(v bool) ParseOption { + return &parseOption{option.New(identPedantic{}, v)} +} + +// WithResetValidators specifies that the default validators should be +// reset before applying the custom validators. By default `jwt.Validate()` +// checks for the validity of JWT by checking `exp`, `nbf`, and `iat`, even +// when you specify more validators through other options. +// +// You SHOULD NOT use this option unless you know exactly what you are doing, +// as this will pose significant security issues when used incorrectly. +// +// Using this option with the value `true` will remove all default checks, +// and will expect you to specify validators as options. This is useful when you +// want to skip the default validators and only use specific validators, such as +// for https://openid.net/specs/openid-connect-rpinitiated-1_0.html, where +// the token could be accepted even if the token is expired. +// +// If you set this option to true and you do not specify any validators, +// `jwt.Validate()` will return an error. +// +// The default value is `false` (`iat`, `exp`, and `nbf` are automatically checked). 
+func WithResetValidators(v bool) ValidateOption { + return &validateOption{option.New(identResetValidators{}, v)} +} + +// WithSignOption provides an escape hatch for cases where extra options to +// `jws.Sign()` must be specified when using `jwt.Sign()`. Normally you do not +// need to use this. +func WithSignOption(v jws.SignOption) SignOption { + return &signOption{option.New(identSignOption{}, v)} +} + +// WithToken specifies the token instance in which the resulting JWT is stored +// when parsing JWT tokens +func WithToken(v Token) ParseOption { + return &parseOption{option.New(identToken{}, v)} +} + +// WithTruncation specifies the amount that should be used when +// truncating time values used during time-based validation routines, +// and by default this is disabled. +// +// In v2 of this library, time values were truncated down to second accuracy, i.e. +// 1.0000001 seconds is truncated to 1 second. To restore this behavior, set +// this value to `time.Second` +// +// Since v3, this option can be passed to `jwt.Settings()` to set the truncation +// value globally, as well as per invocation of `jwt.Validate()` +func WithTruncation(v time.Duration) GlobalValidateOption { + return &globalValidateOption{option.New(identTruncation{}, v)} +} + +// WithValidate is passed to `Parse()` method to denote that the +// validation of the JWT token should be performed (or not) after +// a successful parsing of the incoming payload. +// +// This option is enabled by default. +// +// If you would like disable validation, +// you must use `jwt.WithValidate(false)` or use `jwt.ParseInsecure()` +func WithValidate(v bool) ParseOption { + return &parseOption{option.New(identValidate{}, v)} +} + +// WithValidator validates the token with the given Validator. 
+// +// For example, in order to validate tokens that are only valid during August, you would write +// +// validator := jwt.ValidatorFunc(func(_ context.Context, t jwt.Token) error { +// if time.Now().Month() != 8 { +// return fmt.Errorf(`tokens are only valid during August!`) +// } +// return nil +// }) +// err := jwt.Validate(token, jwt.WithValidator(validator)) +func WithValidator(v Validator) ValidateOption { + return &validateOption{option.New(identValidator{}, v)} +} + +// WithVerify is passed to `Parse()` method to denote that the +// signature verification should be performed after a successful +// deserialization of the incoming payload. +// +// This option is enabled by default. +// +// If you do not provide any verification key sources, `jwt.Parse()` +// would return an error. +// +// If you would like to only parse the JWT payload and not verify it, +// you must use `jwt.WithVerify(false)` or use `jwt.ParseInsecure()` +func WithVerify(v bool) ParseOption { + return &parseOption{option.New(identVerify{}, v)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/serialize.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/serialize.go new file mode 100644 index 0000000000..9d3bdac94a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/serialize.go @@ -0,0 +1,264 @@ +package jwt + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/jwe" + "github.com/lestrrat-go/jwx/v3/jws" +) + +type SerializeCtx interface { + Step() int + Nested() bool +} + +type serializeCtx struct { + step int + nested bool +} + +func (ctx *serializeCtx) Step() int { + return ctx.step +} + +func (ctx *serializeCtx) Nested() bool { + return ctx.nested +} + +type SerializeStep interface { + Serialize(SerializeCtx, any) (any, error) +} + +// errStep is always an error. 
used to indicate that a method like +// serializer.Sign or Encrypt already errored out on configuration +type errStep struct { + err error +} + +func (e errStep) Serialize(_ SerializeCtx, _ any) (any, error) { + return nil, e.err +} + +// Serializer is a generic serializer for JWTs. Whereas other convenience +// functions can only do one thing (such as generate a JWS signed JWT), +// Using this construct you can serialize the token however you want. +// +// By default, the serializer only marshals the token into a JSON payload. +// You must set up the rest of the steps that should be taken by the +// serializer. +// +// For example, to marshal the token into JSON, then apply JWS and JWE +// in that order, you would do: +// +// serialized, err := jwt.NewSerializer(). +// Sign(jwa.RS256, key). +// Encrypt(jwe.WithEncryptOption(jwe.WithKey(jwa.RSA_OAEP(), publicKey))). +// Serialize(token) +// +// The `jwt.Sign()` function is equivalent to +// +// serialized, err := jwt.NewSerializer(). +// Sign(...args...). +// Serialize(token) +type Serializer struct { + steps []SerializeStep +} + +// NewSerializer creates a new empty serializer. +func NewSerializer() *Serializer { + return &Serializer{} +} + +// Reset clears all of the registered steps. 
+func (s *Serializer) Reset() *Serializer { + s.steps = nil + return s +} + +// Step adds a new Step to the serialization process +func (s *Serializer) Step(step SerializeStep) *Serializer { + s.steps = append(s.steps, step) + return s +} + +type jsonSerializer struct{} + +func (jsonSerializer) Serialize(_ SerializeCtx, v any) (any, error) { + token, ok := v.(Token) + if !ok { + return nil, fmt.Errorf(`invalid input: expected jwt.Token`) + } + + buf, err := json.Marshal(token) + if err != nil { + return nil, fmt.Errorf(`failed to serialize as JSON: %w`, err) + } + return buf, nil +} + +type genericHeader interface { + Get(string, any) error + Set(string, any) error + Has(string) bool +} + +func setTypeOrCty(ctx SerializeCtx, hdrs genericHeader) error { + // cty and typ are common between JWE/JWS, so we don't use + // the constants in jws/jwe package here + const typKey = `typ` + const ctyKey = `cty` + + if ctx.Step() == 1 { + // We are executed immediately after json marshaling + if !hdrs.Has(typKey) { + if err := hdrs.Set(typKey, `JWT`); err != nil { + return fmt.Errorf(`failed to set %s key to "JWT": %w`, typKey, err) + } + } + } else { + if ctx.Nested() { + // If this is part of a nested sequence, we should set cty = 'JWT' + // https://datatracker.ietf.org/doc/html/rfc7519#section-5.2 + if err := hdrs.Set(ctyKey, `JWT`); err != nil { + return fmt.Errorf(`failed to set %s key to "JWT": %w`, ctyKey, err) + } + } + } + return nil +} + +type jwsSerializer struct { + options []jws.SignOption +} + +func (s *jwsSerializer) Serialize(ctx SerializeCtx, v any) (any, error) { + payload, ok := v.([]byte) + if !ok { + return nil, fmt.Errorf(`expected []byte as input`) + } + + for _, option := range s.options { + var pc interface{ Protected(jws.Headers) jws.Headers } + if err := option.Value(&pc); err != nil { + continue + } + hdrs := pc.Protected(jws.NewHeaders()) + if err := setTypeOrCty(ctx, hdrs); err != nil { + return nil, err // this is already wrapped + } + + // JWTs 
MUST NOT use b64 = false + // https://datatracker.ietf.org/doc/html/rfc7797#section-7 + var b64 bool + if err := hdrs.Get("b64", &b64); err == nil { + if !b64 { // b64 = false + return nil, fmt.Errorf(`b64 cannot be false for JWTs`) + } + } + } + return jws.Sign(payload, s.options...) +} + +func (s *Serializer) Sign(options ...SignOption) *Serializer { + var soptions []jws.SignOption + if l := len(options); l > 0 { + // we need to from SignOption to Option because ... reasons + // (todo: when go1.18 prevails, use type parameters + rawoptions := make([]Option, l) + for i, option := range options { + rawoptions[i] = option + } + + converted, err := toSignOptions(rawoptions...) + if err != nil { + return s.Step(errStep{fmt.Errorf(`(jwt.Serializer).Sign: failed to convert options into jws.SignOption: %w`, err)}) + } + soptions = converted + } + return s.sign(soptions...) +} + +func (s *Serializer) sign(options ...jws.SignOption) *Serializer { + return s.Step(&jwsSerializer{ + options: options, + }) +} + +type jweSerializer struct { + options []jwe.EncryptOption +} + +func (s *jweSerializer) Serialize(ctx SerializeCtx, v any) (any, error) { + payload, ok := v.([]byte) + if !ok { + return nil, fmt.Errorf(`expected []byte as input`) + } + + hdrs := jwe.NewHeaders() + if err := setTypeOrCty(ctx, hdrs); err != nil { + return nil, err // this is already wrapped + } + + options := append([]jwe.EncryptOption{jwe.WithMergeProtectedHeaders(true), jwe.WithProtectedHeaders(hdrs)}, s.options...) + return jwe.Encrypt(payload, options...) +} + +// Encrypt specifies the JWT to be serialized as an encrypted payload. +// +// One notable difference between this method and `jwe.Encrypt()` is that +// while `jwe.Encrypt()` OVERWRITES the previous headers when `jwe.WithProtectedHeaders()` +// is provided, this method MERGES them. This is due to the fact that we +// MUST add some extra headers to construct a proper JWE message. +// Be careful when you pass multiple `jwe.EncryptOption`s. 
+func (s *Serializer) Encrypt(options ...EncryptOption) *Serializer { + var eoptions []jwe.EncryptOption + if l := len(options); l > 0 { + // we need to from SignOption to Option because ... reasons + // (todo: when go1.18 prevails, use type parameters + rawoptions := make([]Option, l) + for i, option := range options { + rawoptions[i] = option + } + + converted, err := toEncryptOptions(rawoptions...) + if err != nil { + return s.Step(errStep{fmt.Errorf(`(jwt.Serializer).Encrypt: failed to convert options into jwe.EncryptOption: %w`, err)}) + } + eoptions = converted + } + return s.encrypt(eoptions...) +} + +func (s *Serializer) encrypt(options ...jwe.EncryptOption) *Serializer { + return s.Step(&jweSerializer{ + options: options, + }) +} + +func (s *Serializer) Serialize(t Token) ([]byte, error) { + steps := make([]SerializeStep, len(s.steps)+1) + steps[0] = jsonSerializer{} + for i, step := range s.steps { + steps[i+1] = step + } + + var ctx serializeCtx + ctx.nested = len(s.steps) > 1 + var payload any = t + for i, step := range steps { + ctx.step = i + v, err := step.Serialize(&ctx, payload) + if err != nil { + return nil, fmt.Errorf(`failed to serialize token at step #%d: %w`, i+1, err) + } + payload = v + } + + res, ok := payload.([]byte) + if !ok { + return nil, fmt.Errorf(`invalid serialization produced`) + } + + return res, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_gen.go new file mode 100644 index 0000000000..2361ff5621 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_gen.go @@ -0,0 +1,635 @@ +// Code generated by tools/cmd/genjwt/main.go. DO NOT EDIT. 
+ +package jwt + +import ( + "bytes" + "fmt" + "sort" + "sync" + "time" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v3/internal/json" + "github.com/lestrrat-go/jwx/v3/internal/pool" + "github.com/lestrrat-go/jwx/v3/internal/tokens" + jwterrs "github.com/lestrrat-go/jwx/v3/jwt/internal/errors" + "github.com/lestrrat-go/jwx/v3/jwt/internal/types" +) + +const ( + AudienceKey = "aud" + ExpirationKey = "exp" + IssuedAtKey = "iat" + IssuerKey = "iss" + JwtIDKey = "jti" + NotBeforeKey = "nbf" + SubjectKey = "sub" +) + +// stdClaimNames is a list of all standard claim names defined in the JWT specification. +var stdClaimNames = []string{AudienceKey, ExpirationKey, IssuedAtKey, IssuerKey, JwtIDKey, NotBeforeKey, SubjectKey} + +// Token represents a generic JWT token. +// which are type-aware (to an extent). Other claims may be accessed via the `Get`/`Set` +// methods but their types are not taken into consideration at all. If you have non-standard +// claims that you must frequently access, consider creating accessors functions +// like the following +// +// func SetFoo(tok jwt.Token) error +// func GetFoo(tok jwt.Token) (*Customtyp, error) +// +// Embedding jwt.Token into another struct is not recommended, because +// jwt.Token needs to handle private claims, and this really does not +// work well when it is embedded in other structure +type Token interface { + // Audience returns the value for "aud" field of the token + Audience() ([]string, bool) + + // Expiration returns the value for "exp" field of the token + Expiration() (time.Time, bool) + + // IssuedAt returns the value for "iat" field of the token + IssuedAt() (time.Time, bool) + + // Issuer returns the value for "iss" field of the token + Issuer() (string, bool) + + // JwtID returns the value for "jti" field of the token + JwtID() (string, bool) + + // NotBefore returns the value for "nbf" field of the token + NotBefore() (time.Time, bool) + + // Subject returns the value for "sub" field 
of the token + Subject() (string, bool) + + // Get is used to extract the value of any claim, including non-standard claims, out of the token. + // + // The first argument is the name of the claim. The second argument is a pointer + // to a variable that will receive the value of the claim. The method returns + // an error if the claim does not exist, or if the value cannot be assigned to + // the destination variable. Note that a field is considered to "exist" even if + // the value is empty-ish (e.g. 0, false, ""), as long as it is explicitly set. + // + // For standard claims, you can use the corresponding getter method, such as + // `Issuer()`, `Subject()`, `Audience()`, `IssuedAt()`, `NotBefore()`, `ExpiresAt()` + // + // Note that fields of JWS/JWE are NOT accessible through this method. You need + // to use `jws.Parse` and `jwe.Parse` to obtain the JWS/JWE message (and NOT + // the payload, which presumably is the JWT), and then use their `Get` methods in their respective packages + Get(string, any) error + + // Set assigns a value to the corresponding field in the token. Some + // pre-defined fields such as `nbf`, `iat`, `iss` need their values to + // be of a specific type. See the other getter methods in this interface + // for the types of each of these fields + Set(string, any) error + + // Has returns true if the specified claim has a value, even if + // the value is empty-ish (e.g. 0, false, "") as long as it has been + // explicitly set. + Has(string) bool + Remove(string) error + + // Options returns the per-token options associated with this token. 
+ // The options set value will be copied when the token is cloned via `Clone()` + // but it will not survive when the token goes through marshaling/unmarshaling + // such as `json.Marshal` and `json.Unmarshal` + Options() *TokenOptionSet + Clone() (Token, error) + Keys() []string +} +type stdToken struct { + mu *sync.RWMutex + dc DecodeCtx // per-object context for decoding + options TokenOptionSet // per-object option + audience types.StringList // https://tools.ietf.org/html/rfc7519#section-4.1.3 + expiration *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.4 + issuedAt *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.6 + issuer *string // https://tools.ietf.org/html/rfc7519#section-4.1.1 + jwtID *string // https://tools.ietf.org/html/rfc7519#section-4.1.7 + notBefore *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.5 + subject *string // https://tools.ietf.org/html/rfc7519#section-4.1.2 + privateClaims map[string]any +} + +// New creates a standard token, with minimal knowledge of +// possible claims. Standard claims include"aud", "exp", "iat", "iss", "jti", "nbf" and "sub". 
+// Convenience accessors are provided for these standard claims +func New() Token { + return &stdToken{ + mu: &sync.RWMutex{}, + privateClaims: make(map[string]any), + options: DefaultOptionSet(), + } +} + +func (t *stdToken) Options() *TokenOptionSet { + return &t.options +} + +func (t *stdToken) Has(name string) bool { + t.mu.RLock() + defer t.mu.RUnlock() + switch name { + case AudienceKey: + return t.audience != nil + case ExpirationKey: + return t.expiration != nil + case IssuedAtKey: + return t.issuedAt != nil + case IssuerKey: + return t.issuer != nil + case JwtIDKey: + return t.jwtID != nil + case NotBeforeKey: + return t.notBefore != nil + case SubjectKey: + return t.subject != nil + default: + _, ok := t.privateClaims[name] + return ok + } +} + +func (t *stdToken) Get(name string, dst any) error { + t.mu.RLock() + defer t.mu.RUnlock() + switch name { + case AudienceKey: + if t.audience == nil { + return jwterrs.ClaimNotFoundError{Name: name} + } + if err := blackmagic.AssignIfCompatible(dst, t.audience.Get()); err != nil { + return jwterrs.ClaimAssignmentFailedError{Err: err} + } + return nil + case ExpirationKey: + if t.expiration == nil { + return jwterrs.ClaimNotFoundError{Name: name} + } + if err := blackmagic.AssignIfCompatible(dst, t.expiration.Get()); err != nil { + return jwterrs.ClaimAssignmentFailedError{Err: err} + } + return nil + case IssuedAtKey: + if t.issuedAt == nil { + return jwterrs.ClaimNotFoundError{Name: name} + } + if err := blackmagic.AssignIfCompatible(dst, t.issuedAt.Get()); err != nil { + return jwterrs.ClaimAssignmentFailedError{Err: err} + } + return nil + case IssuerKey: + if t.issuer == nil { + return jwterrs.ClaimNotFoundError{Name: name} + } + if err := blackmagic.AssignIfCompatible(dst, *(t.issuer)); err != nil { + return jwterrs.ClaimAssignmentFailedError{Err: err} + } + return nil + case JwtIDKey: + if t.jwtID == nil { + return jwterrs.ClaimNotFoundError{Name: name} + } + if err := blackmagic.AssignIfCompatible(dst, 
*(t.jwtID)); err != nil { + return jwterrs.ClaimAssignmentFailedError{Err: err} + } + return nil + case NotBeforeKey: + if t.notBefore == nil { + return jwterrs.ClaimNotFoundError{Name: name} + } + if err := blackmagic.AssignIfCompatible(dst, t.notBefore.Get()); err != nil { + return jwterrs.ClaimAssignmentFailedError{Err: err} + } + return nil + case SubjectKey: + if t.subject == nil { + return jwterrs.ClaimNotFoundError{Name: name} + } + if err := blackmagic.AssignIfCompatible(dst, *(t.subject)); err != nil { + return jwterrs.ClaimAssignmentFailedError{Err: err} + } + return nil + default: + v, ok := t.privateClaims[name] + if !ok { + return jwterrs.ClaimNotFoundError{Name: name} + } + if err := blackmagic.AssignIfCompatible(dst, v); err != nil { + return jwterrs.ClaimAssignmentFailedError{Err: err} + } + return nil + } +} + +func (t *stdToken) Remove(key string) error { + t.mu.Lock() + defer t.mu.Unlock() + switch key { + case AudienceKey: + t.audience = nil + case ExpirationKey: + t.expiration = nil + case IssuedAtKey: + t.issuedAt = nil + case IssuerKey: + t.issuer = nil + case JwtIDKey: + t.jwtID = nil + case NotBeforeKey: + t.notBefore = nil + case SubjectKey: + t.subject = nil + default: + delete(t.privateClaims, key) + } + return nil +} + +func (t *stdToken) Set(name string, value any) error { + t.mu.Lock() + defer t.mu.Unlock() + return t.setNoLock(name, value) +} + +func (t *stdToken) DecodeCtx() DecodeCtx { + t.mu.RLock() + defer t.mu.RUnlock() + return t.dc +} + +func (t *stdToken) SetDecodeCtx(v DecodeCtx) { + t.mu.Lock() + defer t.mu.Unlock() + t.dc = v +} + +func (t *stdToken) setNoLock(name string, value any) error { + switch name { + case AudienceKey: + var acceptor types.StringList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, AudienceKey, err) + } + t.audience = acceptor + return nil + case ExpirationKey: + var acceptor types.NumericDate + if err := acceptor.Accept(value); err != nil { + 
return fmt.Errorf(`invalid value for %s key: %w`, ExpirationKey, err) + } + t.expiration = &acceptor + return nil + case IssuedAtKey: + var acceptor types.NumericDate + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, IssuedAtKey, err) + } + t.issuedAt = &acceptor + return nil + case IssuerKey: + if v, ok := value.(string); ok { + t.issuer = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, IssuerKey, value) + case JwtIDKey: + if v, ok := value.(string); ok { + t.jwtID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JwtIDKey, value) + case NotBeforeKey: + var acceptor types.NumericDate + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, NotBeforeKey, err) + } + t.notBefore = &acceptor + return nil + case SubjectKey: + if v, ok := value.(string); ok { + t.subject = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, SubjectKey, value) + default: + if t.privateClaims == nil { + t.privateClaims = map[string]any{} + } + t.privateClaims[name] = value + } + return nil +} + +func (t *stdToken) Audience() ([]string, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + if t.audience != nil { + return t.audience.Get(), true + } + return nil, false +} + +func (t *stdToken) Expiration() (time.Time, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + if t.expiration != nil { + return t.expiration.Get(), true + } + return time.Time{}, false +} + +func (t *stdToken) IssuedAt() (time.Time, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + if t.issuedAt != nil { + return t.issuedAt.Get(), true + } + return time.Time{}, false +} + +func (t *stdToken) Issuer() (string, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + if t.issuer != nil { + return *(t.issuer), true + } + return "", false +} + +func (t *stdToken) JwtID() (string, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + if t.jwtID != nil { + return *(t.jwtID), true + } + 
return "", false +} + +func (t *stdToken) NotBefore() (time.Time, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + if t.notBefore != nil { + return t.notBefore.Get(), true + } + return time.Time{}, false +} + +func (t *stdToken) Subject() (string, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + if t.subject != nil { + return *(t.subject), true + } + return "", false +} + +func (t *stdToken) PrivateClaims() map[string]any { + t.mu.RLock() + defer t.mu.RUnlock() + return t.privateClaims +} + +func (t *stdToken) UnmarshalJSON(buf []byte) error { + t.mu.Lock() + defer t.mu.Unlock() + t.audience = nil + t.expiration = nil + t.issuedAt = nil + t.issuer = nil + t.jwtID = nil + t.notBefore = nil + t.subject = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either tokens.OpenCurlyBracket or tokens.CloseCurlyBracket here. 
+ if tok == tokens.CloseCurlyBracket { // End of object + break LOOP + } else if tok != tokens.OpenCurlyBracket { + return fmt.Errorf(`expected '%c', but got '%c'`, tokens.OpenCurlyBracket, tok) + } + case string: // Objects can only have string keys + switch tok { + case AudienceKey: + var decoded types.StringList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AudienceKey, err) + } + t.audience = decoded + case ExpirationKey: + var decoded types.NumericDate + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ExpirationKey, err) + } + t.expiration = &decoded + case IssuedAtKey: + var decoded types.NumericDate + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, IssuedAtKey, err) + } + t.issuedAt = &decoded + case IssuerKey: + if err := json.AssignNextStringToken(&t.issuer, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, IssuerKey, err) + } + case JwtIDKey: + if err := json.AssignNextStringToken(&t.jwtID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, JwtIDKey, err) + } + case NotBeforeKey: + var decoded types.NumericDate + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, NotBeforeKey, err) + } + t.notBefore = &decoded + case SubjectKey: + if err := json.AssignNextStringToken(&t.subject, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, SubjectKey, err) + } + default: + if dc := t.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + t.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + t.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return 
fmt.Errorf(`invalid token %T`, tok) + } + } + return nil +} + +func (t *stdToken) Keys() []string { + t.mu.RLock() + defer t.mu.RUnlock() + keys := make([]string, 0, 7+len(t.privateClaims)) + if t.audience != nil { + keys = append(keys, AudienceKey) + } + if t.expiration != nil { + keys = append(keys, ExpirationKey) + } + if t.issuedAt != nil { + keys = append(keys, IssuedAtKey) + } + if t.issuer != nil { + keys = append(keys, IssuerKey) + } + if t.jwtID != nil { + keys = append(keys, JwtIDKey) + } + if t.notBefore != nil { + keys = append(keys, NotBeforeKey) + } + if t.subject != nil { + keys = append(keys, SubjectKey) + } + for k := range t.privateClaims { + keys = append(keys, k) + } + return keys +} + +type claimPair struct { + Name string + Value any +} + +var claimPairPool = sync.Pool{ + New: func() any { + return make([]claimPair, 0, 7) + }, +} + +func getClaimPairList() []claimPair { + return claimPairPool.Get().([]claimPair) +} + +func putClaimPairList(list []claimPair) { + list = list[:0] + claimPairPool.Put(list) +} + +// makePairs creates a list of claimPair objects that are sorted by +// their key names. The key names are always their JSON names, and +// the values are already JSON encoded. +// Because makePairs needs to allocate a slice, it _slows_ down +// marshaling of the token to JSON. The upside is that it allows us to +// marshal the token keys in a deterministic order. +// Do we really need it...? Well, technically we don't, but it's so +// much nicer to have this to make the example tests actually work +// deterministically. Also if for whatever reason this becomes a +// performance issue, we can always/ add a flag to use a more _optimized_ code path. +// +// The caller is responsible to call putClaimPairList() to return the +// allocated slice back to the pool. 
+ +func (t *stdToken) makePairs() ([]claimPair, error) { + pairs := getClaimPairList() + if t.audience != nil { + buf, err := json.MarshalAudience(t.audience, t.options.IsEnabled(FlattenAudience)) + if err != nil { + return nil, fmt.Errorf(`failed to encode "aud": %w`, err) + } + pairs = append(pairs, claimPair{Name: AudienceKey, Value: buf}) + } + if t.expiration != nil { + buf, err := json.Marshal(t.expiration.Unix()) + if err != nil { + return nil, fmt.Errorf(`failed to encode "exp": %w`, err) + } + pairs = append(pairs, claimPair{Name: ExpirationKey, Value: buf}) + } + if t.issuedAt != nil { + buf, err := json.Marshal(t.issuedAt.Unix()) + if err != nil { + return nil, fmt.Errorf(`failed to encode "iat": %w`, err) + } + pairs = append(pairs, claimPair{Name: IssuedAtKey, Value: buf}) + } + if t.issuer != nil { + buf, err := json.Marshal(*(t.issuer)) + if err != nil { + return nil, fmt.Errorf(`failed to encode field "iss": %w`, err) + } + pairs = append(pairs, claimPair{Name: IssuerKey, Value: buf}) + } + if t.jwtID != nil { + buf, err := json.Marshal(*(t.jwtID)) + if err != nil { + return nil, fmt.Errorf(`failed to encode field "jti": %w`, err) + } + pairs = append(pairs, claimPair{Name: JwtIDKey, Value: buf}) + } + if t.notBefore != nil { + buf, err := json.Marshal(t.notBefore.Unix()) + if err != nil { + return nil, fmt.Errorf(`failed to encode "nbf": %w`, err) + } + pairs = append(pairs, claimPair{Name: NotBeforeKey, Value: buf}) + } + if t.subject != nil { + buf, err := json.Marshal(*(t.subject)) + if err != nil { + return nil, fmt.Errorf(`failed to encode field "sub": %w`, err) + } + pairs = append(pairs, claimPair{Name: SubjectKey, Value: buf}) + } + for k, v := range t.privateClaims { + buf, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf(`failed to encode field %q: %w`, k, err) + } + pairs = append(pairs, claimPair{Name: k, Value: buf}) + } + + sort.Slice(pairs, func(i, j int) bool { + return pairs[i].Name < pairs[j].Name + }) + + return 
pairs, nil +} + +func (t stdToken) MarshalJSON() ([]byte, error) { + buf := pool.BytesBuffer().Get() + defer pool.BytesBuffer().Put(buf) + pairs, err := t.makePairs() + if err != nil { + return nil, fmt.Errorf(`failed to make pairs: %w`, err) + } + buf.WriteByte(tokens.OpenCurlyBracket) + + for i, pair := range pairs { + if i > 0 { + buf.WriteByte(tokens.Comma) + } + fmt.Fprintf(buf, "%q: %s", pair.Name, pair.Value) + } + buf.WriteByte(tokens.CloseCurlyBracket) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + putClaimPairList(pairs) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go new file mode 100644 index 0000000000..088c4263be --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options.go @@ -0,0 +1,78 @@ +package jwt + +import "sync" + +// TokenOptionSet is a bit flag containing per-token options. +type TokenOptionSet uint64 + +var defaultOptions TokenOptionSet +var defaultOptionsMu sync.RWMutex + +// TokenOption describes a single token option that can be set on +// the per-token option set (TokenOptionSet) +type TokenOption uint64 + +const ( + // FlattenAudience option controls whether the "aud" claim should be flattened + // to a single string upon the token being serialized to JSON. + // + // This is sometimes important when a JWT consumer does not understand that + // the "aud" claim can actually take the form of an array of strings. + // (We have been notified by users that AWS Cognito has manifested this behavior + // at some point) + // + // Unless the global option is set using `jwt.Settings()`, the default value is + // `disabled`, which means that "aud" claims are always rendered as a arrays of + // strings when serialized to JSON. + FlattenAudience TokenOption = 1 << iota + + // MaxPerTokenOption is a marker to denote the last value that an option can take. + // This value has no meaning other than to be used as a marker. 
+ MaxPerTokenOption +) + +// Value returns the uint64 value of a single option +func (o TokenOption) Value() uint64 { + return uint64(o) +} + +// Value returns the uint64 bit flag value of an option set +func (o TokenOptionSet) Value() uint64 { + return uint64(o) +} + +// DefaultOptionSet creates a new TokenOptionSet using the default +// option set. This may differ depending on if/when functions that +// change the global state has been called, such as `jwt.Settings` +func DefaultOptionSet() TokenOptionSet { + return TokenOptionSet(defaultOptions.Value()) +} + +// Clear sets all bits to zero, effectively disabling all options +func (o *TokenOptionSet) Clear() { + *o = TokenOptionSet(uint64(0)) +} + +// Set sets the value of this option set, effectively *replacing* +// the entire option set with the new value. This is NOT the same +// as Enable/Disable. +func (o *TokenOptionSet) Set(s TokenOptionSet) { + *o = s +} + +// Enable sets the appropriate value to enable the option in the +// option set +func (o *TokenOptionSet) Enable(flag TokenOption) { + *o = TokenOptionSet(o.Value() | uint64(flag)) +} + +// Disable sets the appropriate value to disable the option in the +// option set +func (o *TokenOptionSet) Disable(flag TokenOption) { + *o = TokenOptionSet(o.Value() & ^uint64(flag)) +} + +// IsEnabled returns true if the given bit on the option set is enabled. +func (o TokenOptionSet) IsEnabled(flag TokenOption) bool { + return (uint64(o)&uint64(flag) == uint64(flag)) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go new file mode 100644 index 0000000000..c1f333d13b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/token_options_gen.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=TokenOption -output=token_options_gen.go"; DO NOT EDIT. 
+ +package jwt + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[FlattenAudience-1] + _ = x[MaxPerTokenOption-2] +} + +const _TokenOption_name = "FlattenAudienceMaxPerTokenOption" + +var _TokenOption_index = [...]uint8{0, 15, 32} + +func (i TokenOption) String() string { + idx := int(i) - 1 + if i < 1 || idx >= len(_TokenOption_index)-1 { + return "TokenOption(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _TokenOption_name[_TokenOption_index[idx]:_TokenOption_index[idx+1]] +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go b/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go new file mode 100644 index 0000000000..af46868d8b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwt/validate.go @@ -0,0 +1,417 @@ +package jwt + +import ( + "context" + "fmt" + "slices" + "strconv" + "time" + + jwterrs "github.com/lestrrat-go/jwx/v3/jwt/internal/errors" +) + +type Clock interface { + Now() time.Time +} +type ClockFunc func() time.Time + +func (f ClockFunc) Now() time.Time { + return f() +} + +func isSupportedTimeClaim(c string) error { + switch c { + case ExpirationKey, IssuedAtKey, NotBeforeKey: + return nil + } + return fmt.Errorf(`unsupported time claim %s`, strconv.Quote(c)) +} + +func timeClaim(t Token, clock Clock, c string) time.Time { + // We don't check if the claims already exist. It should have been done + // by piggybacking on `required` check. + switch c { + case ExpirationKey: + tv, _ := t.Expiration() + return tv + case IssuedAtKey: + tv, _ := t.IssuedAt() + return tv + case NotBeforeKey: + tv, _ := t.NotBefore() + return tv + case "": + return clock.Now() + } + return time.Time{} // should *NEVER* reach here, but... +} + +// Validate makes sure that the essential claims stand. 
+// +// See the various `WithXXX` functions for optional parameters +// that can control the behavior of this method. +func Validate(t Token, options ...ValidateOption) error { + ctx := context.Background() + trunc := getDefaultTruncation() + + var clock Clock = ClockFunc(time.Now) + var skew time.Duration + var baseValidators = []Validator{ + IsIssuedAtValid(), + IsExpirationValid(), + IsNbfValid(), + } + var extraValidators []Validator + var resetValidators bool + for _, o := range options { + switch o.Ident() { + case identClock{}: + if err := o.Value(&clock); err != nil { + return fmt.Errorf(`jwt.Validate: value for WithClock() option must be jwt.Clock: %w`, err) + } + case identAcceptableSkew{}: + if err := o.Value(&skew); err != nil { + return fmt.Errorf(`jwt.Validate: value for WithAcceptableSkew() option must be time.Duration: %w`, err) + } + case identTruncation{}: + if err := o.Value(&trunc); err != nil { + return fmt.Errorf(`jwt.Validate: value for WithTruncation() option must be time.Duration: %w`, err) + } + case identContext{}: + if err := o.Value(&ctx); err != nil { + return fmt.Errorf(`jwt.Validate: value for WithContext() option must be context.Context: %w`, err) + } + case identResetValidators{}: + if err := o.Value(&resetValidators); err != nil { + return fmt.Errorf(`jwt.Validate: value for WithResetValidators() option must be bool: %w`, err) + } + case identValidator{}: + var v Validator + if err := o.Value(&v); err != nil { + return fmt.Errorf(`jwt.Validate: value for WithValidator() option must be jwt.Validator: %w`, err) + } + switch v := v.(type) { + case *isInTimeRange: + if v.c1 != "" { + if err := isSupportedTimeClaim(v.c1); err != nil { + return err + } + extraValidators = append(extraValidators, IsRequired(v.c1)) + } + if v.c2 != "" { + if err := isSupportedTimeClaim(v.c2); err != nil { + return err + } + extraValidators = append(extraValidators, IsRequired(v.c2)) + } + } + extraValidators = append(extraValidators, v) + } + } + + ctx = 
SetValidationCtxSkew(ctx, skew) + ctx = SetValidationCtxClock(ctx, clock) + ctx = SetValidationCtxTruncation(ctx, trunc) + + var validators []Validator + if !resetValidators { + validators = append(baseValidators, extraValidators...) + } else { + if len(extraValidators) == 0 { + return jwterrs.ValidateErrorf(`no validators specified: jwt.WithResetValidators(true) and no jwt.WithValidator() specified`) + } + validators = extraValidators + } + + for _, v := range validators { + if err := v.Validate(ctx, t); err != nil { + return jwterrs.ValidateErrorf(`validation failed: %w`, err) + } + } + + return nil +} + +type isInTimeRange struct { + c1 string + c2 string + dur time.Duration + less bool // if true, d =< c1 - c2. otherwise d >= c1 - c2 +} + +// MaxDeltaIs implements the logic behind `WithMaxDelta()` option +func MaxDeltaIs(c1, c2 string, dur time.Duration) Validator { + return &isInTimeRange{ + c1: c1, + c2: c2, + dur: dur, + less: true, + } +} + +// MinDeltaIs implements the logic behind `WithMinDelta()` option +func MinDeltaIs(c1, c2 string, dur time.Duration) Validator { + return &isInTimeRange{ + c1: c1, + c2: c2, + dur: dur, + less: false, + } +} + +func (iitr *isInTimeRange) Validate(ctx context.Context, t Token) error { + clock := ValidationCtxClock(ctx) // MUST be populated + skew := ValidationCtxSkew(ctx) // MUST be populated + // We don't check if the claims already exist, because we already did that + // by piggybacking on `required` check. + t1 := timeClaim(t, clock, iitr.c1) + t2 := timeClaim(t, clock, iitr.c2) + if iitr.less { // t1 - t2 <= iitr.dur + // t1 - t2 < iitr.dur + skew + if t1.Sub(t2) > iitr.dur+skew { + return fmt.Errorf(`iitr between %s and %s exceeds %s (skew %s)`, iitr.c1, iitr.c2, iitr.dur, skew) + } + } else { + if t1.Sub(t2) < iitr.dur-skew { + return fmt.Errorf(`iitr between %s and %s is less than %s (skew %s)`, iitr.c1, iitr.c2, iitr.dur, skew) + } + } + return nil +} + +// Validator describes interface to validate a Token. 
+type Validator interface { + // Validate should return an error if a required conditions is not met. + Validate(context.Context, Token) error +} + +// ValidatorFunc is a type of Validator that does not have any +// state, that is implemented as a function +type ValidatorFunc func(context.Context, Token) error + +func (vf ValidatorFunc) Validate(ctx context.Context, tok Token) error { + return vf(ctx, tok) +} + +type identValidationCtxClock struct{} +type identValidationCtxSkew struct{} +type identValidationCtxTruncation struct{} + +func SetValidationCtxClock(ctx context.Context, cl Clock) context.Context { + return context.WithValue(ctx, identValidationCtxClock{}, cl) +} + +func SetValidationCtxTruncation(ctx context.Context, dur time.Duration) context.Context { + return context.WithValue(ctx, identValidationCtxTruncation{}, dur) +} + +func SetValidationCtxSkew(ctx context.Context, dur time.Duration) context.Context { + return context.WithValue(ctx, identValidationCtxSkew{}, dur) +} + +// ValidationCtxClock returns the Clock object associated with +// the current validation context. This value will always be available +// during validation of tokens. +func ValidationCtxClock(ctx context.Context) Clock { + //nolint:forcetypeassert + return ctx.Value(identValidationCtxClock{}).(Clock) +} + +func ValidationCtxSkew(ctx context.Context) time.Duration { + //nolint:forcetypeassert + return ctx.Value(identValidationCtxSkew{}).(time.Duration) +} + +func ValidationCtxTruncation(ctx context.Context) time.Duration { + //nolint:forcetypeassert + return ctx.Value(identValidationCtxTruncation{}).(time.Duration) +} + +// IsExpirationValid is one of the default validators that will be executed. +// It does not need to be specified by users, but it exists as an +// exported field so that you can check what it does. 
+// +// The supplied context.Context object must have the "clock" and "skew" +// populated with appropriate values using SetValidationCtxClock() and +// SetValidationCtxSkew() +func IsExpirationValid() Validator { + return ValidatorFunc(isExpirationValid) +} + +func isExpirationValid(ctx context.Context, t Token) error { + tv, ok := t.Expiration() + if !ok { + return nil + } + + clock := ValidationCtxClock(ctx) // MUST be populated + skew := ValidationCtxSkew(ctx) // MUST be populated + trunc := ValidationCtxTruncation(ctx) // MUST be populated + + now := clock.Now().Truncate(trunc) + ttv := tv.Truncate(trunc) + + // expiration date must be after NOW + if !now.Before(ttv.Add(skew)) { + return TokenExpiredError() + } + return nil +} + +// IsIssuedAtValid is one of the default validators that will be executed. +// It does not need to be specified by users, but it exists as an +// exported field so that you can check what it does. +// +// The supplied context.Context object must have the "clock" and "skew" +// populated with appropriate values using SetValidationCtxClock() and +// SetValidationCtxSkew() +func IsIssuedAtValid() Validator { + return ValidatorFunc(isIssuedAtValid) +} + +func isIssuedAtValid(ctx context.Context, t Token) error { + tv, ok := t.IssuedAt() + if !ok { + return nil + } + + clock := ValidationCtxClock(ctx) // MUST be populated + skew := ValidationCtxSkew(ctx) // MUST be populated + trunc := ValidationCtxTruncation(ctx) // MUST be populated + + now := clock.Now().Truncate(trunc) + ttv := tv.Truncate(trunc) + + if now.Before(ttv.Add(-1 * skew)) { + return InvalidIssuedAtError() + } + return nil +} + +// IsNbfValid is one of the default validators that will be executed. +// It does not need to be specified by users, but it exists as an +// exported field so that you can check what it does. 
+// +// The supplied context.Context object must have the "clock" and "skew" +// populated with appropriate values using SetValidationCtxClock() and +// SetValidationCtxSkew() +func IsNbfValid() Validator { + return ValidatorFunc(isNbfValid) +} + +func isNbfValid(ctx context.Context, t Token) error { + tv, ok := t.NotBefore() + if !ok { + return nil + } + + clock := ValidationCtxClock(ctx) // MUST be populated + skew := ValidationCtxSkew(ctx) // MUST be populated + trunc := ValidationCtxTruncation(ctx) // MUST be populated + + // Truncation always happens even for trunc = 0 because + // we also use this to strip monotonic clocks + now := clock.Now().Truncate(trunc) + ttv := tv.Truncate(trunc) + + // "now" cannot be before t - skew, so we check for now > t - skew + ttv = ttv.Add(-1 * skew) + if now.Before(ttv) { + return TokenNotYetValidError() + } + return nil +} + +type claimContainsString struct { + name string + value string + makeErr func(string, ...any) error +} + +// ClaimContainsString can be used to check if the claim called `name`, which is +// expected to be a list of strings, contains `value`. Currently, because of the +// implementation, this will probably only work for `aud` fields. +func ClaimContainsString(name, value string) Validator { + return claimContainsString{ + name: name, + value: value, + makeErr: fmt.Errorf, + } +} + +func (ccs claimContainsString) Validate(_ context.Context, t Token) error { + var list []string + if err := t.Get(ccs.name, &list); err != nil { + return ccs.makeErr(`claim %q does not exist or is not a []string: %w`, ccs.name, err) + } + + if !slices.Contains(list, ccs.value) { + return ccs.makeErr(`%q not satisfied`, ccs.name) + } + return nil +} + +// audienceClaimContainsString can be used to check if the audience claim, which is +// expected to be a list of strings, contains `value`. 
+func audienceClaimContainsString(value string) Validator { + return claimContainsString{ + name: AudienceKey, + value: value, + makeErr: jwterrs.AudienceErrorf, + } +} + +type claimValueIs struct { + name string + value any + makeErr func(string, ...any) error +} + +// ClaimValueIs creates a Validator that checks if the value of claim `name` +// matches `value`. The comparison is done using a simple `==` comparison, +// and therefore complex comparisons may fail using this code. If you +// need to do more, use a custom Validator. +func ClaimValueIs(name string, value any) Validator { + return &claimValueIs{ + name: name, + value: value, + makeErr: fmt.Errorf, + } +} + +func (cv *claimValueIs) Validate(_ context.Context, t Token) error { + var v any + if err := t.Get(cv.name, &v); err != nil { + return cv.makeErr(`claim %[1]q does not exist or is not a []string: %[2]w`, cv.name, err) + } + if v != cv.value { + return cv.makeErr(`claim %[1]q does not have the expected value`, cv.name) + } + return nil +} + +// issuerClaimValueIs creates a Validator that checks if the issuer claim +// matches `value`. 
+func issuerClaimValueIs(value string) Validator { + return &claimValueIs{ + name: IssuerKey, + value: value, + makeErr: jwterrs.IssuerErrorf, + } +} + +// IsRequired creates a Validator that checks if the required claim `name` +// exists in the token +func IsRequired(name string) Validator { + return isRequired(name) +} + +type isRequired string + +func (ir isRequired) Validate(_ context.Context, t Token) error { + name := string(ir) + if !t.Has(name) { + return jwterrs.MissingRequiredClaimErrorf(name) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/jwx.go b/vendor/github.com/lestrrat-go/jwx/v3/jwx.go new file mode 100644 index 0000000000..fc394e5137 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/jwx.go @@ -0,0 +1,45 @@ +//go:generate ./tools/cmd/genreadfile.sh +//go:generate ./tools/cmd/genoptions.sh +//go:generate stringer -type=FormatKind +//go:generate mv formatkind_string.go formatkind_string_gen.go + +// Package jwx contains tools that deal with the various JWx (JOSE) +// technologies such as JWT, JWS, JWE, etc in Go. +// +// JWS (https://tools.ietf.org/html/rfc7515) +// JWE (https://tools.ietf.org/html/rfc7516) +// JWK (https://tools.ietf.org/html/rfc7517) +// JWA (https://tools.ietf.org/html/rfc7518) +// JWT (https://tools.ietf.org/html/rfc7519) +// +// Examples are stored in a separate Go module (to avoid adding +// dependencies to this module), and thus does not appear in the +// online documentation for this module. 
+// You can find the examples in Github at https://github.com/lestrrat-go/jwx/tree/v3/examples +// +// You can find more high level documentation at Github (https://github.com/lestrrat-go/jwx/tree/v2) +// +// FAQ style documentation can be found in the repository (https://github.com/lestrrat-go/jwx/tree/develop/v3/docs) +package jwx + +import ( + "github.com/lestrrat-go/jwx/v3/internal/json" +) + +// DecoderSettings gives you a access to configure the "encoding/json".Decoder +// used to decode JSON objects within the jwx framework. +func DecoderSettings(options ...JSONOption) { + // XXX We're using this format instead of just passing a single boolean + // in case a new option is to be added some time later + var useNumber bool + for _, option := range options { + switch option.Ident() { + case identUseNumber{}: + if err := option.Value(&useNumber); err != nil { + panic("jwx.DecoderSettings: useNumber option must be a boolean") + } + } + } + + json.DecoderSettings(useNumber) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/options.go b/vendor/github.com/lestrrat-go/jwx/v3/options.go new file mode 100644 index 0000000000..b642a199d8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/options.go @@ -0,0 +1,30 @@ +package jwx + +import "github.com/lestrrat-go/option/v2" + +type identUseNumber struct{} + +type Option = option.Interface + +type JSONOption interface { + Option + isJSONOption() +} + +type jsonOption struct { + Option +} + +func (o *jsonOption) isJSONOption() {} + +func newJSONOption(n any, v any) JSONOption { + return &jsonOption{option.New(n, v)} +} + +// WithUseNumber controls whether the jwx package should unmarshal +// JSON objects with the "encoding/json".Decoder.UseNumber feature on. +// +// Default is false. 
+func WithUseNumber(b bool) JSONOption { + return newJSONOption(identUseNumber{}, b) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/transform/BUILD.bazel b/vendor/github.com/lestrrat-go/jwx/v3/transform/BUILD.bazel new file mode 100644 index 0000000000..3333c6607c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/transform/BUILD.bazel @@ -0,0 +1,32 @@ +load("@rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "transform", + srcs = [ + "filter.go", + "map.go", + ], + importpath = "github.com/lestrrat-go/jwx/v3/transform", + visibility = ["//visibility:public"], + deps = [ + "@com_github_lestrrat_go_blackmagic//:blackmagic", + ], +) + +go_test( + name = "transform_test", + srcs = [ + "map_test.go", + ], + deps = [ + ":transform", + "//jwt", + "@com_github_stretchr_testify//require", + ], +) + +alias( + name = "go_default_library", + actual = ":transform", + visibility = ["//visibility:public"], +) \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jwx/v3/transform/filter.go b/vendor/github.com/lestrrat-go/jwx/v3/transform/filter.go new file mode 100644 index 0000000000..da2972db6a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/transform/filter.go @@ -0,0 +1,115 @@ +package transform + +import "sync" + +// FilterLogic is an interface that defines the logic for filtering objects. +type FilterLogic interface { + Apply(key string, object any) bool +} + +// FilterLogicFunc is a function type that implements the FilterLogic interface. +type FilterLogicFunc func(key string, object any) bool + +func (f FilterLogicFunc) Apply(key string, object any) bool { + return f(key, object) +} + +// Filterable is an interface that must be implemented by objects that can be filtered. +type Filterable[T any] interface { + // Keys returns the names of all fields in the object. + Keys() []string + + // Clone returns a deep copy of the object. + Clone() (T, error) + + // Remove removes a field from the object. 
+ Remove(string) error +} + +// Apply is a standalone function that provides type-safe filtering based on +// specified filter logic. +// +// It returns a new object with only the fields that match the result of `logic.Apply`. +func Apply[T Filterable[T]](object T, logic FilterLogic) (T, error) { + return filterWith(object, logic, true) +} + +// Reject is a standalone function that provides type-safe filtering based on +// specified filter logic. +// +// It returns a new object with only the fields that DO NOT match the result +// of `logic.Apply`. +func Reject[T Filterable[T]](object T, logic FilterLogic) (T, error) { + return filterWith(object, logic, false) +} + +// filterWith is an internal function used by both Apply and Reject functions +// to apply the filtering logic to an object. If include is true, only fields +// matching the logic are included. If include is false, fields matching +// the logic are excluded. +func filterWith[T Filterable[T]](object T, logic FilterLogic, include bool) (T, error) { + var zero T + + result, err := object.Clone() + if err != nil { + return zero, err + } + + for _, k := range result.Keys() { + if ok := logic.Apply(k, object); (include && ok) || (!include && !ok) { + continue + } + + if err := result.Remove(k); err != nil { + return zero, err + } + } + + return result, nil +} + +// NameBasedFilter is a filter that filters fields based on their field names. +type NameBasedFilter[T Filterable[T]] struct { + names map[string]struct{} + mu sync.RWMutex + logic FilterLogic +} + +// NewNameBasedFilter creates a new NameBasedFilter with the specified field names. +// +// NameBasedFilter is the underlying implementation of the +// various filters in jwe, jwk, jws, and jwt packages. You normally do not +// need to use this directly. 
+func NewNameBasedFilter[T Filterable[T]](names ...string) *NameBasedFilter[T] { + nameMap := make(map[string]struct{}, len(names)) + for _, name := range names { + nameMap[name] = struct{}{} + } + + nf := &NameBasedFilter[T]{ + names: nameMap, + } + + nf.logic = FilterLogicFunc(nf.filter) + return nf +} + +func (nf *NameBasedFilter[T]) filter(k string, _ any) bool { + _, ok := nf.names[k] + return ok +} + +// Filter returns a new object with only the fields that match the specified names. +func (nf *NameBasedFilter[T]) Filter(object T) (T, error) { + nf.mu.RLock() + defer nf.mu.RUnlock() + + return Apply(object, nf.logic) +} + +// Reject returns a new object with only the fields that DO NOT match the specified names. +func (nf *NameBasedFilter[T]) Reject(object T) (T, error) { + nf.mu.RLock() + defer nf.mu.RUnlock() + return Reject(object, nf.logic) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v3/transform/map.go b/vendor/github.com/lestrrat-go/jwx/v3/transform/map.go new file mode 100644 index 0000000000..4eb80cb99f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v3/transform/map.go @@ -0,0 +1,46 @@ +package transform + +import ( + "errors" + "fmt" + + "github.com/lestrrat-go/blackmagic" +) + +// Mappable is an interface that defines methods required when converting +// a jwx structure into a map[string]any. +// +// EXPERIMENTAL: This API is experimental and its interface and behavior is +// subject to change in future releases. This API is not subject to semver +// compatibility guarantees. +type Mappable interface { + Get(key string, dst any) error + Keys() []string +} + +// AsMap takes the specified Mappable object and populates the map +// `dst` with the key-value pairs from the Mappable object. +// Many objects in jwe, jwk, jws, and jwt packages including +// `jwt.Token`, `jwk.Key`, `jws.Header`, etc. +// +// EXPERIMENTAL: This API is experimental and its interface and behavior is +// subject to change in future releases. 
This API is not subject to semver +// compatibility guarantees. +func AsMap(m Mappable, dst map[string]any) error { + if dst == nil { + return fmt.Errorf("transform.AsMap: destination map cannot be nil") + } + + for _, k := range m.Keys() { + var val any + if err := m.Get(k, &val); err != nil { + // Allow invalid value errors. Assume they are just nil values. + if !errors.Is(err, blackmagic.InvalidValueError()) { + return fmt.Errorf(`transform.AsMap: failed to get key %q: %w`, k, err) + } + } + dst[k] = val + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/option/v2/.gitignore b/vendor/github.com/lestrrat-go/option/v2/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/v2/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/lestrrat-go/option/v2/LICENSE b/vendor/github.com/lestrrat-go/option/v2/LICENSE new file mode 100644 index 0000000000..188ea7685c --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lestrrat-go/option/v2/README.md b/vendor/github.com/lestrrat-go/option/v2/README.md new file mode 100644 index 0000000000..cab0044ed3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/v2/README.md @@ -0,0 +1,245 @@ +# option + +Base object for the "Optional Parameters Pattern". + +# DESCRIPTION + +The beauty of this pattern is that you can achieve a method that can +take the following simple calling style + +```go +obj.Method(mandatory1, mandatory2) +``` + +or the following, if you want to modify its behavior with optional parameters + +```go +obj.Method(mandatory1, mandatory2, optional1, optional2, optional3) +``` + +Instead of the more clunky zero value for optionals style + +```go +obj.Method(mandatory1, mandatory2, nil, "", 0) +``` + +or the equally clunky config object style, which requires you to create a +struct with `NamesThatLookReallyLongBecauseItNeedsToIncludeMethodNamesConfig + +```go +cfg := &ConfigForMethod{ + Optional1: ..., + Optional2: ..., + Optional3: ..., +} +obj.Method(mandatory1, mandatory2, &cfg) +``` + +# SYNOPSIS + +Create an "identifier" for the option. We recommend using an unexported empty struct, +because + +1. It is uniquely identifiable globally +1. Takes minimal space +1. Since it's unexported, you do not have to worry about it leaking elsewhere or having it changed by consumers + +```go +// an unexported empty struct +type identFeatureX struct{} +``` + +Then define a method to create an option using this identifier. 
Here we assume +that the option will be a boolean option. + +```go +// this is optional, but for readability we usually use a wrapper +// around option.Interface, or a type alias. +type Option +func WithFeatureX(v bool) Option { + // use the constructor to create a new option + return option.New(identFeatureX{}, v) +} +``` + +Now you can create an option, which essentially a two element tuple consisting +of an identifier and its associated value. + +To consume this, you will need to create a function with variadic parameters, +and iterate over the list looking for a particular identifier: + +```go +func MyAwesomeFunc( /* mandatory parameters omitted */, options ...[]Option) { + var enableFeatureX bool + // The nolint directive is recommended if you are using linters such + // as golangci-lint + //nolint:forcetypeassert + for _, option := range options { + switch option.Ident() { + case identFeatureX{}: + enableFeatureX = option.Value().(bool) + // other cases omitted + } + } + if enableFeatureX { + .... + } +} +``` + +# Option objects + +Option objects take two arguments, its identifier and the value it contains. + +The identifier can be anything, but it's usually better to use a an unexported +empty struct so that only you have the ability to generate said option: + +```go +type identOptionalParamOne struct{} +type identOptionalParamTwo struct{} +type identOptionalParamThree struct{} + +func WithOptionOne(v ...) Option { + return option.New(identOptionalParamOne{}, v) +} +``` + +Then you can call the method we described above as + +```go +obj.Method(m1, m2, WithOptionOne(...), WithOptionTwo(...), WithOptionThree(...)) +``` + +Options should be parsed in a code that looks somewhat like this + +```go +func (obj *Object) Method(m1 Type1, m2 Type2, options ...Option) { + paramOne := defaultValueParamOne + for _, option := range options { + switch option.Ident() { + case identOptionalParamOne{}: + paramOne = option.Value().(...) + } + } + ... 
+} +``` + +The loop requires a bit of boilerplate, and admittedly, this is the main downside +of this module. However, if you think you want use the Option as a Function pattern, +please check the FAQ below for rationale. + +# Simple usage + +Most of the times all you need to do is to declare the Option type as an alias +in your code: + +```go +package myawesomepkg + +import "github.com/lestrrat-go/option" + +type Option = option.Interface +``` + +Then you can start defining options like they are described in the SYNOPSIS section. + +# Differentiating Options + +When you have multiple methods and options, and those options can only be passed to +each one the methods, it's hard to see which options should be passed to which method. + +```go +func WithX() Option { ... } +func WithY() Option { ... } + +// Now, which of WithX/WithY go to which method? +func (*Obj) Method1(options ...Option) {} +func (*Obj) Method2(options ...Option) {} +``` + +In this case the easiest way to make it obvious is to put an extra layer around +the options so that they have different types + +```go +type Method1Option interface { + Option + method1Option() +} + +type method1Option struct { Option } +func (*method1Option) method1Option() {} + +func WithX() Method1Option { + return &methodOption{option.New(...)} +} + +func (*Obj) Method1(options ...Method1Option) {} +``` + +This way the compiler knows if an option can be passed to a given method. + +# FAQ + +## Why aren't these function-based? + +Using a base option type like `type Option func(ctx interface{})` is certainly one way to achieve the same goal. In this case, you are giving the option itself the ability to "configure" the main object. 
For example: + +```go +type Foo struct { + optionaValue bool +} + +type Option func(*Foo) error + +func WithOptionalValue(v bool) Option { + return Option(func(f *Foo) error { + f.optionalValue = v + return nil + }) +} + +func NewFoo(options ...Option) (*Foo, error) { + var f Foo + for _, o := range options { + if err := o(&f); err != nil { + return nil, err + } + } + return &f +} +``` + +This in itself is fine, but we think there are a few problems: + +### 1. It's hard to create a reusable "Option" type + +We create many libraries using this optional pattern. We would like to provide a default base object. However, this function based approach is not reusuable because each "Option" type requires that it has a context-specific input type. For example, if the "Option" type in the previous example was `func(interface{}) error`, then its usability will significantly decrease because of the type conversion. + +This is not to say that this library's approach is better as it also requires type conversion to convert the _value_ of the option. However, part of the beauty of the original function based approach was the ease of its use, and we claim that this significantly decreases the merits of the function based approach. + +### 2. The receiver requires exported fields + +Part of the appeal for a function-based option pattern is by giving the option itself the ability to do what it wants, you open up the possibility of allowing third-parties to create options that do things that the library authors did not think about. + +```go +package thirdparty + +func WithMyAwesomeOption( ... ) mypkg.Option { + return mypkg.Option(func(f *mypkg) error { + f.X = ... + f.Y = ... + f.Z = ... + return nil + }) +} +``` + +However, for any third party code to access and set field values, these fields (`X`, `Y`, `Z`) must be exported.
Basically you will need an "open" struct. + +Exported fields are absolutely no problem when you have a struct that represents data alone (i.e., API calls that refer or change state information) happen, but we think that casually expose fields for a library struct is a sure way to maintenance hell in the future. What happens when you want to change the API? What happens when you realize that you want to use the field as state (i.e. use it for more than configuration)? What if they kept referring to that field, and then you have concurrent code accessing it? + +Giving third parties complete access to exported fields is like handing out a loaded weapon to the users, and you are at their mercy. + +Of course, providing public APIs for everything so you can validate and control concurrency is an option, but then ... it's a lot of work, and you may have to provide APIs _only_ so that users can refer it in the option-configuration phase. That sounds like a lot of extra work. + diff --git a/vendor/github.com/lestrrat-go/option/v2/option.go b/vendor/github.com/lestrrat-go/option/v2/option.go new file mode 100644 index 0000000000..f4fcca3b58 --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/v2/option.go @@ -0,0 +1,47 @@ +package option + +import ( + "fmt" + + "github.com/lestrrat-go/blackmagic" +) + +// Interface defines the minimum interface that an option must fulfill +type Interface interface { + // Ident returns the "identity" of this option, a unique identifier that + // can be used to differentiate between options + Ident() any + + // Value assigns the stored value into the dst argument, which must be + // a pointer to a variable that can store the value. If the assignment + // is successful, it return nil, otherwise it returns an error. 
+ Value(dst any) error +} + +type pair[T any] struct { + ident any + value T +} + +// New creates a new Option +func New[T any](ident any, value T) Interface { + return &pair[T]{ + ident: ident, + value: value, + } +} + +func (p *pair[T]) Ident() any { + return p.ident +} + +func (p *pair[T]) Value(dst any) error { + if err := blackmagic.AssignIfCompatible(dst, p.value); err != nil { + return fmt.Errorf("failed to assign value %T to %T: %s", p.value, dst, err) + } + return nil +} + +func (p *pair[T]) String() string { + return fmt.Sprintf(`%v(%v)`, p.ident, p.value) +} diff --git a/vendor/github.com/lestrrat-go/option/v2/set.go b/vendor/github.com/lestrrat-go/option/v2/set.go new file mode 100644 index 0000000000..def943407a --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/v2/set.go @@ -0,0 +1,92 @@ +package option + +import ( + "sync" +) + +// Set is a container to store multiple options. Because options are +// usually used all over the place to configure various aspects of +// a system, it is often useful to be able to collect multiple options +// together and pass them around as a single entity. +// +// Note that Set is meant to be add-only; You usually do not remove +// options from a Set. +// +// The intention is to create a set using a sync.Pool; we would like +// to provide a centralized pool of Sets so that you don't need to +// instantiate a new pool for every type of option you want to +// store, but that is not quite possible because of the limitations +// of parameterized types in Go. Instead create a `*option.SetPool` +// with an appropriate type parameter and allocator. 
+type Set[T Interface] struct { + mu sync.RWMutex + options []T +} + +func NewSet[T Interface]() *Set[T] { + return &Set[T]{ + options: make([]T, 0, 1), + } +} + +func (s *Set[T]) Add(opt T) { + s.mu.Lock() + defer s.mu.Unlock() + s.options = append(s.options, opt) +} + +func (s *Set[T]) Reset() { + s.mu.Lock() + defer s.mu.Unlock() + s.options = s.options[:0] // Reset the options slice to avoid memory leaks +} + +func (s *Set[T]) Len() int { + s.mu.RLock() + defer s.mu.RUnlock() + return len(s.options) +} + +func (s *Set[T]) Option(i int) T { + var zero T + s.mu.RLock() + defer s.mu.RUnlock() + if i < 0 || i >= len(s.options) { + return zero + } + return s.options[i] +} + +// List returns a slice of all options stored in the Set. +// Note that the slice is the same slice that is used internally, so +// you should not modify the contents of the slice directly. +// This to avoid unnecessary allocations and copying of the slice for +// performance reasons. +func (s *Set[T]) List() []T { + s.mu.RLock() + defer s.mu.RUnlock() + return s.options +} + +// SetPool is a pool of Sets that can be used to efficiently manage +// the lifecycle of Sets. It uses a sync.Pool to store and retrieve +// Sets, allowing for efficient reuse of memory and reducing the +// number of allocations required when creating new Sets. +type SetPool[T Interface] struct { + pool *sync.Pool // sync.Pool that contains *Set[T] +} + +func NewSetPool[T Interface](pool *sync.Pool) *SetPool[T] { + return &SetPool[T]{ + pool: pool, + } +} + +func (p *SetPool[T]) Get() *Set[T] { + return p.pool.Get().(*Set[T]) +} + +func (p *SetPool[T]) Put(s *Set[T]) { + s.Reset() + p.pool.Put(s) +} diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE deleted file mode 100644 index bbc7b897cb..0000000000 --- a/vendor/github.com/munnerz/goautoneg/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile deleted file mode 100644 index e33ee17303..0000000000 --- a/vendor/github.com/munnerz/goautoneg/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=bitbucket.org/ww/goautoneg -GOFILES=autoneg.go - -include $(GOROOT)/src/Make.pkg - -format: - gofmt -w *.go - -docs: - gomake clean - godoc ${TARG} > README.txt diff --git a/vendor/github.com/munnerz/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt deleted file mode 100644 index 7723656d58..0000000000 --- a/vendor/github.com/munnerz/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go deleted file mode 100644 index 1dd1cad646..0000000000 --- a/vendor/github.com/munnerz/goautoneg/autoneg.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// acceptSlice is defined to implement sort interface. 
-type acceptSlice []Accept - -func (slice acceptSlice) Len() int { - return len(slice) -} - -func (slice acceptSlice) Less(i, j int) bool { - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (slice acceptSlice) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} - -func stringTrimSpaceCutset(r rune) bool { - return r == ' ' -} - -func nextSplitElement(s, sep string) (item string, remaining string) { - if index := strings.Index(s, sep); index != -1 { - return s[:index], s[index+1:] - } - return s, "" -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) acceptSlice { - partsCount := 0 - remaining := header - for len(remaining) > 0 { - partsCount++ - _, remaining = nextSplitElement(remaining, ",") - } - accept := make(acceptSlice, 0, partsCount) - - remaining = header - var part string - for len(remaining) > 0 { - part, remaining = nextSplitElement(remaining, ",") - part = strings.TrimFunc(part, stringTrimSpaceCutset) - - a := Accept{ - Q: 1.0, - } - - sp, remainingPart := nextSplitElement(part, ";") - - sp0, spRemaining := nextSplitElement(sp, "/") - a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) - - switch { - case len(spRemaining) == 0: - if a.Type == "*" { - a.SubType = "*" - } else { - continue - } - default: - var sp1 string - sp1, spRemaining = nextSplitElement(spRemaining, "/") - if len(spRemaining) > 0 { - continue - } - a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) - } - - if len(remainingPart) == 0 { - accept = append(accept, a) - continue - } - - a.Params = make(map[string]string) - for len(remainingPart) > 0 { - sp, remainingPart = nextSplitElement(remainingPart, ";") - sp0, spRemaining = nextSplitElement(sp, "=") - if len(spRemaining) == 0 { - continue - } - var sp1 string - sp1, spRemaining = 
nextSplitElement(spRemaining, "=") - if len(spRemaining) != 0 { - continue - } - token := strings.TrimFunc(sp0, stringTrimSpaceCutset) - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp1, 32) - } else { - a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) - } - } - - accept = append(accept, a) - } - - sort.Sort(accept) - return accept -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/open-policy-agent/opa/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/ast/annotations.go index d6267a0e64..3bc5fb36a5 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/annotations.go +++ b/vendor/github.com/open-policy-agent/opa/ast/annotations.go @@ -5,973 +5,33 @@ package ast import ( - "encoding/json" - "fmt" - "net/url" - "sort" - "strings" - - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/internal/deepcopy" - "github.com/open-policy-agent/opa/util" -) - -const ( - annotationScopePackage = "package" - annotationScopeImport = "import" - annotationScopeRule = "rule" - annotationScopeDocument = "document" - annotationScopeSubpackages = "subpackages" + v1 "github.com/open-policy-agent/opa/v1/ast" ) type ( // Annotations represents metadata attached to other AST nodes such as rules. 
- Annotations struct { - Scope string `json:"scope"` - Title string `json:"title,omitempty"` - Entrypoint bool `json:"entrypoint,omitempty"` - Description string `json:"description,omitempty"` - Organizations []string `json:"organizations,omitempty"` - RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"` - Authors []*AuthorAnnotation `json:"authors,omitempty"` - Schemas []*SchemaAnnotation `json:"schemas,omitempty"` - Custom map[string]interface{} `json:"custom,omitempty"` - Location *Location `json:"location,omitempty"` - - comments []*Comment - node Node - jsonOptions astJSON.Options - } + Annotations = v1.Annotations // SchemaAnnotation contains a schema declaration for the document identified by the path. - SchemaAnnotation struct { - Path Ref `json:"path"` - Schema Ref `json:"schema,omitempty"` - Definition *interface{} `json:"definition,omitempty"` - } - - AuthorAnnotation struct { - Name string `json:"name"` - Email string `json:"email,omitempty"` - } - - RelatedResourceAnnotation struct { - Ref url.URL `json:"ref"` - Description string `json:"description,omitempty"` - } - - AnnotationSet struct { - byRule map[*Rule][]*Annotations - byPackage map[int]*Annotations - byPath *annotationTreeNode - modules []*Module // Modules this set was constructed from - } + SchemaAnnotation = v1.SchemaAnnotation - annotationTreeNode struct { - Value *Annotations - Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!) 
- } + AuthorAnnotation = v1.AuthorAnnotation - AnnotationsRef struct { - Path Ref `json:"path"` // The path of the node the annotations are applied to - Annotations *Annotations `json:"annotations,omitempty"` - Location *Location `json:"location,omitempty"` // The location of the node the annotations are applied to + RelatedResourceAnnotation = v1.RelatedResourceAnnotation - jsonOptions astJSON.Options + AnnotationSet = v1.AnnotationSet - node Node // The node the annotations are applied to - } + AnnotationsRef = v1.AnnotationsRef - AnnotationsRefSet []*AnnotationsRef + AnnotationsRefSet = v1.AnnotationsRefSet - FlatAnnotationsRefSet AnnotationsRefSet + FlatAnnotationsRefSet = v1.FlatAnnotationsRefSet ) -func (a *Annotations) String() string { - bs, _ := a.MarshalJSON() - return string(bs) -} - -// Loc returns the location of this annotation. -func (a *Annotations) Loc() *Location { - return a.Location -} - -// SetLoc updates the location of this annotation. -func (a *Annotations) SetLoc(l *Location) { - a.Location = l -} - -// EndLoc returns the location of this annotation's last comment line. -func (a *Annotations) EndLoc() *Location { - count := len(a.comments) - if count == 0 { - return a.Location - } - return a.comments[count-1].Location -} - -// Compare returns an integer indicating if a is less than, equal to, or greater -// than other. 
-func (a *Annotations) Compare(other *Annotations) int { - - if a == nil && other == nil { - return 0 - } - - if a == nil { - return -1 - } - - if other == nil { - return 1 - } - - if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Title, other.Title); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Description, other.Description); cmp != 0 { - return cmp - } - - if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 { - return cmp - } - - if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 { - return cmp - } - - if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 { - return cmp - } - - if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 { - return cmp - } - - if a.Entrypoint != other.Entrypoint { - if a.Entrypoint { - return 1 - } - return -1 - } - - if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 { - return cmp - } - - return 0 -} - -// GetTargetPath returns the path of the node these Annotations are applied to (the target) -func (a *Annotations) GetTargetPath() Ref { - switch n := a.node.(type) { - case *Package: - return n.Path - case *Rule: - return n.Ref().GroundPrefix() - default: - return nil - } -} - -func (a *Annotations) setJSONOptions(opts astJSON.Options) { - a.jsonOptions = opts - if a.Location != nil { - a.Location.JSONOptions = opts - } -} - -func (a *Annotations) MarshalJSON() ([]byte, error) { - if a == nil { - return []byte(`{"scope":""}`), nil - } - - data := map[string]interface{}{ - "scope": a.Scope, - } - - if a.Title != "" { - data["title"] = a.Title - } - - if a.Description != "" { - data["description"] = a.Description - } - - if a.Entrypoint { - data["entrypoint"] = a.Entrypoint - } - - if len(a.Organizations) > 0 { - data["organizations"] = a.Organizations - } - - if len(a.RelatedResources) > 0 { - data["related_resources"] = a.RelatedResources - } - - if len(a.Authors) > 0 { - 
data["authors"] = a.Authors - } - - if len(a.Schemas) > 0 { - data["schemas"] = a.Schemas - } - - if len(a.Custom) > 0 { - data["custom"] = a.Custom - } - - if a.jsonOptions.MarshalOptions.IncludeLocation.Annotations { - if a.Location != nil { - data["location"] = a.Location - } - } - - return json.Marshal(data) -} - func NewAnnotationsRef(a *Annotations) *AnnotationsRef { - var loc *Location - if a.node != nil { - loc = a.node.Loc() - } - - return &AnnotationsRef{ - Location: loc, - Path: a.GetTargetPath(), - Annotations: a, - node: a.node, - jsonOptions: a.jsonOptions, - } -} - -func (ar *AnnotationsRef) GetPackage() *Package { - switch n := ar.node.(type) { - case *Package: - return n - case *Rule: - return n.Module.Package - default: - return nil - } -} - -func (ar *AnnotationsRef) GetRule() *Rule { - switch n := ar.node.(type) { - case *Rule: - return n - default: - return nil - } -} - -func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "path": ar.Path, - } - - if ar.Annotations != nil { - data["annotations"] = ar.Annotations - } - - if ar.jsonOptions.MarshalOptions.IncludeLocation.AnnotationsRef { - if ar.Location != nil { - data["location"] = ar.Location - } - } - - return json.Marshal(data) -} - -func scopeCompare(s1, s2 string) int { - - o1 := scopeOrder(s1) - o2 := scopeOrder(s2) - - if o2 < o1 { - return 1 - } else if o2 > o1 { - return -1 - } - - if s1 < s2 { - return -1 - } else if s2 < s1 { - return 1 - } - - return 0 -} - -func scopeOrder(s string) int { - switch s { - case annotationScopeRule: - return 1 - } - return 0 -} - -func compareAuthors(a, b []*AuthorAnnotation) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - - return 0 -} - -func compareRelatedResources(a, b []*RelatedResourceAnnotation) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - 
return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := strings.Compare(a[i].String(), b[i].String()); cmp != 0 { - return cmp - } - } - - return 0 -} - -func compareSchemas(a, b []*SchemaAnnotation) int { - maxLen := len(a) - if len(b) < maxLen { - maxLen = len(b) - } - - for i := 0; i < maxLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - return 0 -} - -func compareStringLists(a, b []string) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := strings.Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - - return 0 -} - -// Copy returns a deep copy of s. -func (a *Annotations) Copy(node Node) *Annotations { - cpy := *a - - cpy.Organizations = make([]string, len(a.Organizations)) - copy(cpy.Organizations, a.Organizations) - - cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources)) - for i := range a.RelatedResources { - cpy.RelatedResources[i] = a.RelatedResources[i].Copy() - } - - cpy.Authors = make([]*AuthorAnnotation, len(a.Authors)) - for i := range a.Authors { - cpy.Authors[i] = a.Authors[i].Copy() - } - - cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas)) - for i := range a.Schemas { - cpy.Schemas[i] = a.Schemas[i].Copy() - } - - cpy.Custom = deepcopy.Map(a.Custom) - - cpy.node = node - - return &cpy -} - -// toObject constructs an AST Object from the annotation. 
-func (a *Annotations) toObject() (*Object, *Error) { - obj := NewObject() - - if a == nil { - return &obj, nil - } - - if len(a.Scope) > 0 { - obj.Insert(StringTerm("scope"), StringTerm(a.Scope)) - } - - if len(a.Title) > 0 { - obj.Insert(StringTerm("title"), StringTerm(a.Title)) - } - - if a.Entrypoint { - obj.Insert(StringTerm("entrypoint"), BooleanTerm(true)) - } - - if len(a.Description) > 0 { - obj.Insert(StringTerm("description"), StringTerm(a.Description)) - } - - if len(a.Organizations) > 0 { - orgs := make([]*Term, 0, len(a.Organizations)) - for _, org := range a.Organizations { - orgs = append(orgs, StringTerm(org)) - } - obj.Insert(StringTerm("organizations"), ArrayTerm(orgs...)) - } - - if len(a.RelatedResources) > 0 { - rrs := make([]*Term, 0, len(a.RelatedResources)) - for _, rr := range a.RelatedResources { - rrObj := NewObject(Item(StringTerm("ref"), StringTerm(rr.Ref.String()))) - if len(rr.Description) > 0 { - rrObj.Insert(StringTerm("description"), StringTerm(rr.Description)) - } - rrs = append(rrs, NewTerm(rrObj)) - } - obj.Insert(StringTerm("related_resources"), ArrayTerm(rrs...)) - } - - if len(a.Authors) > 0 { - as := make([]*Term, 0, len(a.Authors)) - for _, author := range a.Authors { - aObj := NewObject() - if len(author.Name) > 0 { - aObj.Insert(StringTerm("name"), StringTerm(author.Name)) - } - if len(author.Email) > 0 { - aObj.Insert(StringTerm("email"), StringTerm(author.Email)) - } - as = append(as, NewTerm(aObj)) - } - obj.Insert(StringTerm("authors"), ArrayTerm(as...)) - } - - if len(a.Schemas) > 0 { - ss := make([]*Term, 0, len(a.Schemas)) - for _, s := range a.Schemas { - sObj := NewObject() - if len(s.Path) > 0 { - sObj.Insert(StringTerm("path"), NewTerm(s.Path.toArray())) - } - if len(s.Schema) > 0 { - sObj.Insert(StringTerm("schema"), NewTerm(s.Schema.toArray())) - } - if s.Definition != nil { - def, err := InterfaceToValue(s.Definition) - if err != nil { - return nil, NewError(CompileErr, a.Location, "invalid definition in 
schema annotation: %s", err.Error()) - } - sObj.Insert(StringTerm("definition"), NewTerm(def)) - } - ss = append(ss, NewTerm(sObj)) - } - obj.Insert(StringTerm("schemas"), ArrayTerm(ss...)) - } - - if len(a.Custom) > 0 { - c, err := InterfaceToValue(a.Custom) - if err != nil { - return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error()) - } - obj.Insert(StringTerm("custom"), NewTerm(c)) - } - - return &obj, nil -} - -func attachRuleAnnotations(mod *Module) { - // make a copy of the annotations - cpy := make([]*Annotations, len(mod.Annotations)) - for i, a := range mod.Annotations { - cpy[i] = a.Copy(a.node) - } - - for _, rule := range mod.Rules { - var j int - var found bool - for i, a := range cpy { - if rule.Ref().GroundPrefix().Equal(a.GetTargetPath()) { - if a.Scope == annotationScopeDocument { - rule.Annotations = append(rule.Annotations, a) - } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row { - j = i - found = true - rule.Annotations = append(rule.Annotations, a) - } - } - } - - if found && j < len(cpy) { - cpy = append(cpy[:j], cpy[j+1:]...) - } - } -} - -func attachAnnotationsNodes(mod *Module) Errors { - var errs Errors - - // Find first non-annotation statement following each annotation and attach - // the annotation to that statement. 
- for _, a := range mod.Annotations { - for _, stmt := range mod.stmts { - _, ok := stmt.(*Annotations) - if !ok { - if stmt.Loc().Row > a.Location.Row { - a.node = stmt - break - } - } - } - - if a.Scope == "" { - switch a.node.(type) { - case *Rule: - if a.Entrypoint { - a.Scope = annotationScopeDocument - } else { - a.Scope = annotationScopeRule - } - case *Package: - a.Scope = annotationScopePackage - case *Import: - a.Scope = annotationScopeImport - } - } - - if err := validateAnnotationScopeAttachment(a); err != nil { - errs = append(errs, err) - } - - if err := validateAnnotationEntrypointAttachment(a); err != nil { - errs = append(errs, err) - } - } - - return errs -} - -func validateAnnotationScopeAttachment(a *Annotations) *Error { - - switch a.Scope { - case annotationScopeRule, annotationScopeDocument: - if _, ok := a.node.(*Rule); ok { - return nil - } - return newScopeAttachmentErr(a, "rule") - case annotationScopePackage, annotationScopeSubpackages: - if _, ok := a.node.(*Package); ok { - return nil - } - return newScopeAttachmentErr(a, "package") - } - - return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. Use one of '%s', '%s', '%s', or '%s'", - a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages) -} - -func validateAnnotationEntrypointAttachment(a *Annotations) *Error { - if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) { - return NewError( - ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope) - } - return nil -} - -// Copy returns a deep copy of a. -func (a *AuthorAnnotation) Copy() *AuthorAnnotation { - cpy := *a - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. 
-func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int { - if cmp := strings.Compare(a.Name, other.Name); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Email, other.Email); cmp != 0 { - return cmp - } - - return 0 -} - -func (a *AuthorAnnotation) String() string { - if len(a.Email) == 0 { - return a.Name - } else if len(a.Name) == 0 { - return fmt.Sprintf("<%s>", a.Email) - } - return fmt.Sprintf("%s <%s>", a.Name, a.Email) -} - -// Copy returns a deep copy of rr. -func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation { - cpy := *rr - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. -func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int { - if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 { - return cmp - } - - return 0 -} - -func (rr *RelatedResourceAnnotation) String() string { - bs, _ := json.Marshal(rr) - return string(bs) -} - -func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) { - d := map[string]interface{}{ - "ref": rr.Ref.String(), - } - - if len(rr.Description) > 0 { - d["description"] = rr.Description - } - - return json.Marshal(d) -} - -// Copy returns a deep copy of s. -func (s *SchemaAnnotation) Copy() *SchemaAnnotation { - cpy := *s - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. 
-func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int { - - if cmp := s.Path.Compare(other.Path); cmp != 0 { - return cmp - } - - if cmp := s.Schema.Compare(other.Schema); cmp != 0 { - return cmp - } - - if s.Definition != nil && other.Definition == nil { - return -1 - } else if s.Definition == nil && other.Definition != nil { - return 1 - } else if s.Definition != nil && other.Definition != nil { - return util.Compare(*s.Definition, *other.Definition) - } - - return 0 -} - -func (s *SchemaAnnotation) String() string { - bs, _ := json.Marshal(s) - return string(bs) -} - -func newAnnotationSet() *AnnotationSet { - return &AnnotationSet{ - byRule: map[*Rule][]*Annotations{}, - byPackage: map[int]*Annotations{}, - byPath: newAnnotationTree(), - } + return v1.NewAnnotationsRef(a) } func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) { - as := newAnnotationSet() - var errs Errors - for _, m := range modules { - for _, a := range m.Annotations { - if err := as.add(a); err != nil { - errs = append(errs, err) - } - } - } - if len(errs) > 0 { - return nil, errs - } - as.modules = modules - return as, nil -} - -// NOTE(philipc): During copy propagation, the underlying Nodes can be -// stripped away from the annotations, leading to nil deref panics. We -// silently ignore these cases for now, as a workaround. 
-func (as *AnnotationSet) add(a *Annotations) *Error { - switch a.Scope { - case annotationScopeRule: - if rule, ok := a.node.(*Rule); ok { - as.byRule[rule] = append(as.byRule[rule], a) - } - case annotationScopePackage: - if pkg, ok := a.node.(*Package); ok { - hash := pkg.Path.Hash() - if exist, ok := as.byPackage[hash]; ok { - return errAnnotationRedeclared(a, exist.Location) - } - as.byPackage[hash] = a - } - case annotationScopeDocument: - if rule, ok := a.node.(*Rule); ok { - path := rule.Ref().GroundPrefix() - x := as.byPath.get(path) - if x != nil { - return errAnnotationRedeclared(a, x.Value.Location) - } - as.byPath.insert(path, a) - } - case annotationScopeSubpackages: - if pkg, ok := a.node.(*Package); ok { - x := as.byPath.get(pkg.Path) - if x != nil && x.Value != nil { - return errAnnotationRedeclared(a, x.Value.Location) - } - as.byPath.insert(pkg.Path, a) - } - } - return nil -} - -func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations { - if as == nil { - return nil - } - return as.byRule[r] -} - -func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations { - if as == nil { - return nil - } - return as.byPath.ancestors(path) -} - -func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations { - if as == nil { - return nil - } - if node := as.byPath.get(path); node != nil { - return node.Value - } - return nil -} - -func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations { - if as == nil { - return nil - } - return as.byPackage[pkg.Path.Hash()] -} - -// Flatten returns a flattened list view of this AnnotationSet. -// The returned slice is sorted, first by the annotations' target path, then by their target location -func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet { - // This preallocation often won't be optimal, but it's superior to starting with a nil slice. 
- refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage)) - - refs = as.byPath.flatten(refs) - - for _, a := range as.byPackage { - refs = append(refs, NewAnnotationsRef(a)) - } - - for _, as := range as.byRule { - for _, a := range as { - refs = append(refs, NewAnnotationsRef(a)) - } - } - - // Sort by path, then annotation location, for stable output - sort.SliceStable(refs, func(i, j int) bool { - return refs[i].Compare(refs[j]) < 0 - }) - - return refs -} - -// Chain returns the chain of annotations leading up to the given rule. -// The returned slice is ordered as follows -// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry) -// 1. The 'document' scope entry, if any -// 2. The 'package' scope entry, if any -// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the fartest. E.g.: 'do.re.mi', 'do.re', 'do' -// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule. 
-func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet { - var refs []*AnnotationsRef - - ruleAnnots := as.GetRuleScope(rule) - - if len(ruleAnnots) >= 1 { - for _, a := range ruleAnnots { - refs = append(refs, NewAnnotationsRef(a)) - } - } else { - // Make sure there is always a leading entry representing the passed rule, even if it has no annotations - refs = append(refs, &AnnotationsRef{ - Location: rule.Location, - Path: rule.Ref().GroundPrefix(), - node: rule, - }) - } - - if len(refs) > 1 { - // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward - sort.SliceStable(refs, func(i, j int) bool { - return refs[i].Annotations.Location.Compare(refs[j].Annotations.Location) > 0 - }) - } - - docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix()) - if docAnnots != nil { - refs = append(refs, NewAnnotationsRef(docAnnots)) - } - - pkg := rule.Module.Package - pkgAnnots := as.GetPackageScope(pkg) - if pkgAnnots != nil { - refs = append(refs, NewAnnotationsRef(pkgAnnots)) - } - - subPkgAnnots := as.GetSubpackagesScope(pkg.Path) - // We need to reverse the order, as subPkgAnnots ordering will start at the root, - // whereas we want to end at the root. - for i := len(subPkgAnnots) - 1; i >= 0; i-- { - refs = append(refs, NewAnnotationsRef(subPkgAnnots[i])) - } - - return refs -} - -func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet { - result := make(FlatAnnotationsRefSet, 0, len(ars)+1) - - // insertion sort, first by path, then location - for i, current := range ars { - if ar.Compare(current) < 0 { - result = append(result, ar) - result = append(result, ars[i:]...) 
- break - } - result = append(result, current) - } - - if len(result) < len(ars)+1 { - result = append(result, ar) - } - - return result -} - -func newAnnotationTree() *annotationTreeNode { - return &annotationTreeNode{ - Value: nil, - Children: map[Value]*annotationTreeNode{}, - } -} - -func (t *annotationTreeNode) insert(path Ref, value *Annotations) { - node := t - for _, k := range path { - child, ok := node.Children[k.Value] - if !ok { - child = newAnnotationTree() - node.Children[k.Value] = child - } - node = child - } - node.Value = value -} - -func (t *annotationTreeNode) get(path Ref) *annotationTreeNode { - node := t - for _, k := range path { - if node == nil { - return nil - } - child, ok := node.Children[k.Value] - if !ok { - return nil - } - node = child - } - return node -} - -// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'. -func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) { - node := t - for _, k := range path { - if node == nil { - return result - } - child, ok := node.Children[k.Value] - if !ok { - return result - } - if child.Value != nil { - result = append(result, child.Value) - } - node = child - } - return result -} - -func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef { - if a := t.Value; a != nil { - refs = append(refs, NewAnnotationsRef(a)) - } - for _, c := range t.Children { - refs = c.flatten(refs) - } - return refs -} - -func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int { - if c := ar.Path.Compare(other.Path); c != 0 { - return c - } - - if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 { - return c - } - - return ar.Annotations.Compare(other.Annotations) + return v1.BuildAnnotationSet(modules) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/ast/builtins.go index f54d91d317..65de5abef2 100644 --- 
a/vendor/github.com/open-policy-agent/opa/ast/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/ast/builtins.go @@ -5,1348 +5,230 @@ package ast import ( - "strings" - - "github.com/open-policy-agent/opa/types" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Builtins is the registry of built-in functions supported by OPA. // Call RegisterBuiltin to add a new built-in. -var Builtins []*Builtin +var Builtins = v1.Builtins // RegisterBuiltin adds a new built-in function to the registry. func RegisterBuiltin(b *Builtin) { - Builtins = append(Builtins, b) - BuiltinMap[b.Name] = b - if len(b.Infix) > 0 { - BuiltinMap[b.Infix] = b - } + v1.RegisterBuiltin(b) } // DefaultBuiltins is the registry of built-in functions supported in OPA // by default. When adding a new built-in function to OPA, update this // list. -var DefaultBuiltins = [...]*Builtin{ - // Unification/equality ("=") - Equality, - - // Assignment (":=") - Assign, - - // Membership, infix "in": `x in xs` - Member, - MemberWithKey, - - // Comparisons - GreaterThan, - GreaterThanEq, - LessThan, - LessThanEq, - NotEqual, - Equal, - - // Arithmetic - Plus, - Minus, - Multiply, - Divide, - Ceil, - Floor, - Round, - Abs, - Rem, - - // Bitwise Arithmetic - BitsOr, - BitsAnd, - BitsNegate, - BitsXOr, - BitsShiftLeft, - BitsShiftRight, - - // Binary - And, - Or, - - // Aggregates - Count, - Sum, - Product, - Max, - Min, - Any, - All, - - // Arrays - ArrayConcat, - ArraySlice, - ArrayReverse, - - // Conversions - ToNumber, - - // Casts (DEPRECATED) - CastObject, - CastNull, - CastBoolean, - CastString, - CastSet, - CastArray, - - // Regular Expressions - RegexIsValid, - RegexMatch, - RegexMatchDeprecated, - RegexSplit, - GlobsMatch, - RegexTemplateMatch, - RegexFind, - RegexFindAllStringSubmatch, - RegexReplace, - - // Sets - SetDiff, - Intersection, - Union, - - // Strings - AnyPrefixMatch, - AnySuffixMatch, - Concat, - FormatInt, - IndexOf, - IndexOfN, - Substring, - Lower, - Upper, - Contains, - 
StringCount, - StartsWith, - EndsWith, - Split, - Replace, - ReplaceN, - Trim, - TrimLeft, - TrimPrefix, - TrimRight, - TrimSuffix, - TrimSpace, - Sprintf, - StringReverse, - RenderTemplate, - - // Numbers - NumbersRange, - NumbersRangeStep, - RandIntn, - - // Encoding - JSONMarshal, - JSONMarshalWithOptions, - JSONUnmarshal, - JSONIsValid, - Base64Encode, - Base64Decode, - Base64IsValid, - Base64UrlEncode, - Base64UrlEncodeNoPad, - Base64UrlDecode, - URLQueryDecode, - URLQueryEncode, - URLQueryEncodeObject, - URLQueryDecodeObject, - YAMLMarshal, - YAMLUnmarshal, - YAMLIsValid, - HexEncode, - HexDecode, - - // Object Manipulation - ObjectUnion, - ObjectUnionN, - ObjectRemove, - ObjectFilter, - ObjectGet, - ObjectKeys, - ObjectSubset, - - // JSON Object Manipulation - JSONFilter, - JSONRemove, - JSONPatch, - - // Tokens - JWTDecode, - JWTVerifyRS256, - JWTVerifyRS384, - JWTVerifyRS512, - JWTVerifyPS256, - JWTVerifyPS384, - JWTVerifyPS512, - JWTVerifyES256, - JWTVerifyES384, - JWTVerifyES512, - JWTVerifyHS256, - JWTVerifyHS384, - JWTVerifyHS512, - JWTDecodeVerify, - JWTEncodeSignRaw, - JWTEncodeSign, - - // Time - NowNanos, - ParseNanos, - ParseRFC3339Nanos, - ParseDurationNanos, - Format, - Date, - Clock, - Weekday, - AddDate, - Diff, - - // Crypto - CryptoX509ParseCertificates, - CryptoX509ParseAndVerifyCertificates, - CryptoX509ParseAndVerifyCertificatesWithOptions, - CryptoMd5, - CryptoSha1, - CryptoSha256, - CryptoX509ParseCertificateRequest, - CryptoX509ParseRSAPrivateKey, - CryptoX509ParseKeyPair, - CryptoParsePrivateKeys, - CryptoHmacMd5, - CryptoHmacSha1, - CryptoHmacSha256, - CryptoHmacSha512, - CryptoHmacEqual, - - // Graphs - WalkBuiltin, - ReachableBuiltin, - ReachablePathsBuiltin, - - // Sort - Sort, - - // Types - IsNumber, - IsString, - IsBoolean, - IsArray, - IsSet, - IsObject, - IsNull, - TypeNameBuiltin, - - // HTTP - HTTPSend, - - // GraphQL - GraphQLParse, - GraphQLParseAndVerify, - GraphQLParseQuery, - GraphQLParseSchema, - GraphQLIsValid, - 
GraphQLSchemaIsValid, - - // JSON Schema - JSONSchemaVerify, - JSONMatchSchema, - - // Cloud Provider Helpers - ProvidersAWSSignReqObj, - - // Rego - RegoParseModule, - RegoMetadataChain, - RegoMetadataRule, - - // OPA - OPARuntime, - - // Tracing - Trace, - - // Networking - NetCIDROverlap, - NetCIDRIntersects, - NetCIDRContains, - NetCIDRContainsMatches, - NetCIDRExpand, - NetCIDRMerge, - NetLookupIPAddr, - NetCIDRIsValid, - - // Glob - GlobMatch, - GlobQuoteMeta, - - // Units - UnitsParse, - UnitsParseBytes, - - // UUIDs - UUIDRFC4122, - UUIDParse, - - // SemVers - SemVerIsValid, - SemVerCompare, - - // Printing - Print, - InternalPrint, -} +var DefaultBuiltins = v1.DefaultBuiltins // BuiltinMap provides a convenient mapping of built-in names to // built-in definitions. -var BuiltinMap map[string]*Builtin +var BuiltinMap = v1.BuiltinMap // Deprecated: Builtins can now be directly annotated with the // Nondeterministic property, and when set to true, will be ignored // for partial evaluation. -var IgnoreDuringPartialEval = []*Builtin{ - RandIntn, - UUIDRFC4122, - JWTDecodeVerify, - JWTEncodeSignRaw, - JWTEncodeSign, - NowNanos, - HTTPSend, - OPARuntime, - NetLookupIPAddr, -} +var IgnoreDuringPartialEval = v1.IgnoreDuringPartialEval //nolint:staticcheck /** * Unification */ // Equality represents the "=" operator. -var Equality = &Builtin{ - Name: "eq", - Infix: "=", - Decl: types.NewFunction( - types.Args(types.A, types.A), - types.B, - ), -} +var Equality = v1.Equality /** * Assignment */ // Assign represents the assignment (":=") operator. -var Assign = &Builtin{ - Name: "assign", - Infix: ":=", - Decl: types.NewFunction( - types.Args(types.A, types.A), - types.B, - ), -} +var Assign = v1.Assign // Member represents the `in` (infix) operator. 
-var Member = &Builtin{ - Name: "internal.member_2", - Infix: "in", - Decl: types.NewFunction( - types.Args( - types.A, - types.A, - ), - types.B, - ), -} +var Member = v1.Member // MemberWithKey represents the `in` (infix) operator when used // with two terms on the lhs, i.e., `k, v in obj`. -var MemberWithKey = &Builtin{ - Name: "internal.member_3", - Infix: "in", - Decl: types.NewFunction( - types.Args( - types.A, - types.A, - types.A, - ), - types.B, - ), -} +var MemberWithKey = v1.MemberWithKey -/** - * Comparisons - */ -var comparison = category("comparison") - -var GreaterThan = &Builtin{ - Name: "gt", - Infix: ">", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"), - ), -} +var GreaterThan = v1.GreaterThan -var GreaterThanEq = &Builtin{ - Name: "gte", - Infix: ">=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"), - ), -} +var GreaterThanEq = v1.GreaterThanEq // LessThan represents the "<" comparison operator. 
-var LessThan = &Builtin{ - Name: "lt", - Infix: "<", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"), - ), -} +var LessThan = v1.LessThan -var LessThanEq = &Builtin{ - Name: "lte", - Infix: "<=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"), - ), -} +var LessThanEq = v1.LessThanEq -var NotEqual = &Builtin{ - Name: "neq", - Infix: "!=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"), - ), -} +var NotEqual = v1.NotEqual // Equal represents the "==" comparison operator. 
-var Equal = &Builtin{ - Name: "equal", - Infix: "==", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"), - ), -} +var Equal = v1.Equal -/** - * Arithmetic - */ -var number = category("numbers") - -var Plus = &Builtin{ - Name: "plus", - Infix: "+", - Description: "Plus adds two numbers together.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the sum of `x` and `y`"), - ), - Categories: number, -} +var Plus = v1.Plus -var Minus = &Builtin{ - Name: "minus", - Infix: "-", - Description: "Minus subtracts the second number from the first number or computes the difference between two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny(types.N, types.NewSet(types.A))), - types.Named("y", types.NewAny(types.N, types.NewSet(types.A))), - ), - types.Named("z", types.NewAny(types.N, types.NewSet(types.A))).Description("the difference of `x` and `y`"), - ), - Categories: category("sets", "numbers"), -} +var Minus = v1.Minus -var Multiply = &Builtin{ - Name: "mul", - Infix: "*", - Description: "Multiplies two numbers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the product of `x` and `y`"), - ), - Categories: number, -} +var Multiply = v1.Multiply -var Divide = &Builtin{ - Name: "div", - Infix: "/", - Description: "Divides the first number by the second number.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the dividend"), - types.Named("y", types.N).Description("the divisor"), - ), - types.Named("z", types.N).Description("the result of `x` divided by `y`"), - ), - Categories: number, -} +var Divide = v1.Divide -var Round = &Builtin{ - Name: 
"round", - Description: "Rounds the number to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", types.N).Description("the result of rounding `x`"), - ), - Categories: number, -} +var Round = v1.Round -var Ceil = &Builtin{ - Name: "ceil", - Description: "Rounds the number _up_ to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", types.N).Description("the result of rounding `x` _up_"), - ), - Categories: number, -} +var Ceil = v1.Ceil -var Floor = &Builtin{ - Name: "floor", - Description: "Rounds the number _down_ to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", types.N).Description("the result of rounding `x` _down_"), - ), - Categories: number, -} +var Floor = v1.Floor -var Abs = &Builtin{ - Name: "abs", - Description: "Returns the number without its sign.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - ), - types.Named("y", types.N).Description("the absolute value of `x`"), - ), - Categories: number, -} +var Abs = v1.Abs -var Rem = &Builtin{ - Name: "rem", - Infix: "%", - Description: "Returns the remainder for of `x` divided by `y`, for `y != 0`.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the remainder"), - ), - Categories: number, -} +var Rem = v1.Rem /** * Bitwise */ -var BitsOr = &Builtin{ - Name: "bits.or", - Description: "Returns the bitwise \"OR\" of two integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsOr = v1.BitsOr -var BitsAnd = &Builtin{ - Name: "bits.and", - Description: "Returns the bitwise \"AND\" of two 
integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsAnd = v1.BitsAnd -var BitsNegate = &Builtin{ - Name: "bits.negate", - Description: "Returns the bitwise negation (flip) of an integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsNegate = v1.BitsNegate -var BitsXOr = &Builtin{ - Name: "bits.xor", - Description: "Returns the bitwise \"XOR\" (exclusive-or) of two integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsXOr = v1.BitsXOr -var BitsShiftLeft = &Builtin{ - Name: "bits.lsh", - Description: "Returns a new integer with its bits shifted `s` bits to the left.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("s", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsShiftLeft = v1.BitsShiftLeft -var BitsShiftRight = &Builtin{ - Name: "bits.rsh", - Description: "Returns a new integer with its bits shifted `s` bits to the right.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("s", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsShiftRight = v1.BitsShiftRight /** * Sets */ -var sets = category("sets") - -var And = &Builtin{ - Name: "and", - Infix: "&", - Description: "Returns the intersection of two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewSet(types.A)), - types.Named("y", types.NewSet(types.A)), - ), - types.Named("z", types.NewSet(types.A)).Description("the intersection of `x` and `y`"), - ), - Categories: sets, -} +var And = v1.And // Or performs a union operation on sets. 
-var Or = &Builtin{ - Name: "or", - Infix: "|", - Description: "Returns the union of two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewSet(types.A)), - types.Named("y", types.NewSet(types.A)), - ), - types.Named("z", types.NewSet(types.A)).Description("the union of `x` and `y`"), - ), - Categories: sets, -} +var Or = v1.Or -var Intersection = &Builtin{ - Name: "intersection", - Description: "Returns the intersection of the given input sets.", - Decl: types.NewFunction( - types.Args( - types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to intersect"), - ), - types.Named("y", types.NewSet(types.A)).Description("the intersection of all `xs` sets"), - ), - Categories: sets, -} +var Intersection = v1.Intersection -var Union = &Builtin{ - Name: "union", - Description: "Returns the union of the given input sets.", - Decl: types.NewFunction( - types.Args( - types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to merge"), - ), - types.Named("y", types.NewSet(types.A)).Description("the union of all `xs` sets"), - ), - Categories: sets, -} +var Union = v1.Union /** * Aggregates */ -var aggregates = category("aggregates") - -var Count = &Builtin{ - Name: "count", - Description: " Count takes a collection or string and returns the number of elements (or characters) in it.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.S, - )).Description("the set/array/object/string to be counted"), - ), - types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."), - ), - Categories: aggregates, -} +var Count = v1.Count -var Sum = &Builtin{ - Name: "sum", - Description: "Sums elements of an array or set of numbers.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", 
types.NewAny( - types.NewSet(types.N), - types.NewArray(nil, types.N), - )), - ), - types.Named("n", types.N).Description("the sum of all elements"), - ), - Categories: aggregates, -} +var Sum = v1.Sum -var Product = &Builtin{ - Name: "product", - Description: "Muliplies elements of an array or set of numbers", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.N), - types.NewArray(nil, types.N), - )), - ), - types.Named("n", types.N).Description("the product of all elements"), - ), - Categories: aggregates, -} +var Product = v1.Product -var Max = &Builtin{ - Name: "max", - Description: "Returns the maximum value in a collection.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - )), - ), - types.Named("n", types.A).Description("the maximum of all elements"), - ), - Categories: aggregates, -} +var Max = v1.Max -var Min = &Builtin{ - Name: "min", - Description: "Returns the minimum value in a collection.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - )), - ), - types.Named("n", types.A).Description("the minimum of all elements"), - ), - Categories: aggregates, -} +var Min = v1.Min /** * Sorting */ -var Sort = &Builtin{ - Name: "sort", - Description: "Returns a sorted array.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewArray(nil, types.A), - types.NewSet(types.A), - )).Description("the array or set to be sorted"), - ), - types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"), - ), - Categories: aggregates, -} +var Sort = v1.Sort /** * Arrays */ -var ArrayConcat = &Builtin{ - Name: "array.concat", - Description: "Concatenates two arrays.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewArray(nil, types.A)), - types.Named("y", 
types.NewArray(nil, types.A)), - ), - types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"), - ), -} +var ArrayConcat = v1.ArrayConcat -var ArraySlice = &Builtin{ - Name: "array.slice", - Description: "Returns a slice of a given array. If `start` is greater or equal than `stop`, `slice` is `[]`.", - Decl: types.NewFunction( - types.Args( - types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"), - types.Named("start", types.NewNumber()).Description("the start index of the returned slice; if less than zero, it's clamped to 0"), - types.Named("stop", types.NewNumber()).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"), - ), - types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"), - ), -} // NOTE(sr): this function really needs examples - -var ArrayReverse = &Builtin{ - Name: "array.reverse", - Description: "Returns the reverse of a given array.", - Decl: types.NewFunction( - types.Args( - types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be reversed"), - ), - types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"), - ), -} +var ArraySlice = v1.ArraySlice + +var ArrayReverse = v1.ArrayReverse /** * Conversions */ -var conversions = category("conversions") - -var ToNumber = &Builtin{ - Name: "to_number", - Description: "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.S, - types.B, - types.NewNull(), - )), - ), - types.Named("num", types.N), - ), - Categories: conversions, -} + +var ToNumber = v1.ToNumber /** * Regular 
Expressions */ -var RegexMatch = &Builtin{ - Name: "regex.match", - Description: "Matches a string against a regular expression.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("value to match against `pattern`"), - ), - types.Named("result", types.B), - ), -} +var RegexMatch = v1.RegexMatch -var RegexIsValid = &Builtin{ - Name: "regex.is_valid", - Description: "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - ), - types.Named("result", types.B), - ), -} +var RegexIsValid = v1.RegexIsValid -var RegexFindAllStringSubmatch = &Builtin{ - Name: "regex.find_all_string_submatch_n", - Description: "Returns all successive matches of the expression.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string to match"), - types.Named("number", types.N).Description("number of matches to return; `-1` means all matches"), - ), - types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))), - ), -} +var RegexFindAllStringSubmatch = v1.RegexFindAllStringSubmatch -var RegexTemplateMatch = &Builtin{ - Name: "regex.template_match", - Description: "Matches a string against a pattern, where there pattern may be glob-like", - Decl: types.NewFunction( - types.Args( - types.Named("template", types.S).Description("template expression containing `0..n` regular expressions"), - types.Named("value", types.S).Description("string to match"), - types.Named("delimiter_start", types.S).Description("start delimiter of the regular expression in `template`"), - types.Named("delimiter_end", types.S).Description("end delimiter of the regular expression in `template`"), - ), - 
types.Named("result", types.B), - ), -} // TODO(sr): example:`regex.template_match("urn:foo:{.*}", "urn:foo:bar:baz", "{", "}")`` returns ``true``. - -var RegexSplit = &Builtin{ - Name: "regex.split", - Description: "Splits the input string by the occurrences of the given pattern.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string to match"), - ), - types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"), - ), -} +var RegexTemplateMatch = v1.RegexTemplateMatch + +var RegexSplit = v1.RegexSplit // RegexFind takes two strings and a number, the pattern, the value and number of match values to // return, -1 means all match values. -var RegexFind = &Builtin{ - Name: "regex.find_n", - Description: "Returns the specified number of matches when matching the input against the pattern.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string to match"), - types.Named("number", types.N).Description("number of matches to return, if `-1`, returns all matches"), - ), - types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"), - ), -} +var RegexFind = v1.RegexFind // GlobsMatch takes two strings regexp-style strings and evaluates to true if their // intersection matches a non-empty set of non-empty strings. // Examples: // - "a.a." and ".b.b" -> true. // - "[a-z]*" and [0-9]+" -> not true. -var GlobsMatch = &Builtin{ - Name: "regex.globs_match", - Description: `Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings. 
-The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.", - Decl: types.NewFunction( - types.Args( - types.Named("glob1", types.S), - types.Named("glob2", types.S), - ), - types.Named("result", types.B), - ), -} +var GlobsMatch = v1.GlobsMatch /** * Strings */ -var stringsCat = category("strings") - -var AnyPrefixMatch = &Builtin{ - Name: "strings.any_prefix_match", - Description: "Returns true if any of the search strings begins with any of the base strings.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("search string(s)"), - types.Named("base", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("base string(s)"), - ), - types.Named("result", types.B).Description("result of the prefix check"), - ), - Categories: stringsCat, -} -var AnySuffixMatch = &Builtin{ - Name: "strings.any_suffix_match", - Description: "Returns true if any of the search strings ends with any of the base strings.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("search string(s)"), - types.Named("base", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("base string(s)"), - ), - types.Named("result", types.B).Description("result of the suffix check"), - ), - Categories: stringsCat, -} +var AnyPrefixMatch = v1.AnyPrefixMatch -var Concat = &Builtin{ - Name: "concat", - Description: "Joins a set or array of strings with a delimiter.", - Decl: types.NewFunction( - types.Args( - types.Named("delimiter", types.S), - types.Named("collection", types.NewAny( - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("strings to join"), - ), - types.Named("output", types.S), - ), - 
Categories: stringsCat, -} +var AnySuffixMatch = v1.AnySuffixMatch -var FormatInt = &Builtin{ - Name: "format_int", - Description: "Returns the string representation of the number in the given base after rounding it down to an integer value.", - Decl: types.NewFunction( - types.Args( - types.Named("number", types.N).Description("number to format"), - types.Named("base", types.N).Description("base of number representation to use"), - ), - types.Named("output", types.S).Description("formatted number"), - ), - Categories: stringsCat, -} +var Concat = v1.Concat -var IndexOf = &Builtin{ - Name: "indexof", - Description: "Returns the index of a substring contained inside a string.", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"), - ), - Categories: stringsCat, -} +var FormatInt = v1.FormatInt -var IndexOfN = &Builtin{ - Name: "indexof_n", - Description: "Returns a list of all the indexes of a substring contained inside a string.", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("output", types.NewArray(nil, types.N)).Description("all indices at which `needle` occurs in `haystack`, may be empty"), - ), - Categories: stringsCat, -} +var IndexOf = v1.IndexOf -var Substring = &Builtin{ - Name: "substring", - Description: "Returns the portion of a string for a given `offset` and a `length`. 
If `length < 0`, `output` is the remainder of the string.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S), - types.Named("offset", types.N).Description("offset, must be positive"), - types.Named("length", types.N).Description("length of the substring starting from `offset`"), - ), - types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"), - ), - Categories: stringsCat, -} +var IndexOfN = v1.IndexOfN -var Contains = &Builtin{ - Name: "contains", - Description: "Returns `true` if the search string is included in the base string", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("result", types.B).Description("result of the containment check"), - ), - Categories: stringsCat, -} +var Substring = v1.Substring -var StringCount = &Builtin{ - Name: "strings.count", - Description: "Returns the number of non-overlapping instances of a substring in a string.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("string to search in"), - types.Named("substring", types.S).Description("substring to look for"), - ), - types.Named("output", types.N).Description("count of occurrences, `0` if not found"), - ), - Categories: stringsCat, -} +var Contains = v1.Contains -var StartsWith = &Builtin{ - Name: "startswith", - Description: "Returns true if the search string begins with the base string.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("search string"), - types.Named("base", types.S).Description("base string"), - ), - types.Named("result", types.B).Description("result of the prefix check"), - ), - Categories: stringsCat, -} +var StringCount = v1.StringCount -var EndsWith = &Builtin{ - Name: "endswith", - Description: "Returns true if the search string ends with the base string.", - 
Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("search string"), - types.Named("base", types.S).Description("base string"), - ), - types.Named("result", types.B).Description("result of the suffix check"), - ), - Categories: stringsCat, -} +var StartsWith = v1.StartsWith -var Lower = &Builtin{ - Name: "lower", - Description: "Returns the input string but with all characters in lower-case.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is converted to lower-case"), - ), - types.Named("y", types.S).Description("lower-case of x"), - ), - Categories: stringsCat, -} +var EndsWith = v1.EndsWith -var Upper = &Builtin{ - Name: "upper", - Description: "Returns the input string but with all characters in upper-case.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is converted to upper-case"), - ), - types.Named("y", types.S).Description("upper-case of x"), - ), - Categories: stringsCat, -} +var Lower = v1.Lower -var Split = &Builtin{ - Name: "split", - Description: "Split returns an array containing elements of the input string split on a delimiter.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is split"), - types.Named("delimiter", types.S).Description("delimiter used for splitting"), - ), - types.Named("ys", types.NewArray(nil, types.S)).Description("split parts"), - ), - Categories: stringsCat, -} +var Upper = v1.Upper -var Replace = &Builtin{ - Name: "replace", - Description: "Replace replaces all instances of a sub-string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string being processed"), - types.Named("old", types.S).Description("substring to replace"), - types.Named("new", types.S).Description("string to replace `old` with"), - ), - types.Named("y", types.S).Description("string with replaced substrings"), - ), - Categories: stringsCat, -} +var 
Split = v1.Split -var ReplaceN = &Builtin{ - Name: "strings.replace_n", - Description: `Replaces a string from a list of old, new string pairs. -Replacements are performed in the order they appear in the target string, without overlapping matches. -The old string comparisons are done in argument order.`, - Decl: types.NewFunction( - types.Args( - types.Named("patterns", types.NewObject( - nil, - types.NewDynamicProperty( - types.S, - types.S)), - ).Description("replacement pairs"), - types.Named("value", types.S).Description("string to replace substring matches in"), - ), - types.Named("output", types.S), - ), -} +var Replace = v1.Replace -var RegexReplace = &Builtin{ - Name: "regex.replace", - Description: `Find and replaces the text using the regular expression pattern.`, - Decl: types.NewFunction( - types.Args( - types.Named("s", types.S).Description("string being processed"), - types.Named("pattern", types.S).Description("regex pattern to be applied"), - types.Named("value", types.S).Description("regex value"), - ), - types.Named("output", types.S), - ), -} +var ReplaceN = v1.ReplaceN -var Trim = &Builtin{ - Name: "trim", - Description: "Returns `value` with all leading or trailing instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off"), - ), - types.Named("output", types.S).Description("string trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var RegexReplace = v1.RegexReplace -var TrimLeft = &Builtin{ - Name: "trim_left", - Description: "Returns `value` with all leading instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off on the left"), - ), - types.Named("output", 
types.S).Description("string left-trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var Trim = v1.Trim -var TrimPrefix = &Builtin{ - Name: "trim_prefix", - Description: "Returns `value` without the prefix. If `value` doesn't start with `prefix`, it is returned unchanged.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("prefix", types.S).Description("prefix to cut off"), - ), - types.Named("output", types.S).Description("string with `prefix` cut off"), - ), - Categories: stringsCat, -} +var TrimLeft = v1.TrimLeft -var TrimRight = &Builtin{ - Name: "trim_right", - Description: "Returns `value` with all trailing instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off on the right"), - ), - types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var TrimPrefix = v1.TrimPrefix -var TrimSuffix = &Builtin{ - Name: "trim_suffix", - Description: "Returns `value` without the suffix. 
If `value` doesn't end with `suffix`, it is returned unchanged.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("suffix", types.S).Description("suffix to cut off"), - ), - types.Named("output", types.S).Description("string with `suffix` cut off"), - ), - Categories: stringsCat, -} +var TrimRight = v1.TrimRight -var TrimSpace = &Builtin{ - Name: "trim_space", - Description: "Return the given string with all leading and trailing white space removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - ), - types.Named("output", types.S).Description("string leading and trailing white space cut off"), - ), - Categories: stringsCat, -} +var TrimSuffix = v1.TrimSuffix -var Sprintf = &Builtin{ - Name: "sprintf", - Description: "Returns the given string, formatted.", - Decl: types.NewFunction( - types.Args( - types.Named("format", types.S).Description("string with formatting verbs"), - types.Named("values", types.NewArray(nil, types.A)).Description("arguments to format into formatting verbs"), - ), - types.Named("output", types.S).Description("`format` formatted by the values in `values`"), - ), - Categories: stringsCat, -} +var TrimSpace = v1.TrimSpace -var StringReverse = &Builtin{ - Name: "strings.reverse", - Description: "Reverses a given string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S), - ), - Categories: stringsCat, -} +var Sprintf = v1.Sprintf -var RenderTemplate = &Builtin{ - Name: "strings.render_template", - Description: `Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key. 
- For examples of templating syntax, see https://pkg.go.dev/text/template`, - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("a templated string"), - types.Named("vars", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("a mapping of template variable keys to values"), - ), - types.Named("result", types.S).Description("rendered template with template variables injected"), - ), - Categories: stringsCat, -} +var StringReverse = v1.StringReverse + +var RenderTemplate = v1.RenderTemplate /** * Numbers @@ -1354,82 +236,19 @@ var RenderTemplate = &Builtin{ // RandIntn returns a random number 0 - n // Marked non-deterministic because it relies on RNG internally. -var RandIntn = &Builtin{ - Name: "rand.intn", - Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.", - Decl: types.NewFunction( - types.Args( - types.Named("str", types.S), - types.Named("n", types.N), - ), - types.Named("y", types.N).Description("random integer in the range `[0, abs(n))`"), - ), - Categories: number, - Nondeterministic: true, -} +var RandIntn = v1.RandIntn -var NumbersRange = &Builtin{ - Name: "numbers.range", - Description: "Returns an array of numbers in the given (inclusive) range. If `a==b`, then `range == [a]`; if `a > b`, then `range` is in descending order.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.N), - types.Named("b", types.N), - ), - types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"), - ), -} +var NumbersRange = v1.NumbersRange -var NumbersRangeStep = &Builtin{ - Name: "numbers.range_step", - Description: `Returns an array of numbers in the given (inclusive) range incremented by a positive step. - If "a==b", then "range == [a]"; if "a > b", then "range" is in descending order. 
- If the provided "step" is less then 1, an error will be thrown. - If "b" is not in the range of the provided "step", "b" won't be included in the result. - `, - Decl: types.NewFunction( - types.Args( - types.Named("a", types.N), - types.Named("b", types.N), - types.Named("step", types.N), - ), - types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"), - ), -} +var NumbersRangeStep = v1.NumbersRangeStep /** * Units */ -var UnitsParse = &Builtin{ - Name: "units.parse", - Description: `Converts strings like "10G", "5K", "4M", "1500m" and the like into a number. -This number can be a non-integer, such as 1.5, 0.22, etc. Supports standard metric decimal and -binary SI units (e.g., K, Ki, M, Mi, G, Gi etc.) m, K, M, G, T, P, and E are treated as decimal -units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as binary units. - -Note that 'm' and 'M' are case-sensitive, to allow distinguishing between "milli" and "mega" units respectively. Other units are case-insensitive.`, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the unit to parse"), - ), - types.Named("y", types.N).Description("the parsed number"), - ), -} +var UnitsParse = v1.UnitsParse -var UnitsParseBytes = &Builtin{ - Name: "units.parse_bytes", - Description: `Converts strings like "10GB", "5K", "4mb" into an integer number of bytes. -Supports standard byte units (e.g., KB, KiB, etc.) KB, MB, GB, and TB are treated as decimal -units and KiB, MiB, GiB, and TiB are treated as binary units. The bytes symbol (b/B) in the -unit is optional and omitting it wil give the same result (e.g. Mi and MiB).`, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the byte unit to parse"), - ), - types.Named("y", types.N).Description("the parsed number"), - ), -} +var UnitsParseBytes = v1.UnitsParseBytes // /** @@ -1438,1372 +257,241 @@ unit is optional and omitting it wil give the same result (e.g. 
Mi and MiB).`, // UUIDRFC4122 returns a version 4 UUID string. // Marked non-deterministic because it relies on RNG internally. -var UUIDRFC4122 = &Builtin{ - Name: "uuid.rfc4122", - Description: "Returns a new UUIDv4.", - Decl: types.NewFunction( - types.Args( - types.Named("k", types.S), - ), - types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"), - ), - Nondeterministic: true, -} +var UUIDRFC4122 = v1.UUIDRFC4122 -var UUIDParse = &Builtin{ - Name: "uuid.parse", - Description: "Parses the string value as an UUID and returns an object with the well-defined fields of the UUID if valid.", - Categories: nil, - Decl: types.NewFunction( - types.Args( - types.Named("uuid", types.S), - ), - types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."), - ), - Relation: false, -} +var UUIDParse = v1.UUIDParse /** * JSON */ -var objectCat = category("object") - -var JSONFilter = &Builtin{ - Name: "json.filter", - Description: "Filters the object. " + - "For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`). 
" + - "Paths are not filtered in-order and are deduplicated before being evaluated.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("paths", types.NewAny( - types.NewArray( - nil, - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - types.NewSet( - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - )).Description("JSON string paths"), - ), - types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"), - ), - Categories: objectCat, -} +var JSONFilter = v1.JSONFilter -var JSONRemove = &Builtin{ - Name: "json.remove", - Description: "Removes paths from an object. " + - "For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. " + - "Paths are not removed in-order and are deduplicated before being evaluated.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("paths", types.NewAny( - types.NewArray( - nil, - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - types.NewSet( - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - )).Description("JSON string paths"), - ), - types.Named("output", types.A).Description("result of removing all keys specified in `paths`"), - ), - Categories: objectCat, -} +var JSONRemove = v1.JSONRemove -var JSONPatch = &Builtin{ - Name: "json.patch", - Description: "Patches an object according to RFC6902. " + - "For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}`. " + - "The patches are applied atomically: if any of them fails, the result will be undefined. 
" + - "Additionally works on sets, where a value contained in the set is considered to be its path.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.A), // TODO(sr): types.A? - types.Named("patches", types.NewArray( - nil, - types.NewObject( - []*types.StaticProperty{ - {Key: "op", Value: types.S}, - {Key: "path", Value: types.A}, - }, - types.NewDynamicProperty(types.A, types.A), - ), - )), - ), - types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"), - ), - Categories: objectCat, -} +var JSONPatch = v1.JSONPatch -var ObjectSubset = &Builtin{ - Name: "object.subset", - Description: "Determines if an object `sub` is a subset of another object `super`." + - "Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, " + - "**and** for all keys which `sub` and `super` share, they have the same value. " + - "This function works with objects, sets, arrays and a set of array and set." + - "If both arguments are objects, then the operation is recursive, e.g. " + - "`{\"c\": {\"x\": {10, 15, 20}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}`. " + - "If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, " + - "but does not attempt to recurse. If both arguments are arrays, " + - "then this function checks if `sub` appears contiguously in order within `super`, " + - "and also does not attempt to recurse. 
If `super` is array and `sub` is set, " + - "then this function checks if `super` contains every element of `sub` with no consideration of ordering, " + - "and also does not attempt to recurse.", - Decl: types.NewFunction( - types.Args( - types.Named("super", types.NewAny(types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - ), - types.NewSet(types.A), - types.NewArray(nil, types.A), - )).Description("object to test if sub is a subset of"), - types.Named("sub", types.NewAny(types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - ), - types.NewSet(types.A), - types.NewArray(nil, types.A), - )).Description("object to test if super is a superset of"), - ), - types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"), - ), -} +var ObjectSubset = v1.ObjectSubset -var ObjectUnion = &Builtin{ - Name: "object.union", - Description: "Creates a new object of the asymmetric union of two objects. " + - "For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("b", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - ), - types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"), - ), // TODO(sr): types.A? ^^^^^^^ (also below) -} +var ObjectUnion = v1.ObjectUnion -var ObjectUnionN = &Builtin{ - Name: "object.union_n", - Description: "Creates a new object that is the asymmetric union of all objects merged from left to right. 
" + - "For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.", - Decl: types.NewFunction( - types.Args( - types.Named("objects", types.NewArray( - nil, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )), - ), - types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"), - ), -} +var ObjectUnionN = v1.ObjectUnionN -var ObjectRemove = &Builtin{ - Name: "object.remove", - Description: "Removes specified keys from an object.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )).Description("object to remove keys from"), - types.Named("keys", types.NewAny( - types.NewArray(nil, types.A), - types.NewSet(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )).Description("keys to remove from x"), - ), - types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"), - ), -} +var ObjectRemove = v1.ObjectRemove -var ObjectFilter = &Builtin{ - Name: "object.filter", - Description: "Filters the object by keeping only specified keys. 
" + - "For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`).", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )).Description("object to filter keys"), - types.Named("keys", types.NewAny( - types.NewArray(nil, types.A), - types.NewSet(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )), - ), - types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"), - ), -} +var ObjectFilter = v1.ObjectFilter -var ObjectGet = &Builtin{ - Name: "object.get", - Description: "Returns value of an object's key if present, otherwise a default. " + - "If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. " + - "For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get `key` from"), - types.Named("key", types.A).Description("key to lookup in `object`"), - types.Named("default", types.A).Description("default to use when lookup fails"), - ), - types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"), - ), -} +var ObjectGet = v1.ObjectGet -var ObjectKeys = &Builtin{ - Name: "object.keys", - Description: "Returns a set of an object's keys. 
" + - "For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\")` results in `{\"a\", \"b\", \"c\"}`.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"), - ), - types.Named("value", types.NewSet(types.A)).Description("set of `object`'s keys"), - ), -} +var ObjectKeys = v1.ObjectKeys /* * Encoding */ -var encoding = category("encoding") - -var JSONMarshal = &Builtin{ - Name: "json.marshal", - Description: "Serializes the input term to JSON.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - ), - types.Named("y", types.S).Description("the JSON string representation of `x`"), - ), - Categories: encoding, -} -var JSONMarshalWithOptions = &Builtin{ - Name: "json.marshal_with_options", - Description: "Serializes the input term JSON, with additional formatting options via the `opts` parameter. " + - "`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - types.Named("opts", types.NewObject( - []*types.StaticProperty{ - types.NewStaticProperty("pretty", types.B), - types.NewStaticProperty("indent", types.S), - types.NewStaticProperty("prefix", types.S), - }, - types.NewDynamicProperty(types.S, types.A), - )).Description("encoding options"), - ), - types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"), - ), - Categories: encoding, -} +var JSONMarshal = v1.JSONMarshal -var JSONUnmarshal = &Builtin{ - Name: "json.unmarshal", - Description: "Deserializes the input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a JSON string"), - ), - 
types.Named("y", types.A).Description("the term deserialized from `x`"), - ), - Categories: encoding, -} +var JSONMarshalWithOptions = v1.JSONMarshalWithOptions -var JSONIsValid = &Builtin{ - Name: "json.is_valid", - Description: "Verifies the input string is a valid JSON document.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a JSON string"), - ), - types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"), - ), - Categories: encoding, -} +var JSONUnmarshal = v1.JSONUnmarshal -var Base64Encode = &Builtin{ - Name: "base64.encode", - Description: "Serializes the input string into base64 encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64 serialization of `x`"), - ), - Categories: encoding, -} +var JSONIsValid = v1.JSONIsValid -var Base64Decode = &Builtin{ - Name: "base64.decode", - Description: "Deserializes the base64 encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64 deserialization of `x`"), - ), - Categories: encoding, -} +var Base64Encode = v1.Base64Encode -var Base64IsValid = &Builtin{ - Name: "base64.is_valid", - Description: "Verifies the input string is base64 encoded.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"), - ), - Categories: encoding, -} +var Base64Decode = v1.Base64Decode -var Base64UrlEncode = &Builtin{ - Name: "base64url.encode", - Description: "Serializes the input string into base64url encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url serialization of `x`"), - ), - Categories: encoding, -} +var Base64IsValid = v1.Base64IsValid -var Base64UrlEncodeNoPad = &Builtin{ - 
Name: "base64url.encode_no_pad", - Description: "Serializes the input string into base64url encoding without padding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url serialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlEncode = v1.Base64UrlEncode -var Base64UrlDecode = &Builtin{ - Name: "base64url.decode", - Description: "Deserializes the base64url encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url deserialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlEncodeNoPad = v1.Base64UrlEncodeNoPad -var URLQueryDecode = &Builtin{ - Name: "urlquery.decode", - Description: "Decodes a URL-encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("URL-encoding deserialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlDecode = v1.Base64UrlDecode -var URLQueryEncode = &Builtin{ - Name: "urlquery.encode", - Description: "Encodes the input string into a URL-encoded string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("URL-encoding serialization of `x`"), - ), - Categories: encoding, -} +var URLQueryDecode = v1.URLQueryDecode -var URLQueryEncodeObject = &Builtin{ - Name: "urlquery.encode_object", - Description: "Encodes the given object into a URL encoded query string.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty( - types.S, - types.NewAny( - types.S, - types.NewArray(nil, types.S), - types.NewSet(types.S)))))), - types.Named("y", types.S).Description("the URL-encoded serialization of `object`"), - ), - Categories: encoding, -} +var URLQueryEncode = v1.URLQueryEncode -var URLQueryDecodeObject = &Builtin{ - Name: "urlquery.decode_object", 
- Description: "Decodes the given URL query string into an object.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the query string"), - ), - types.Named("object", types.NewObject(nil, types.NewDynamicProperty( - types.S, - types.NewArray(nil, types.S)))).Description("the resulting object"), - ), - Categories: encoding, -} +var URLQueryEncodeObject = v1.URLQueryEncodeObject -var YAMLMarshal = &Builtin{ - Name: "yaml.marshal", - Description: "Serializes the input term to YAML.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - ), - types.Named("y", types.S).Description("the YAML string representation of `x`"), - ), - Categories: encoding, -} +var URLQueryDecodeObject = v1.URLQueryDecodeObject -var YAMLUnmarshal = &Builtin{ - Name: "yaml.unmarshal", - Description: "Deserializes the input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a YAML string"), - ), - types.Named("y", types.A).Description("the term deserialized from `x`"), - ), - Categories: encoding, -} +var YAMLMarshal = v1.YAMLMarshal + +var YAMLUnmarshal = v1.YAMLUnmarshal // YAMLIsValid verifies the input string is a valid YAML document. 
-var YAMLIsValid = &Builtin{ - Name: "yaml.is_valid", - Description: "Verifies the input string is a valid YAML document.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a YAML string"), - ), - types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"), - ), - Categories: encoding, -} +var YAMLIsValid = v1.YAMLIsValid -var HexEncode = &Builtin{ - Name: "hex.encode", - Description: "Serializes the input string using hex-encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("serialization of `x` using hex-encoding"), - ), - Categories: encoding, -} +var HexEncode = v1.HexEncode -var HexDecode = &Builtin{ - Name: "hex.decode", - Description: "Deserializes the hex-encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a hex-encoded string"), - ), - types.Named("y", types.S).Description("deserialized from `x`"), - ), - Categories: encoding, -} +var HexDecode = v1.HexDecode /** * Tokens */ -var tokensCat = category("tokens") - -var JWTDecode = &Builtin{ - Name: "io.jwt.decode", - Description: "Decodes a JSON Web Token and outputs it as an object.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token to decode"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.S, - }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the hexadecimal representation of the signature on the token."), - ), - Categories: tokensCat, -} -var JWTVerifyRS256 = &Builtin{ - Name: "io.jwt.verify_rs256", - Description: "Verifies if a RS256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature 
is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTDecode = v1.JWTDecode -var JWTVerifyRS384 = &Builtin{ - Name: "io.jwt.verify_rs384", - Description: "Verifies if a RS384 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS256 = v1.JWTVerifyRS256 -var JWTVerifyRS512 = &Builtin{ - Name: "io.jwt.verify_rs512", - Description: "Verifies if a RS512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS384 = v1.JWTVerifyRS384 -var JWTVerifyPS256 = &Builtin{ - Name: "io.jwt.verify_ps256", - Description: "Verifies if a PS256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the 
signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS512 = v1.JWTVerifyRS512 -var JWTVerifyPS384 = &Builtin{ - Name: "io.jwt.verify_ps384", - Description: "Verifies if a PS384 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS256 = v1.JWTVerifyPS256 -var JWTVerifyPS512 = &Builtin{ - Name: "io.jwt.verify_ps512", - Description: "Verifies if a PS512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS384 = v1.JWTVerifyPS384 -var JWTVerifyES256 = &Builtin{ - Name: "io.jwt.verify_es256", - Description: "Verifies if a ES256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS512 = v1.JWTVerifyPS512 -var JWTVerifyES384 = &Builtin{ - Name: "io.jwt.verify_es384", - Description: "Verifies if a ES384 JWT signature is 
valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES256 = v1.JWTVerifyES256 -var JWTVerifyES512 = &Builtin{ - Name: "io.jwt.verify_es512", - Description: "Verifies if a ES512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES384 = v1.JWTVerifyES384 -var JWTVerifyHS256 = &Builtin{ - Name: "io.jwt.verify_hs256", - Description: "Verifies if a HS256 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES512 = v1.JWTVerifyES512 -var JWTVerifyHS384 = &Builtin{ - Name: "io.jwt.verify_hs384", - Description: "Verifies if a HS384 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", 
types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyHS256 = v1.JWTVerifyHS256 -var JWTVerifyHS512 = &Builtin{ - Name: "io.jwt.verify_hs512", - Description: "Verifies if a HS512 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyHS384 = v1.JWTVerifyHS384 -// Marked non-deterministic because it relies on time internally. -var JWTDecodeVerify = &Builtin{ - Name: "io.jwt.decode_verify", - Description: `Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid. -Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384 and PS512.`, - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified and whose claims are to be checked"), - types.Named("constraints", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("claim verification constraints"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description("`[valid, header, payload]`: if the input token is verified and meets the requirements of `constraints` then `valid` is `true`; `header` and `payload` are objects containing the JOSE header and the JWT claim set; otherwise, `valid` is `false`, `header` and `payload` are `{}`"), - ), - Categories: tokensCat, - Nondeterministic: true, -} +var JWTVerifyHS512 = v1.JWTVerifyHS512 -var tokenSign = 
category("tokensign") +// Marked non-deterministic because it relies on time internally. +var JWTDecodeVerify = v1.JWTDecodeVerify // Marked non-deterministic because it relies on RNG internally. -var JWTEncodeSignRaw = &Builtin{ - Name: "io.jwt.encode_sign_raw", - Description: "Encodes and optionally signs a JSON Web Token.", - Decl: types.NewFunction( - types.Args( - types.Named("headers", types.S).Description("JWS Protected Header"), - types.Named("payload", types.S).Description("JWS Payload"), - types.Named("key", types.S).Description("JSON Web Key (RFC7517)"), - ), - types.Named("output", types.S).Description("signed JWT"), - ), - Categories: tokenSign, - Nondeterministic: true, -} +var JWTEncodeSignRaw = v1.JWTEncodeSignRaw // Marked non-deterministic because it relies on RNG internally. -var JWTEncodeSign = &Builtin{ - Name: "io.jwt.encode_sign", - Description: "Encodes and optionally signs a JSON Web Token. Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).", - Decl: types.NewFunction( - types.Args( - types.Named("headers", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Protected Header"), - types.Named("payload", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Payload"), - types.Named("key", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JSON Web Key (RFC7517)"), - ), - types.Named("output", types.S).Description("signed JWT"), - ), - Categories: tokenSign, - Nondeterministic: true, -} +var JWTEncodeSign = v1.JWTEncodeSign /** * Time */ // Marked non-deterministic because it relies on time directly. 
-var NowNanos = &Builtin{ - Name: "time.now_ns", - Description: "Returns the current time since epoch in nanoseconds.", - Decl: types.NewFunction( - nil, - types.Named("now", types.N).Description("nanoseconds since epoch"), - ), - Nondeterministic: true, -} +var NowNanos = v1.NowNanos -var ParseNanos = &Builtin{ - Name: "time.parse_ns", - Description: "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("layout", types.S).Description("format used for parsing, see the [Go `time` package documentation](https://golang.org/pkg/time/#Parse) for more details"), - types.Named("value", types.S).Description("input to parse according to `layout`"), - ), - types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), - ), -} +var ParseNanos = v1.ParseNanos -var ParseRFC3339Nanos = &Builtin{ - Name: "time.parse_rfc3339_ns", - Description: "Returns the time in nanoseconds parsed from the string in RFC3339 format. 
`undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S), - ), - types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), - ), -} +var ParseRFC3339Nanos = v1.ParseRFC3339Nanos -var ParseDurationNanos = &Builtin{ - Name: "time.parse_duration_ns", - Description: "Returns the duration in nanoseconds represented by a string.", - Decl: types.NewFunction( - types.Args( - types.Named("duration", types.S).Description("a duration like \"3m\"; see the [Go `time` package documentation](https://golang.org/pkg/time/#ParseDuration) for more details"), - ), - types.Named("ns", types.N).Description("the `duration` in nanoseconds"), - ), -} +var ParseDurationNanos = v1.ParseDurationNanos -var Format = &Builtin{ - Name: "time.format", - Description: "Returns the formatted timestamp for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - types.NewArray([]types.Type{types.N, types.S, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"), - ), - types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"), - ), -} +var Format = v1.Format -var Date = &Builtin{ - Name: "time.date", - Description: "Returns the `[year, month, day]` for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a 
two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"), - ), -} +var Date = v1.Date -var Clock = &Builtin{ - Name: "time.clock", - Description: "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)). - Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"), - ), -} +var Clock = v1.Clock -var Weekday = &Builtin{ - Name: "time.weekday", - Description: "Returns the day of the week (Monday, Tuesday, ...) for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"), - ), -} +var Weekday = v1.Weekday -var AddDate = &Builtin{ - Name: "time.add_date", - Description: "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month & day values outside their usual ranges after the operation and will be normalized - for example, October 32 would become November 1. 
`undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("ns", types.N).Description("nanoseconds since the epoch"), - types.Named("years", types.N), - types.Named("months", types.N), - types.Named("days", types.N), - ), - types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"), - ), -} +var AddDate = v1.AddDate -var Diff = &Builtin{ - Name: "time.diff", - Description: "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).", - Decl: types.NewFunction( - types.Args( - types.Named("ns1", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )), - types.Named("ns2", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )), - ), - types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"), - ), -} +var Diff = v1.Diff /** * Crypto. */ -var CryptoX509ParseCertificates = &Builtin{ - Name: "crypto.x509.parse_certificates", - Description: `Returns zero or more certificates from the given encoded string containing -DER certificate data. - -If the input is empty, the function will return null. The input string should be a list of one or more -concatenated PEM blocks. 
The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing one or more certificates or a PEM string of one or more certificates"), - ), - types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"), - ), -} +var CryptoX509ParseCertificates = v1.CryptoX509ParseCertificates -var CryptoX509ParseAndVerifyCertificates = &Builtin{ - Name: "crypto.x509.parse_and_verify_certificates", - Description: `Returns one or more certificates from the given string containing PEM -or base64 encoded DER certificates after verifying the supplied certificates form a complete -certificate chain back to a trusted root. - -The first certificate is treated as the root and the last is treated as the leaf, -with all others being treated as intermediates.`, - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), - ), -} +var CryptoX509ParseAndVerifyCertificates = v1.CryptoX509ParseAndVerifyCertificates -var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{ - Name: "crypto.x509.parse_and_verify_certificates_with_options", - Description: `Returns one or more certificates from the given string 
containing PEM -or base64 encoded DER certificates after verifying the supplied certificates form a complete -certificate chain back to a trusted root. A config option passed as the second argument can -be used to configure the validation options used. - -The first certificate is treated as the root and the last is treated as the leaf, -with all others being treated as intermediates.`, - - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), - types.Named("options", types.NewObject( - nil, - types.NewDynamicProperty(types.S, types.A), - )).Description("object containing extra configs to verify the validity of certificates. `options` object supports four fields which maps to same fields in [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions). `DNSName`, `CurrentTime`: Nanoseconds since the Unix Epoch as a number, `MaxConstraintComparisons` and `KeyUsages`. 
`KeyUsages` is list and can have possible values as in: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"` "), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), - ), -} +var CryptoX509ParseAndVerifyCertificatesWithOptions = v1.CryptoX509ParseAndVerifyCertificatesWithOptions -var CryptoX509ParseCertificateRequest = &Builtin{ - Name: "crypto.x509.parse_certificate_request", - Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.", - Decl: types.NewFunction( - types.Args( - types.Named("csr", types.S).Description("base64 string containing either a PEM encoded or DER CSR or a string containing a PEM CSR"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"), - ), -} +var CryptoX509ParseCertificateRequest = v1.CryptoX509ParseCertificateRequest -var CryptoX509ParseKeyPair = &Builtin{ - Name: "crypto.x509.parse_keypair", - Description: "Returns a valid key pair", - Decl: types.NewFunction( - types.Args( - types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"), - types.Named("pem", 
types.S).Description("string containing PEM or base64 encoded DER keys"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."), - ), -} -var CryptoX509ParseRSAPrivateKey = &Builtin{ - Name: "crypto.x509.parse_rsa_private_key", - Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.", - Decl: types.NewFunction( - types.Args( - types.Named("pem", types.S).Description("base64 string containing a PEM encoded RSA private key"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"), - ), -} +var CryptoX509ParseKeyPair = v1.CryptoX509ParseKeyPair +var CryptoX509ParseRSAPrivateKey = v1.CryptoX509ParseRSAPrivateKey -var CryptoParsePrivateKeys = &Builtin{ - Name: "crypto.parse_private_keys", - Description: `Returns zero or more private keys from the given encoded string containing DER certificate data. - -If the input is empty, the function will return null. The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, - Decl: types.NewFunction( - types.Args( - types.Named("keys", types.S).Description("PEM encoded data containing one or more private keys as concatenated blocks. 
Optionally Base64 encoded."), - ), - types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"), - ), -} +var CryptoParsePrivateKeys = v1.CryptoParsePrivateKeys -var CryptoMd5 = &Builtin{ - Name: "crypto.md5", - Description: "Returns a string representing the input string hashed with the MD5 function", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("MD5-hash of `x`"), - ), -} +var CryptoMd5 = v1.CryptoMd5 -var CryptoSha1 = &Builtin{ - Name: "crypto.sha1", - Description: "Returns a string representing the input string hashed with the SHA1 function", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("SHA1-hash of `x`"), - ), -} +var CryptoSha1 = v1.CryptoSha1 -var CryptoSha256 = &Builtin{ - Name: "crypto.sha256", - Description: "Returns a string representing the input string hashed with the SHA256 function", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("SHA256-hash of `x`"), - ), -} +var CryptoSha256 = v1.CryptoSha256 -var CryptoHmacMd5 = &Builtin{ - Name: "crypto.hmac.md5", - Description: "Returns a string representing the MD5 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("MD5-HMAC of `x`"), - ), -} +var CryptoHmacMd5 = v1.CryptoHmacMd5 -var CryptoHmacSha1 = &Builtin{ - Name: "crypto.hmac.sha1", - Description: "Returns a string representing the SHA1 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - 
types.Named("y", types.S).Description("SHA1-HMAC of `x`"), - ), -} +var CryptoHmacSha1 = v1.CryptoHmacSha1 -var CryptoHmacSha256 = &Builtin{ - Name: "crypto.hmac.sha256", - Description: "Returns a string representing the SHA256 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("SHA256-HMAC of `x`"), - ), -} +var CryptoHmacSha256 = v1.CryptoHmacSha256 -var CryptoHmacSha512 = &Builtin{ - Name: "crypto.hmac.sha512", - Description: "Returns a string representing the SHA512 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("SHA512-HMAC of `x`"), - ), -} +var CryptoHmacSha512 = v1.CryptoHmacSha512 -var CryptoHmacEqual = &Builtin{ - Name: "crypto.hmac.equal", - Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.", - Decl: types.NewFunction( - types.Args( - types.Named("mac1", types.S).Description("mac1 to compare"), - types.Named("mac2", types.S).Description("mac2 to compare"), - ), - types.Named("result", types.B).Description("`true` if the MACs are equals, `false` otherwise"), - ), -} +var CryptoHmacEqual = v1.CryptoHmacEqual /** * Graphs. */ -var graphs = category("graph") - -var WalkBuiltin = &Builtin{ - Name: "walk", - Relation: true, - Description: "Generates `[path, value]` tuples for all nested documents of `x` (recursively). 
Queries can use `walk` to traverse documents nested under `x`.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("output", types.NewArray( - []types.Type{ - types.NewArray(nil, types.A), - types.A, - }, - nil, - )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."), - ), - Categories: graphs, -} -var ReachableBuiltin = &Builtin{ - Name: "graph.reachable", - Description: "Computes the set of reachable nodes in the graph from a set of starting nodes.", - Decl: types.NewFunction( - types.Args( - types.Named("graph", types.NewObject( - nil, - types.NewDynamicProperty( - types.A, - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A)), - )), - ).Description("object containing a set or array of neighboring vertices"), - types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices"), - ), - types.Named("output", types.NewSet(types.A)).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"), - ), -} +var WalkBuiltin = v1.WalkBuiltin -var ReachablePathsBuiltin = &Builtin{ - Name: "graph.reachable_paths", - Description: "Computes the set of reachable paths in the graph from a set of starting nodes.", - Decl: types.NewFunction( - types.Args( - types.Named("graph", types.NewObject( - nil, - types.NewDynamicProperty( - types.A, - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A)), - )), - ).Description("object containing a set or array of root vertices"), - types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct? 
- ), - types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"), - ), -} +var ReachableBuiltin = v1.ReachableBuiltin + +var ReachablePathsBuiltin = v1.ReachablePathsBuiltin /** * Type */ -var typesCat = category("types") - -var IsNumber = &Builtin{ - Name: "is_number", - Description: "Returns `true` if the input value is a number.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."), - ), - Categories: typesCat, -} -var IsString = &Builtin{ - Name: "is_string", - Description: "Returns `true` if the input value is a string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."), - ), - Categories: typesCat, -} +var IsNumber = v1.IsNumber -var IsBoolean = &Builtin{ - Name: "is_boolean", - Description: "Returns `true` if the input value is a boolean.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is an boolean, `false` otherwise."), - ), - Categories: typesCat, -} +var IsString = v1.IsString -var IsArray = &Builtin{ - Name: "is_array", - Description: "Returns `true` if the input value is an array.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."), - ), - Categories: typesCat, -} +var IsBoolean = v1.IsBoolean -var IsSet = &Builtin{ - Name: "is_set", - Description: "Returns `true` if the input value is a set.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."), - ), - Categories: typesCat, -} +var IsArray = v1.IsArray -var 
IsObject = &Builtin{ - Name: "is_object", - Description: "Returns true if the input value is an object", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."), - ), - Categories: typesCat, -} +var IsSet = v1.IsSet -var IsNull = &Builtin{ - Name: "is_null", - Description: "Returns `true` if the input value is null.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."), - ), - Categories: typesCat, -} +var IsObject = v1.IsObject + +var IsNull = v1.IsNull /** * Type Name */ // TypeNameBuiltin returns the type of the input. -var TypeNameBuiltin = &Builtin{ - Name: "type_name", - Description: "Returns the type of its input value.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`), - ), - Categories: typesCat, -} +var TypeNameBuiltin = v1.TypeNameBuiltin /** * HTTP Request */ // Marked non-deterministic because HTTP request results can be non-deterministic. -var HTTPSend = &Builtin{ - Name: "http.send", - Description: "Returns a HTTP response to the given HTTP request.", - Decl: types.NewFunction( - types.Args( - types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - ), - types.Named("response", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))), - ), - Nondeterministic: true, -} +var HTTPSend = v1.HTTPSend /** * GraphQL */ // GraphQLParse returns a pair of AST objects from parsing/validation. -var GraphQLParse = &Builtin{ - Name: "graphql.parse", - Description: "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. 
The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.NewArray([]types.Type{ - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."), - ), -} +var GraphQLParse = v1.GraphQLParse // GraphQLParseAndVerify returns a boolean and a pair of AST object from parsing/validation. -var GraphQLParseAndVerify = &Builtin{ - Name: "graphql.parse_and_verify", - Description: "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description(" `output` is of the form `[valid, query_ast, schema_ast]`. 
If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."), - ), -} +var GraphQLParseAndVerify = v1.GraphQLParseAndVerify // GraphQLParseQuery parses the input GraphQL query and returns a JSON // representation of its AST. -var GraphQLParseQuery = &Builtin{ - Name: "graphql.parse_query", - Description: "Returns an AST object for a GraphQL query.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.S), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."), - ), -} +var GraphQLParseQuery = v1.GraphQLParseQuery // GraphQLParseSchema parses the input GraphQL schema and returns a JSON // representation of its AST. -var GraphQLParseSchema = &Builtin{ - Name: "graphql.parse_schema", - Description: "Returns an AST object for a GraphQL schema.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.S), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."), - ), -} +var GraphQLParseSchema = v1.GraphQLParseSchema // GraphQLIsValid returns true if a GraphQL query is valid with a given // schema, and returns false for all other inputs. -var GraphQLIsValid = &Builtin{ - Name: "graphql.is_valid", - Description: "Checks that a GraphQL query is valid against a given schema. 
The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."), - ), -} +var GraphQLIsValid = v1.GraphQLIsValid // GraphQLSchemaIsValid returns true if the input is valid GraphQL schema, // and returns false for all other inputs. -var GraphQLSchemaIsValid = &Builtin{ - Name: "graphql.schema_is_valid", - Description: "Checks that the input is a valid GraphQL schema. The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."), - ), -} +var GraphQLSchemaIsValid = v1.GraphQLSchemaIsValid /** * JSON Schema @@ -2811,313 +499,76 @@ var GraphQLSchemaIsValid = &Builtin{ // JSONSchemaVerify returns empty string if the input is valid JSON schema // and returns error string for all other inputs. -var JSONSchemaVerify = &Builtin{ - Name: "json.verify_schema", - Description: "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or an JSON object.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). - Description("the schema to verify"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewAny(types.S, types.Null{}), - }, nil)). 
- Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. Otherwise, `valid` is `false` and `error` is a string describing the error."), - ), - Categories: objectCat, -} +var JSONSchemaVerify = v1.JSONSchemaVerify // JSONMatchSchema returns empty array if the document matches the JSON schema, // and returns non-empty array with error objects otherwise. -var JSONMatchSchema = &Builtin{ - Name: "json.match_schema", - Description: "Checks that the document matches the JSON schema.", - Decl: types.NewFunction( - types.Args( - types.Named("document", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). - Description("document to verify by schema"), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). - Description("schema to verify document by"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray( - nil, types.NewObject( - []*types.StaticProperty{ - {Key: "error", Value: types.S}, - {Key: "type", Value: types.S}, - {Key: "field", Value: types.S}, - {Key: "desc", Value: types.S}, - }, - nil, - ), - ), - }, nil)). - Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."), - ), - Categories: objectCat, -} +var JSONMatchSchema = v1.JSONMatchSchema /** * Cloud Provider Helper Functions */ -var providersAWSCat = category("providers.aws") - -var ProvidersAWSSignReqObj = &Builtin{ - Name: "providers.aws.sign_req", - Description: "Signs an HTTP request object for Amazon Web Services. 
Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.", - Decl: types.NewFunction( - types.Args( - types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - types.Named("aws_config", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - types.Named("time_ns", types.N), - ), - types.Named("signed_request", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))), - ), - Categories: providersAWSCat, -} + +var ProvidersAWSSignReqObj = v1.ProvidersAWSSignReqObj /** * Rego */ -var RegoParseModule = &Builtin{ - Name: "rego.parse_module", - Description: "Parses the input Rego string and returns an object representation of the AST.", - Decl: types.NewFunction( - types.Args( - types.Named("filename", types.S).Description("file name to attach to AST nodes' locations"), - types.Named("rego", types.S).Description("Rego module"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), // TODO(tsandall): import AST schema - ), -} +var RegoParseModule = v1.RegoParseModule -var RegoMetadataChain = &Builtin{ - Name: "rego.metadata.chain", - Description: `Returns the chain of metadata for the active rule. -Ordered starting at the active rule, going outward to the most distant node in its package ancestry. -A chain entry is a JSON document with two members: "path", an array representing the path of the node; and "annotations", a JSON document containing the annotations declared for the node. 
-The first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the "annotations" member is not present).`, - Decl: types.NewFunction( - types.Args(), - types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"), - ), -} +var RegoMetadataChain = v1.RegoMetadataChain // RegoMetadataRule returns the metadata for the active rule -var RegoMetadataRule = &Builtin{ - Name: "rego.metadata.rule", - Description: "Returns annotations declared for the active rule and using the _rule_ scope.", - Decl: types.NewFunction( - types.Args(), - types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"), - ), -} +var RegoMetadataRule = v1.RegoMetadataRule /** * OPA */ // Marked non-deterministic because of unpredictable config/environment-dependent results. -var OPARuntime = &Builtin{ - Name: "opa.runtime", - Description: "Returns an object that describes the runtime environment where OPA is deployed.", - Decl: types.NewFunction( - nil, - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). - Description("includes a `config` key if OPA was started with a configuration file; an `env` key containing the environment variables that the OPA process was started with; includes `version` and `commit` keys containing the version and build commit of OPA."), - ), - Nondeterministic: true, -} +var OPARuntime = v1.OPARuntime /** * Trace */ -var tracing = category("tracing") - -var Trace = &Builtin{ - Name: "trace", - Description: "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. 
For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.", - Decl: types.NewFunction( - types.Args( - types.Named("note", types.S).Description("the note to include"), - ), - types.Named("result", types.B).Description("always `true`"), - ), - Categories: tracing, -} + +var Trace = v1.Trace /** * Glob */ -var GlobMatch = &Builtin{ - Name: "glob.match", - Description: "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S), - types.Named("delimiters", types.NewAny( - types.NewArray(nil, types.S), - types.NewNull(), - )).Description("glob pattern delimiters, e.g. `[\".\", \":\"]`, defaults to `[\".\"]` if unset. If `delimiters` is `null`, glob match without delimiter."), - types.Named("match", types.S), - ), - types.Named("result", types.B).Description("true if `match` can be found in `pattern` which is separated by `delimiters`"), - ), -} +var GlobMatch = v1.GlobMatch -var GlobQuoteMeta = &Builtin{ - Name: "glob.quote_meta", - Description: "Returns a string which represents a version of the pattern where all asterisks have been escaped.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S), - ), - types.Named("output", types.S).Description("the escaped string of `pattern`"), - ), - // TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``. -} +var GlobQuoteMeta = v1.GlobQuoteMeta /** * Networking */ -var NetCIDRIntersects = &Builtin{ - Name: "net.cidr_intersects", - Description: "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). 
Supports both IPv4 and IPv6 notations.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr1", types.S), - types.Named("cidr2", types.S), - ), - types.Named("result", types.B), - ), -} +var NetCIDRIntersects = v1.NetCIDRIntersects -var NetCIDRExpand = &Builtin{ - Name: "net.cidr_expand", - Description: "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - ), - types.Named("hosts", types.NewSet(types.S)).Description("set of IP addresses the CIDR `cidr` expands to"), - ), -} +var NetCIDRExpand = v1.NetCIDRExpand -var NetCIDRContains = &Builtin{ - Name: "net.cidr_contains", - Description: "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. Supports both IPv4 and IPv6 notations.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - types.Named("cidr_or_ip", types.S), - ), - types.Named("result", types.B), - ), -} +var NetCIDRContains = v1.NetCIDRContains -var NetCIDRContainsMatches = &Builtin{ - Name: "net.cidr_contains_matches", - Description: "Checks if collections of cidrs or ips are contained within another collection of cidrs and returns matches. 
" + - "This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).", - Decl: types.NewFunction( - types.Args( - types.Named("cidrs", netCidrContainsMatchesOperandType), - types.Named("cidrs_or_ips", netCidrContainsMatchesOperandType), - ), - types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"), - ), -} +var NetCIDRContainsMatches = v1.NetCIDRContainsMatches -var NetCIDRMerge = &Builtin{ - Name: "net.cidr_merge", - Description: "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`." + - `This function merges adjacent subnets where possible, those contained within others and also removes any duplicates. -Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. 
"/128").`, - Decl: types.NewFunction( - types.Args( - types.Named("addrs", types.NewAny( - types.NewArray(nil, types.NewAny(types.S)), - types.NewSet(types.S), - )).Description("CIDRs or IP addresses"), - ), - types.Named("output", types.NewSet(types.S)).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"), - ), -} +var NetCIDRMerge = v1.NetCIDRMerge -var NetCIDRIsValid = &Builtin{ - Name: "net.cidr_is_valid", - Description: "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - ), - types.Named("result", types.B), - ), -} - -var netCidrContainsMatchesOperandType = types.NewAny( - types.S, - types.NewArray(nil, types.NewAny( - types.S, - types.NewArray(nil, types.A), - )), - types.NewSet(types.NewAny( - types.S, - types.NewArray(nil, types.A), - )), - types.NewObject(nil, types.NewDynamicProperty( - types.S, - types.NewAny( - types.S, - types.NewArray(nil, types.A), - ), - )), -) +var NetCIDRIsValid = v1.NetCIDRIsValid // Marked non-deterministic because DNS resolution results can be non-deterministic. 
-var NetLookupIPAddr = &Builtin{ - Name: "net.lookup_ip_addr", - Description: "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.", - Decl: types.NewFunction( - types.Args( - types.Named("name", types.S).Description("domain name to resolve"), - ), - types.Named("addrs", types.NewSet(types.S)).Description("IP addresses (v4 and v6) that `name` resolves to"), - ), - Nondeterministic: true, -} +var NetLookupIPAddr = v1.NetLookupIPAddr /** * Semantic Versions */ -var SemVerIsValid = &Builtin{ - Name: "semver.is_valid", - Description: "Validates that the input is a valid SemVer string.", - Decl: types.NewFunction( - types.Args( - types.Named("vsn", types.A), - ), - types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"), - ), -} +var SemVerIsValid = v1.SemVerIsValid -var SemVerCompare = &Builtin{ - Name: "semver.compare", - Description: "Compares valid SemVer formatted version strings.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.S), - types.Named("b", types.S), - ), - types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"), - ), -} +var SemVerCompare = v1.SemVerCompare /** * Printing @@ -3128,248 +579,56 @@ var SemVerCompare = &Builtin{ // operands may be of any type. Furthermore, unlike other built-in functions, // undefined operands DO NOT cause the print() function to fail during // evaluation. -var Print = &Builtin{ - Name: "print", - Decl: types.NewVariadicFunction(nil, types.A, nil), -} +var Print = v1.Print // InternalPrint represents the internal implementation of the print() function. // The compiler rewrites print() calls to refer to the internal implementation. 
-var InternalPrint = &Builtin{ - Name: "internal.print", - Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.NewSet(types.A))}, nil), -} +var InternalPrint = v1.InternalPrint /** * Deprecated built-ins. */ // SetDiff has been replaced by the minus built-in. -var SetDiff = &Builtin{ - Name: "set_diff", - Decl: types.NewFunction( - types.Args( - types.NewSet(types.A), - types.NewSet(types.A), - ), - types.NewSet(types.A), - ), - deprecated: true, -} +var SetDiff = v1.SetDiff // NetCIDROverlap has been replaced by the `net.cidr_contains` built-in. -var NetCIDROverlap = &Builtin{ - Name: "net.cidr_overlap", - Decl: types.NewFunction( - types.Args( - types.S, - types.S, - ), - types.B, - ), - deprecated: true, -} +var NetCIDROverlap = v1.NetCIDROverlap // CastArray checks the underlying type of the input. If it is array or set, an array // containing the values is returned. If it is not an array, an error is thrown. -var CastArray = &Builtin{ - Name: "cast_array", - Decl: types.NewFunction( - types.Args(types.A), - types.NewArray(nil, types.A), - ), - deprecated: true, -} +var CastArray = v1.CastArray // CastSet checks the underlying type of the input. // If it is a set, the set is returned. // If it is an array, the array is returned in set form (all duplicates removed) // If neither, an error is thrown -var CastSet = &Builtin{ - Name: "cast_set", - Decl: types.NewFunction( - types.Args(types.A), - types.NewSet(types.A), - ), - deprecated: true, -} +var CastSet = v1.CastSet // CastString returns input if it is a string; if not returns error. // For formatting variables, see sprintf -var CastString = &Builtin{ - Name: "cast_string", - Decl: types.NewFunction( - types.Args(types.A), - types.S, - ), - deprecated: true, -} +var CastString = v1.CastString // CastBoolean returns input if it is a boolean; if not returns error. 
-var CastBoolean = &Builtin{ - Name: "cast_boolean", - Decl: types.NewFunction( - types.Args(types.A), - types.B, - ), - deprecated: true, -} +var CastBoolean = v1.CastBoolean // CastNull returns null if input is null; if not returns error. -var CastNull = &Builtin{ - Name: "cast_null", - Decl: types.NewFunction( - types.Args(types.A), - types.NewNull(), - ), - deprecated: true, -} +var CastNull = v1.CastNull // CastObject returns the given object if it is null; throws an error otherwise -var CastObject = &Builtin{ - Name: "cast_object", - Decl: types.NewFunction( - types.Args(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - ), - deprecated: true, -} +var CastObject = v1.CastObject // RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead. -var RegexMatchDeprecated = &Builtin{ - Name: "re_match", - Decl: types.NewFunction( - types.Args( - types.S, - types.S, - ), - types.B, - ), - deprecated: true, -} +var RegexMatchDeprecated = v1.RegexMatchDeprecated // All takes a list and returns true if all of the items // are true. A collection of length 0 returns true. -var All = &Builtin{ - Name: "all", - Decl: types.NewFunction( - types.Args( - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - ), - ), - types.B, - ), - deprecated: true, -} +var All = v1.All // Any takes a collection and returns true if any of the items // is true. A collection of length 0 returns false. -var Any = &Builtin{ - Name: "any", - Decl: types.NewFunction( - types.Args( - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - ), - ), - types.B, - ), - deprecated: true, -} +var Any = v1.Any // Builtin represents a built-in function supported by OPA. Every built-in // function is uniquely identified by a name. 
-type Builtin struct { - Name string `json:"name"` // Unique name of built-in function, e.g., (arg1,arg2,...,argN) - Description string `json:"description,omitempty"` // Description of what the built-in function does. - - // Categories of the built-in function. Omitted for namespaced - // built-ins, i.e. "array.concat" is taken to be of the "array" category. - // "minus" for example, is part of two categories: numbers and sets. (NOTE(sr): aspirational) - Categories []string `json:"categories,omitempty"` - - Decl *types.Function `json:"decl"` // Built-in function type declaration. - Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset. - Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation. - deprecated bool // Indicates if the built-in has been deprecated. - Nondeterministic bool `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results. -} - -// category is a helper for specifying a Builtin's Categories -func category(cs ...string) []string { - return cs -} - -// Minimal returns a shallow copy of b with the descriptions and categories and -// named arguments stripped out. -func (b *Builtin) Minimal() *Builtin { - cpy := *b - fargs := b.Decl.FuncArgs() - if fargs.Variadic != nil { - cpy.Decl = types.NewVariadicFunction(fargs.Args, fargs.Variadic, b.Decl.Result()) - } else { - cpy.Decl = types.NewFunction(fargs.Args, b.Decl.Result()) - } - cpy.Categories = nil - cpy.Description = "" - return &cpy -} - -// IsDeprecated returns true if the Builtin function is deprecated and will be removed in a future release. -func (b *Builtin) IsDeprecated() bool { - return b.deprecated -} - -// IsDeterministic returns true if the Builtin function returns non-deterministic results. -func (b *Builtin) IsNondeterministic() bool { - return b.Nondeterministic -} - -// Expr creates a new expression for the built-in with the given operands. 
-func (b *Builtin) Expr(operands ...*Term) *Expr { - ts := make([]*Term, len(operands)+1) - ts[0] = NewTerm(b.Ref()) - for i := range operands { - ts[i+1] = operands[i] - } - return &Expr{ - Terms: ts, - } -} - -// Call creates a new term for the built-in with the given operands. -func (b *Builtin) Call(operands ...*Term) *Term { - call := make(Call, len(operands)+1) - call[0] = NewTerm(b.Ref()) - for i := range operands { - call[i+1] = operands[i] - } - return NewTerm(call) -} - -// Ref returns a Ref that refers to the built-in function. -func (b *Builtin) Ref() Ref { - parts := strings.Split(b.Name, ".") - ref := make(Ref, len(parts)) - ref[0] = VarTerm(parts[0]) - for i := 1; i < len(parts); i++ { - ref[i] = StringTerm(parts[i]) - } - return ref -} - -// IsTargetPos returns true if a variable in the i-th position will be bound by -// evaluating the call expression. -func (b *Builtin) IsTargetPos(i int) bool { - return len(b.Decl.FuncArgs().Args) == i -} - -func init() { - BuiltinMap = map[string]*Builtin{} - for _, b := range DefaultBuiltins { - RegisterBuiltin(b) - } -} +type Builtin = v1.Builtin diff --git a/vendor/github.com/open-policy-agent/opa/ast/capabilities.go b/vendor/github.com/open-policy-agent/opa/ast/capabilities.go index 3b95d79e57..bc7278a885 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/capabilities.go +++ b/vendor/github.com/open-policy-agent/opa/ast/capabilities.go @@ -5,228 +5,54 @@ package ast import ( - "bytes" - _ "embed" - "encoding/json" - "fmt" "io" - "os" - "sort" - "strings" - caps "github.com/open-policy-agent/opa/capabilities" - "github.com/open-policy-agent/opa/internal/semver" - "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // VersonIndex contains an index from built-in function name, language feature, // and future rego keyword to version number. 
During the build, this is used to // create an index of the minimum version required for the built-in/feature/kw. -type VersionIndex struct { - Builtins map[string]semver.Version `json:"builtins"` - Features map[string]semver.Version `json:"features"` - Keywords map[string]semver.Version `json:"keywords"` -} - -// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go -// and run as part of go:generate. We generate the version index as part of the -// build process because it's relatively expensive to build (it takes ~500ms on -// my machine) and never changes. -// -//go:embed version_index.json -var versionIndexBs []byte - -var minVersionIndex = func() VersionIndex { - var vi VersionIndex - err := json.Unmarshal(versionIndexBs, &vi) - if err != nil { - panic(err) - } - return vi -}() +type VersionIndex = v1.VersionIndex // In the compiler, we used this to check that we're OK working with ref heads. // If this isn't present, we'll fail. This is to ensure that older versions of // OPA can work with policies that we're compiling -- if they don't know ref // heads, they wouldn't be able to parse them. -const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes" -const FeatureRefHeads = "rule_head_refs" -const FeatureRegoV1Import = "rego_v1_import" +const FeatureRefHeadStringPrefixes = v1.FeatureRefHeadStringPrefixes +const FeatureRefHeads = v1.FeatureRefHeads +const FeatureRegoV1 = v1.FeatureRegoV1 +const FeatureRegoV1Import = v1.FeatureRegoV1Import // Capabilities defines a structure containing data that describes the capabilities // or features supported by a particular version of OPA. -type Capabilities struct { - Builtins []*Builtin `json:"builtins,omitempty"` - FutureKeywords []string `json:"future_keywords,omitempty"` - WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"` - - // Features is a bit of a mixed bag for checking that an older version of OPA - // is able to do what needs to be done. 
- // TODO(sr): find better words ^^ - Features []string `json:"features,omitempty"` - - // allow_net is an array of hostnames or IP addresses, that an OPA instance is - // allowed to connect to. - // If omitted, ANY host can be connected to. If empty, NO host can be connected to. - // As of now, this only controls fetching remote refs for using JSON Schemas in - // the type checker. - // TODO(sr): support ports to further restrict connection peers - // TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665) - AllowNet []string `json:"allow_net,omitempty"` -} +type Capabilities = v1.Capabilities // WasmABIVersion captures the Wasm ABI version. Its `Minor` version is indicating // backwards-compatible changes. -type WasmABIVersion struct { - Version int `json:"version"` - Minor int `json:"minor_version"` -} +type WasmABIVersion = v1.WasmABIVersion // CapabilitiesForThisVersion returns the capabilities of this version of OPA. func CapabilitiesForThisVersion() *Capabilities { - f := &Capabilities{} - - for _, vers := range capabilities.ABIVersions() { - f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]}) - } - - f.Builtins = make([]*Builtin, len(Builtins)) - copy(f.Builtins, Builtins) - sort.Slice(f.Builtins, func(i, j int) bool { - return f.Builtins[i].Name < f.Builtins[j].Name - }) - - for kw := range futureKeywords { - f.FutureKeywords = append(f.FutureKeywords, kw) - } - sort.Strings(f.FutureKeywords) - - f.Features = []string{ - FeatureRefHeadStringPrefixes, - FeatureRefHeads, - FeatureRegoV1Import, - } - - return f + return v1.CapabilitiesForThisVersion(v1.CapabilitiesRegoVersion(DefaultRegoVersion)) } // LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r. 
func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) { - d := util.NewJSONDecoder(r) - var c Capabilities - return &c, d.Decode(&c) + return v1.LoadCapabilitiesJSON(r) } // LoadCapabilitiesVersion loads a JSON serialized capabilities structure from the specific version. func LoadCapabilitiesVersion(version string) (*Capabilities, error) { - cvs, err := LoadCapabilitiesVersions() - if err != nil { - return nil, err - } - - for _, cv := range cvs { - if cv == version { - cont, err := caps.FS.ReadFile(cv + ".json") - if err != nil { - return nil, err - } - - return LoadCapabilitiesJSON(bytes.NewReader(cont)) - } - - } - return nil, fmt.Errorf("no capabilities version found %v", version) + return v1.LoadCapabilitiesVersion(version) } // LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file. func LoadCapabilitiesFile(file string) (*Capabilities, error) { - fd, err := os.Open(file) - if err != nil { - return nil, err - } - defer fd.Close() - return LoadCapabilitiesJSON(fd) + return v1.LoadCapabilitiesFile(file) } // LoadCapabilitiesVersions loads all capabilities versions func LoadCapabilitiesVersions() ([]string, error) { - ents, err := caps.FS.ReadDir(".") - if err != nil { - return nil, err - } - - capabilitiesVersions := make([]string, 0, len(ents)) - for _, ent := range ents { - capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1)) - } - return capabilitiesVersions, nil -} - -// MinimumCompatibleVersion returns the minimum compatible OPA version based on -// the built-ins, features, and keywords in c. 
-func (c *Capabilities) MinimumCompatibleVersion() (string, bool) { - - var maxVersion semver.Version - - // this is the oldest OPA release that includes capabilities - if err := maxVersion.Set("0.17.0"); err != nil { - panic("unreachable") - } - - for _, bi := range c.Builtins { - v, ok := minVersionIndex.Builtins[bi.Name] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - for _, kw := range c.FutureKeywords { - v, ok := minVersionIndex.Keywords[kw] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - for _, feat := range c.Features { - v, ok := minVersionIndex.Features[feat] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - return maxVersion.String(), true -} - -func (c *Capabilities) ContainsFeature(feature string) bool { - for _, f := range c.Features { - if f == feature { - return true - } - } - return false -} - -// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name -// will be overwritten. 
-func (c *Capabilities) addBuiltinSorted(bi *Builtin) { - i := sort.Search(len(c.Builtins), func(x int) bool { - return c.Builtins[x].Name >= bi.Name - }) - if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name { - c.Builtins[i] = bi - return - } - c.Builtins = append(c.Builtins, nil) - copy(c.Builtins[i+1:], c.Builtins[i:]) - c.Builtins[i] = bi + return v1.LoadCapabilitiesVersions() } diff --git a/vendor/github.com/open-policy-agent/opa/ast/check.go b/vendor/github.com/open-policy-agent/opa/ast/check.go index 03d31123cf..4cf00436df 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/check.go +++ b/vendor/github.com/open-policy-agent/opa/ast/check.go @@ -5,1317 +5,18 @@ package ast import ( - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -type varRewriter func(Ref) Ref - -// exprChecker defines the interface for executing type checking on a single -// expression. The exprChecker must update the provided TypeEnv with inferred -// types of vars. -type exprChecker func(*TypeEnv, *Expr) *Error - -// typeChecker implements type checking on queries and rules. Errors are -// accumulated on the typeChecker so that a single run can report multiple -// issues. -type typeChecker struct { - builtins map[string]*Builtin - required *Capabilities - errs Errors - exprCheckers map[string]exprChecker - varRewriter varRewriter - ss *SchemaSet - allowNet []string - input types.Type - allowUndefinedFuncs bool - schemaTypes map[string]types.Type -} - -// newTypeChecker returns a new typeChecker object that has no errors. 
-func newTypeChecker() *typeChecker { - return &typeChecker{ - builtins: make(map[string]*Builtin), - schemaTypes: make(map[string]types.Type), - exprCheckers: map[string]exprChecker{ - "eq": checkExprEq, - }, - } -} - -func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv { - if exist != nil { - return exist.wrap() - } - env := newTypeEnv(tc.copy) - if tc.input != nil { - env.tree.Put(InputRootRef, tc.input) - } - return env -} - -func (tc *typeChecker) copy() *typeChecker { - return newTypeChecker(). - WithVarRewriter(tc.varRewriter). - WithSchemaSet(tc.ss). - WithAllowNet(tc.allowNet). - WithInputType(tc.input). - WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs). - WithBuiltins(tc.builtins). - WithRequiredCapabilities(tc.required) -} - -func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker { - tc.required = c - return tc -} - -func (tc *typeChecker) WithBuiltins(builtins map[string]*Builtin) *typeChecker { - tc.builtins = builtins - return tc -} - -func (tc *typeChecker) WithSchemaSet(ss *SchemaSet) *typeChecker { - tc.ss = ss - return tc -} - -func (tc *typeChecker) WithAllowNet(hosts []string) *typeChecker { - tc.allowNet = hosts - return tc -} - -func (tc *typeChecker) WithVarRewriter(f varRewriter) *typeChecker { - tc.varRewriter = f - return tc -} - -func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker { - tc.input = tpe - return tc -} - -// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions. -// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped. -func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker { - tc.allowUndefinedFuncs = allow - return tc -} - -// Env returns a type environment for the specified built-ins with any other -// global types configured on the checker. In practice, this is the default -// environment that other statements will be checked against. 
-func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv { - env := tc.newEnv(nil) - for _, bi := range builtins { - env.tree.Put(bi.Ref(), bi.Decl) - } - return env -} - -// CheckBody runs type checking on the body and returns a TypeEnv if no errors -// are found. The resulting TypeEnv wraps the provided one. The resulting -// TypeEnv will be able to resolve types of vars contained in the body. -func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) { - - errors := []*Error{} - env = tc.newEnv(env) - - WalkExprs(body, func(expr *Expr) bool { - - closureErrs := tc.checkClosures(env, expr) - for _, err := range closureErrs { - errors = append(errors, err) - } - - hasClosureErrors := len(closureErrs) > 0 - - vis := newRefChecker(env, tc.varRewriter) - NewGenericVisitor(vis.Visit).Walk(expr) - for _, err := range vis.errs { - errors = append(errors, err) - } - - hasRefErrors := len(vis.errs) > 0 - - if err := tc.checkExpr(env, expr); err != nil { - // Suppress this error if a more actionable one has occurred. In - // this case, if an error occurred in a ref or closure contained in - // this expression, and the error is due to a nil type, then it's - // likely to be the result of the more specific error. - skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err) - if !skip { - errors = append(errors, err) - } - } - return true - }) - - tc.err(errors) - return env, errors -} - -// CheckTypes runs type checking on the rules returns a TypeEnv if no errors -// are found. The resulting TypeEnv wraps the provided one. The resulting -// TypeEnv will be able to resolve types of refs that refer to rules. 
-func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T, as *AnnotationSet) (*TypeEnv, Errors) { - env = tc.newEnv(env) - for _, s := range sorted { - tc.checkRule(env, as, s.(*Rule)) - } - tc.errs.Sort() - return env, tc.errs -} - -func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors { - var result Errors - WalkClosures(expr, func(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - case *SetComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - case *ObjectComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - } - return false - }) - return result -} - -func (tc *typeChecker) getSchemaType(schemaAnnot *SchemaAnnotation, rule *Rule) (types.Type, *Error) { - if refType, exists := tc.schemaTypes[schemaAnnot.Schema.String()]; exists { - return refType, nil - } - - refType, err := processAnnotation(tc.ss, schemaAnnot, rule, tc.allowNet) - if err != nil { - return nil, err - } - - if refType == nil { - return nil, nil - } - - tc.schemaTypes[schemaAnnot.Schema.String()] = refType - return refType, nil - -} - -func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { - - env = env.wrap() - - schemaAnnots := getRuleAnnotation(as, rule) - for _, schemaAnnot := range schemaAnnots { - refType, err := tc.getSchemaType(schemaAnnot, rule) - if err != nil { - tc.err([]*Error{err}) - continue - } - - ref := schemaAnnot.Path - // if we do not have a ref or a reftype, we should not evaluate this rule. 
- if ref == nil || refType == nil { - continue - } - - prefixRef, t := getPrefix(env, ref) - if t == nil || len(prefixRef) == len(ref) { - env.tree.Put(ref, refType) - } else { - newType, err := override(ref[len(prefixRef):], t, refType, rule) - if err != nil { - tc.err([]*Error{err}) - continue - } - env.tree.Put(prefixRef, newType) - } - } - - cpy, err := tc.CheckBody(env, rule.Body) - env = env.next - path := rule.Ref() - - if len(err) > 0 { - // if the rule/function contains an error, add it to the type env so - // that expressions that refer to this rule/function do not encounter - // type errors. - env.tree.Put(path, types.A) - return - } - - var tpe types.Type - - if len(rule.Head.Args) > 0 { - // If args are not referred to in body, infer as any. - WalkVars(rule.Head.Args, func(v Var) bool { - if cpy.Get(v) == nil { - cpy.tree.PutOne(v, types.A) - } - return false - }) - - // Construct function type. - args := make([]types.Type, len(rule.Head.Args)) - for i := 0; i < len(rule.Head.Args); i++ { - args[i] = cpy.Get(rule.Head.Args[i]) - } - - f := types.NewFunction(args, cpy.Get(rule.Head.Value)) - - tpe = f - } else { - switch rule.Head.RuleKind() { - case SingleValue: - typeV := cpy.Get(rule.Head.Value) - if !path.IsGround() { - // e.g. store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z] - objPath := path.DynamicSuffix() - path = path.GroundPrefix() - - var err error - tpe, err = nestedObject(cpy, objPath, typeV) - if err != nil { - tc.err([]*Error{NewError(TypeErr, rule.Head.Location, err.Error())}) - tpe = nil - } - } else { - if typeV != nil { - tpe = typeV - } - } - case MultiValue: - typeK := cpy.Get(rule.Head.Key) - if typeK != nil { - tpe = types.NewSet(typeK) - } - } - } - - if tpe != nil { - env.tree.Insert(path, tpe, env) - } -} - -// nestedObject creates a nested structure of object types, where each term on path corresponds to a level in the -// nesting. 
Each term in the path only contributes to the dynamic portion of its corresponding object. -func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) { - if len(path) == 0 { - return tpe, nil - } - - k := path[0] - typeV, err := nestedObject(env, path[1:], tpe) - if err != nil { - return nil, err - } - if typeV == nil { - return nil, nil - } - - var dynamicProperty *types.DynamicProperty - typeK := env.Get(k) - if typeK == nil { - return nil, nil - } - dynamicProperty = types.NewDynamicProperty(typeK, typeV) - - return types.NewObject(nil, dynamicProperty), nil -} - -func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error { - if err := tc.checkExprWith(env, expr, 0); err != nil { - return err - } - if !expr.IsCall() { - return nil - } - - operator := expr.Operator().String() - - // If the type checker wasn't provided with a required capabilities - // structure then just skip. In some cases, type checking might be run - // without the need to record what builtins are required. - if tc.required != nil { - if bi, ok := tc.builtins[operator]; ok { - tc.required.addBuiltinSorted(bi) - } - } - - checker := tc.exprCheckers[operator] - if checker != nil { - return checker(env, expr) - } - - return tc.checkExprBuiltin(env, expr) -} - -func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { - - args := expr.Operands() - pre := getArgTypes(env, args) - - // NOTE(tsandall): undefined functions will have been caught earlier in the - // compiler. We check for undefined functions before the safety check so - // that references to non-existent functions result in undefined function - // errors as opposed to unsafe var errors. - // - // We cannot run type checking before the safety check because part of the - // type checker relies on reordering (in particular for references to local - // vars). 
- name := expr.Operator() - tpe := env.Get(name) - - if tpe == nil { - if tc.allowUndefinedFuncs { - return nil - } - return NewError(TypeErr, expr.Location, "undefined function %v", name) - } - - // check if the expression refers to a function that contains an error - _, ok := tpe.(types.Any) - if ok { - return nil - } - - ftpe, ok := tpe.(*types.Function) - if !ok { - return NewError(TypeErr, expr.Location, "undefined function %v", name) - } - - fargs := ftpe.FuncArgs() - namedFargs := ftpe.NamedFuncArgs() - - if ftpe.Result() != nil { - fargs.Args = append(fargs.Args, ftpe.Result()) - namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult()) - } - - if len(args) > len(fargs.Args) && fargs.Variadic == nil { - return newArgError(expr.Location, name, "too many arguments", pre, namedFargs) - } - - if len(args) < len(ftpe.FuncArgs().Args) { - return newArgError(expr.Location, name, "too few arguments", pre, namedFargs) - } - - for i := range args { - if !unify1(env, args[i], fargs.Arg(i), false) { - post := make([]types.Type, len(args)) - for i := range args { - post[i] = env.Get(args[i]) - } - return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs) - } - } - - return nil -} - -func checkExprEq(env *TypeEnv, expr *Expr) *Error { - - pre := getArgTypes(env, expr.Operands()) - exp := Equality.Decl.FuncArgs() - - if len(pre) < len(exp.Args) { - return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, exp) - } - - if len(exp.Args) < len(pre) { - return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, exp) - } - - a, b := expr.Operand(0), expr.Operand(1) - typeA, typeB := env.Get(a), env.Get(b) - - if !unify2(env, a, typeA, b, typeB) { - err := NewError(TypeErr, expr.Location, "match error") - err.Details = &UnificationErrDetail{ - Left: typeA, - Right: typeB, - } - return err - } - - return nil -} - -func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error { - if i == 
len(expr.With) { - return nil - } - - target, value := expr.With[i].Target, expr.With[i].Value - targetType, valueType := env.Get(target), env.Get(value) - - if t, ok := targetType.(*types.Function); ok { // built-in function replacement - switch v := valueType.(type) { - case *types.Function: // ...by function - if !unifies(targetType, valueType) { - return newArgError(expr.With[i].Loc(), target.Value.(Ref), "arity mismatch", v.FuncArgs().Args, t.NamedFuncArgs()) - } - default: // ... by value, nothing to check - } - } - - return tc.checkExprWith(env, expr, i+1) -} - -func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool { - - nilA := types.Nil(typeA) - nilB := types.Nil(typeB) - - if nilA && !nilB { - return unify1(env, a, typeB, false) - } else if nilB && !nilA { - return unify1(env, b, typeA, false) - } else if !nilA && !nilB { - return unifies(typeA, typeB) - } - - switch a.Value.(type) { - case *Array: - return unify2Array(env, a, b) - case *object: - return unify2Object(env, a, b) - case Var: - switch b.Value.(type) { - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - case *Array: - return unify2Array(env, b, a) - case *object: - return unify2Object(env, b, a) - } - } - - return false -} - -func unify2Array(env *TypeEnv, a *Term, b *Term) bool { - arr := a.Value.(*Array) - switch bv := b.Value.(type) { - case *Array: - if arr.Len() == bv.Len() { - for i := 0; i < arr.Len(); i++ { - if !unify2(env, arr.Elem(i), env.Get(arr.Elem(i)), bv.Elem(i), env.Get(bv.Elem(i))) { - return false - } - } - return true - } - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - } - return false -} - -func unify2Object(env *TypeEnv, a *Term, b *Term) bool { - obj := a.Value.(Object) - switch bv := b.Value.(type) { - case *object: - cv := obj.Intersect(bv) - if obj.Len() == bv.Len() && bv.Len() == len(cv) { - for i := range cv { - if !unify2(env, cv[i][1], 
env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) { - return false - } - } - return true - } - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - } - return false -} - -func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool { - switch v := term.Value.(type) { - case *Array: - switch tpe := tpe.(type) { - case *types.Array: - return unify1Array(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - for i := 0; i < v.Len(); i++ { - unify1(env, v.Elem(i), types.A, true) - } - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case *object: - switch tpe := tpe.(type) { - case *types.Object: - return unify1Object(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - v.Foreach(func(key, value *Term) { - unify1(env, key, types.A, true) - unify1(env, value, types.A, true) - }) - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case Set: - switch tpe := tpe.(type) { - case *types.Set: - return unify1Set(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - v.Foreach(func(elem *Term) { - unify1(env, elem, types.A, true) - }) - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return unifies(env.Get(v), tpe) - case Var: - if !union { - if exist := env.Get(v); exist != nil { - return unifies(exist, tpe) - } - env.tree.PutOne(term.Value, tpe) - } else { - env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe)) - } - return true - default: - if !IsConstant(v) { - panic("unreachable") - } - return unifies(env.Get(term), tpe) - } -} - -func unify1Array(env *TypeEnv, val 
*Array, tpe *types.Array, union bool) bool { - if val.Len() != tpe.Len() && tpe.Dynamic() == nil { - return false - } - for i := 0; i < val.Len(); i++ { - if !unify1(env, val.Elem(i), tpe.Select(i), union) { - return false - } - } - return true -} - -func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool { - if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil { - return false - } - stop := val.Until(func(k, v *Term) bool { - if IsConstant(k.Value) { - if child := selectConstant(tpe, k); child != nil { - if !unify1(env, v, child, union) { - return true - } - } else { - return true - } - } else { - // Inferring type of value under dynamic key would involve unioning - // with all property values of tpe whose keys unify. For now, type - // these values as Any. We can investigate stricter inference in - // the future. - unify1(env, v, types.A, union) - } - return false - }) - return !stop -} - -func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool { - of := types.Values(tpe) - return !val.Until(func(elem *Term) bool { - return !unify1(env, elem, of, union) - }) -} - -func (tc *typeChecker) err(errors []*Error) { - tc.errs = append(tc.errs, errors...) 
-} - -type refChecker struct { - env *TypeEnv - errs Errors - varRewriter varRewriter -} - -func rewriteVarsNop(node Ref) Ref { - return node -} - -func newRefChecker(env *TypeEnv, f varRewriter) *refChecker { - - if f == nil { - f = rewriteVarsNop - } - - return &refChecker{ - env: env, - errs: nil, - varRewriter: f, - } -} - -func (rc *refChecker) Visit(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - case *Expr: - switch terms := x.Terms.(type) { - case []*Term: - for i := 1; i < len(terms); i++ { - NewGenericVisitor(rc.Visit).Walk(terms[i]) - } - return true - case *Term: - NewGenericVisitor(rc.Visit).Walk(terms) - return true - } - case Ref: - if err := rc.checkApply(rc.env, x); err != nil { - rc.errs = append(rc.errs, err) - return true - } - if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil { - rc.errs = append(rc.errs, err) - } - } - return false -} - -func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error { - switch tpe := curr.Get(ref).(type) { - case *types.Function: // NOTE(sr): We don't support first-class functions, except for `with`. - return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe) - } - - return nil -} - -func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error { - - if idx == len(ref) { - return nil - } - - head := ref[idx] - - // NOTE(sr): as long as package statements are required, this isn't possible: - // the shortest possible rule ref is data.a.b (b is idx 2), idx 1 and 2 need to - // be strings or vars. 
- if idx == 1 || idx == 2 { - switch head.Value.(type) { - case Var, String: // OK - default: - have := rc.env.Get(head.Value) - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node)) - } - } - - if v, ok := head.Value.(Var); ok && idx != 0 { - tpe := types.Keys(rc.env.getRefRecExtent(node)) - if exist := rc.env.Get(v); exist != nil { - if !unifies(tpe, exist) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node)) - } - } else { - rc.env.tree.PutOne(v, tpe) - } - } - - child := node.Child(head.Value) - if child == nil { - // NOTE(sr): idx is reset on purpose: we start over - switch { - case curr.next != nil: - next := curr.next - return rc.checkRef(next, next.tree, ref, 0) - - case RootDocumentNames.Contains(ref[0]): - if idx != 0 { - node.Children().Iter(func(_, child util.T) bool { - _ = rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1) // ignore error - return false - }) - return nil - } - return rc.checkRefLeaf(types.A, ref, 1) - - default: - return rc.checkRefLeaf(types.A, ref, 0) - } - } - - if child.Leaf() { - return rc.checkRefLeaf(child.Value(), ref, idx+1) - } - - return rc.checkRef(curr, child, ref, idx+1) -} - -func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error { - - if idx == len(ref) { - return nil - } - - head := ref[idx] - - keys := types.Keys(tpe) - if keys == nil { - return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe) - } - - switch value := head.Value.(type) { - - case Var: - if exist := rc.env.Get(value); exist != nil { - if !unifies(exist, keys) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) - } - } else { - rc.env.tree.PutOne(value, types.Keys(tpe)) - } - - case Ref: - if exist := rc.env.Get(value); exist != nil { - if !unifies(exist, keys) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, 
getOneOfForType(tpe)) - } - } - - case *Array, Object, Set: - if !unify1(rc.env, head, keys, false) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil) - } - - default: - child := selectConstant(tpe, head) - if child == nil { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe)) - } - return rc.checkRefLeaf(child, ref, idx+1) - } - - return rc.checkRefLeaf(types.Values(tpe), ref, idx+1) -} - -func unifies(a, b types.Type) bool { - - if a == nil || b == nil { - return false - } - - anyA, ok1 := a.(types.Any) - if ok1 { - if unifiesAny(anyA, b) { - return true - } - } - - anyB, ok2 := b.(types.Any) - if ok2 { - if unifiesAny(anyB, a) { - return true - } - } - - if ok1 || ok2 { - return false - } - - switch a := a.(type) { - case types.Null: - _, ok := b.(types.Null) - return ok - case types.Boolean: - _, ok := b.(types.Boolean) - return ok - case types.Number: - _, ok := b.(types.Number) - return ok - case types.String: - _, ok := b.(types.String) - return ok - case *types.Array: - b, ok := b.(*types.Array) - if !ok { - return false - } - return unifiesArrays(a, b) - case *types.Object: - b, ok := b.(*types.Object) - if !ok { - return false - } - return unifiesObjects(a, b) - case *types.Set: - b, ok := b.(*types.Set) - if !ok { - return false - } - return unifies(types.Values(a), types.Values(b)) - case *types.Function: - // NOTE(sr): variadic functions can only be internal ones, and we've forbidden - // their replacement via `with`; so we disregard variadic here - if types.Arity(a) == types.Arity(b) { - b := b.(*types.Function) - for i := range a.FuncArgs().Args { - if !unifies(a.FuncArgs().Arg(i), b.FuncArgs().Arg(i)) { - return false - } - } - return true - } - return false - default: - panic("unreachable") - } -} - -func unifiesAny(a types.Any, b types.Type) bool { - if _, ok := b.(*types.Function); ok { - return false - } - for i := range a { - if 
unifies(a[i], b) { - return true - } - } - return len(a) == 0 -} - -func unifiesArrays(a, b *types.Array) bool { - - if !unifiesArraysStatic(a, b) { - return false - } - - if !unifiesArraysStatic(b, a) { - return false - } - - return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic()) -} - -func unifiesArraysStatic(a, b *types.Array) bool { - if a.Len() != 0 { - for i := 0; i < a.Len(); i++ { - if !unifies(a.Select(i), b.Select(i)) { - return false - } - } - } - return true -} - -func unifiesObjects(a, b *types.Object) bool { - if !unifiesObjectsStatic(a, b) { - return false - } - - if !unifiesObjectsStatic(b, a) { - return false - } - - return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue()) -} - -func unifiesObjectsStatic(a, b *types.Object) bool { - for _, k := range a.Keys() { - if !unifies(a.Select(k), b.Select(k)) { - return false - } - } - return true -} - -// typeErrorCause defines an interface to determine the reason for a type -// error. The type error details implement this interface so that type checking -// can report more actionable errors. -type typeErrorCause interface { - nilType() bool -} - -func causedByNilType(err *Error) bool { - cause, ok := err.Details.(typeErrorCause) - if !ok { - return false - } - return cause.nilType() -} - -// ArgErrDetail represents a generic argument error. -type ArgErrDetail struct { - Have []types.Type `json:"have"` - Want types.FuncArgs `json:"want"` -} - -// Lines returns the string representation of the detail. -func (d *ArgErrDetail) Lines() []string { - lines := make([]string, 2) - lines[0] = "have: " + formatArgs(d.Have) - lines[1] = "want: " + fmt.Sprint(d.Want) - return lines -} - -func (d *ArgErrDetail) nilType() bool { - for i := range d.Have { - if types.Nil(d.Have[i]) { - return true - } - } - return false -} - // UnificationErrDetail describes a type mismatch error when two values are // unified (e.g., x = [1,2,y]). 
-type UnificationErrDetail struct { - Left types.Type `json:"a"` - Right types.Type `json:"b"` -} - -func (a *UnificationErrDetail) nilType() bool { - return types.Nil(a.Left) || types.Nil(a.Right) -} - -// Lines returns the string representation of the detail. -func (a *UnificationErrDetail) Lines() []string { - lines := make([]string, 2) - lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left)) - lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right)) - return lines -} +type UnificationErrDetail = v1.UnificationErrDetail // RefErrUnsupportedDetail describes an undefined reference error where the // referenced value does not support dereferencing (e.g., scalars). -type RefErrUnsupportedDetail struct { - Ref Ref `json:"ref"` // invalid ref - Pos int `json:"pos"` // invalid element - Have types.Type `json:"have"` // referenced type -} - -// Lines returns the string representation of the detail. -func (r *RefErrUnsupportedDetail) Lines() []string { - lines := []string{ - r.Ref.String(), - strings.Repeat("^", len(r.Ref[:r.Pos+1].String())), - fmt.Sprintf("have: %v", r.Have), - } - return lines -} +type RefErrUnsupportedDetail = v1.RefErrUnsupportedDetail // RefErrInvalidDetail describes an undefined reference error where the referenced // value does not support the reference operand (e.g., missing object key, // invalid key type, etc.) -type RefErrInvalidDetail struct { - Ref Ref `json:"ref"` // invalid ref - Pos int `json:"pos"` // invalid element - Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements) - Want types.Type `json:"want"` // allowed type (for non-object values) - OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys) -} - -// Lines returns the string representation of the detail. 
-func (r *RefErrInvalidDetail) Lines() []string { - lines := []string{r.Ref.String()} - offset := len(r.Ref[:r.Pos].String()) + 1 - pad := strings.Repeat(" ", offset) - lines = append(lines, fmt.Sprintf("%s^", pad)) - if r.Have != nil { - lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have)) - } else { - lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos])) - } - if len(r.OneOf) > 0 { - lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf)) - } else { - lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want)) - } - return lines -} - -func formatArgs(args []types.Type) string { - buf := make([]string, len(args)) - for i := range args { - buf[i] = types.Sprint(args[i]) - } - return "(" + strings.Join(buf, ", ") + ")" -} - -func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error { - err := newRefError(loc, ref) - err.Details = &RefErrInvalidDetail{ - Ref: ref, - Pos: idx, - Have: have, - Want: want, - OneOf: oneOf, - } - return err -} - -func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error { - err := newRefError(loc, ref) - err.Details = &RefErrUnsupportedDetail{ - Ref: ref, - Pos: idx, - Have: have, - } - return err -} - -func newRefError(loc *Location, ref Ref) *Error { - return NewError(TypeErr, loc, "undefined ref: %v", ref) -} - -func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want types.FuncArgs) *Error { - err := NewError(TypeErr, loc, "%v: %v", builtinName, msg) - err.Details = &ArgErrDetail{ - Have: have, - Want: want, - } - return err -} - -func getOneOfForNode(node *typeTreeNode) (result []Value) { - node.Children().Iter(func(k, _ util.T) bool { - result = append(result, k.(Value)) - return false - }) - - sortValueSlice(result) - return result -} - -func getOneOfForType(tpe types.Type) (result []Value) { - switch tpe := tpe.(type) { - case *types.Object: - for _, k := range tpe.Keys() { - v, err 
:= InterfaceToValue(k) - if err != nil { - panic(err) - } - result = append(result, v) - } - - case types.Any: - for _, object := range tpe { - objRes := getOneOfForType(object) - result = append(result, objRes...) - } - } - - result = removeDuplicate(result) - sortValueSlice(result) - return result -} - -func sortValueSlice(sl []Value) { - sort.Slice(sl, func(i, j int) bool { - return sl[i].Compare(sl[j]) < 0 - }) -} - -func removeDuplicate(list []Value) []Value { - seen := make(map[Value]bool) - var newResult []Value - for _, item := range list { - if !seen[item] { - newResult = append(newResult, item) - seen[item] = true - } - } - return newResult -} - -func getArgTypes(env *TypeEnv, args []*Term) []types.Type { - pre := make([]types.Type, len(args)) - for i := range args { - pre[i] = env.Get(args[i]) - } - return pre -} - -// getPrefix returns the shortest prefix of ref that exists in env -func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) { - if len(ref) == 1 { - t := env.Get(ref) - if t != nil { - return ref, t - } - } - for i := 1; i < len(ref); i++ { - t := env.Get(ref[:i]) - if t != nil { - return ref[:i], t - } - } - return nil, nil -} - -// override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path) -func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) { - var newStaticProps []*types.StaticProperty - obj, ok := t.(*types.Object) - if !ok { - newType, err := getObjectType(ref, o, rule, types.NewDynamicProperty(types.A, types.A)) - if err != nil { - return nil, err - } - return newType, nil - } - found := false - if ok { - staticProps := obj.StaticProperties() - for _, prop := range staticProps { - valueCopy := prop.Value - key, err := InterfaceToValue(prop.Key) - if err != nil { - return nil, NewError(TypeErr, rule.Location, "unexpected error in override: %s", err.Error()) - } - if len(ref) > 0 && ref[0].Value.Compare(key) 
== 0 { - found = true - if len(ref) == 1 { - valueCopy = o - } else { - newVal, err := override(ref[1:], valueCopy, o, rule) - if err != nil { - return nil, err - } - valueCopy = newVal - } - } - newStaticProps = append(newStaticProps, types.NewStaticProperty(prop.Key, valueCopy)) - } - } - - // ref[0] is not a top-level key in staticProps, so it must be added - if !found { - newType, err := getObjectType(ref, o, rule, obj.DynamicProperties()) - if err != nil { - return nil, err - } - newStaticProps = append(newStaticProps, newType.StaticProperties()...) - } - return types.NewObject(newStaticProps, obj.DynamicProperties()), nil -} - -func getKeys(ref Ref, rule *Rule) ([]interface{}, *Error) { - keys := []interface{}{} - for _, refElem := range ref { - key, err := JSON(refElem.Value) - if err != nil { - return nil, NewError(TypeErr, rule.Location, "error getting key from value: %s", err.Error()) - } - keys = append(keys, key) - } - return keys, nil -} - -func getObjectTypeRec(keys []interface{}, o types.Type, d *types.DynamicProperty) *types.Object { - if len(keys) == 1 { - staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], o)} - return types.NewObject(staticProps, d) - } - - staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], getObjectTypeRec(keys[1:], o, d))} - return types.NewObject(staticProps, d) -} - -func getObjectType(ref Ref, o types.Type, rule *Rule, d *types.DynamicProperty) (*types.Object, *Error) { - keys, err := getKeys(ref, rule) - if err != nil { - return nil, err - } - return getObjectTypeRec(keys, o, d), nil -} - -func getRuleAnnotation(as *AnnotationSet, rule *Rule) (result []*SchemaAnnotation) { - - for _, x := range as.GetSubpackagesScope(rule.Module.Package.Path) { - result = append(result, x.Schemas...) - } - - if x := as.GetPackageScope(rule.Module.Package); x != nil { - result = append(result, x.Schemas...) 
- } - - if x := as.GetDocumentScope(rule.Ref().GroundPrefix()); x != nil { - result = append(result, x.Schemas...) - } - - for _, x := range as.GetRuleScope(rule) { - result = append(result, x.Schemas...) - } - - return result -} - -func processAnnotation(ss *SchemaSet, annot *SchemaAnnotation, rule *Rule, allowNet []string) (types.Type, *Error) { - - var schema interface{} - - if annot.Schema != nil { - if ss == nil { - return nil, nil - } - schema = ss.Get(annot.Schema) - if schema == nil { - return nil, NewError(TypeErr, rule.Location, "undefined schema: %v", annot.Schema) - } - } else if annot.Definition != nil { - schema = *annot.Definition - } - - tpe, err := loadSchema(schema, allowNet) - if err != nil { - return nil, NewError(TypeErr, rule.Location, err.Error()) - } - - return tpe, nil -} - -func errAnnotationRedeclared(a *Annotations, other *Location) *Error { - return NewError(TypeErr, a.Location, "%v annotation redeclared: %v", a.Scope, other) -} +type RefErrInvalidDetail = v1.RefErrInvalidDetail diff --git a/vendor/github.com/open-policy-agent/opa/ast/compare.go b/vendor/github.com/open-policy-agent/opa/ast/compare.go index 3bb6f2a75d..5e617e992f 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/compare.go +++ b/vendor/github.com/open-policy-agent/opa/ast/compare.go @@ -5,9 +5,7 @@ package ast import ( - "encoding/json" - "fmt" - "math/big" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Compare returns an integer indicating whether two AST values are less than, @@ -36,361 +34,6 @@ import ( // Sets are considered equal if and only if the symmetric difference of a and b // is empty. // Other comparisons are consistent but not defined. 
-func Compare(a, b interface{}) int { - - if t, ok := a.(*Term); ok { - if t == nil { - a = nil - } else { - a = t.Value - } - } - - if t, ok := b.(*Term); ok { - if t == nil { - b = nil - } else { - b = t.Value - } - } - - if a == nil { - if b == nil { - return 0 - } - return -1 - } - if b == nil { - return 1 - } - - sortA := sortOrder(a) - sortB := sortOrder(b) - - if sortA < sortB { - return -1 - } else if sortB < sortA { - return 1 - } - - switch a := a.(type) { - case Null: - return 0 - case Boolean: - b := b.(Boolean) - if a.Equal(b) { - return 0 - } - if !a { - return -1 - } - return 1 - case Number: - if ai, err := json.Number(a).Int64(); err == nil { - if bi, err := json.Number(b.(Number)).Int64(); err == nil { - if ai == bi { - return 0 - } - if ai < bi { - return -1 - } - return 1 - } - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
- var bigA, bigB *big.Rat - fa, ok := new(big.Float).SetString(string(a)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - bigA = new(big.Rat).SetInt64(0) - } - } - if bigA == nil { - bigA, ok = new(big.Rat).SetString(string(a)) - if !ok { - panic("illegal value") - } - } - - fb, ok := new(big.Float).SetString(string(b.(Number))) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - bigB = new(big.Rat).SetInt64(0) - } - } - if bigB == nil { - bigB, ok = new(big.Rat).SetString(string(b.(Number))) - if !ok { - panic("illegal value") - } - } - - return bigA.Cmp(bigB) - case String: - b := b.(String) - if a.Equal(b) { - return 0 - } - if a < b { - return -1 - } - return 1 - case Var: - b := b.(Var) - if a.Equal(b) { - return 0 - } - if a < b { - return -1 - } - return 1 - case Ref: - b := b.(Ref) - return termSliceCompare(a, b) - case *Array: - b := b.(*Array) - return termSliceCompare(a.elems, b.elems) - case *lazyObj: - return Compare(a.force(), b) - case *object: - if x, ok := b.(*lazyObj); ok { - b = x.force() - } - b := b.(*object) - return a.Compare(b) - case Set: - b := b.(Set) - return a.Compare(b) - case *ArrayComprehension: - b := b.(*ArrayComprehension) - if cmp := Compare(a.Term, b.Term); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case *ObjectComprehension: - b := b.(*ObjectComprehension) - if cmp := Compare(a.Key, b.Key); cmp != 0 { - return cmp - } - if cmp := Compare(a.Value, b.Value); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case *SetComprehension: - b := b.(*SetComprehension) - if cmp := Compare(a.Term, b.Term); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case Call: - b := b.(Call) - return termSliceCompare(a, b) - case *Expr: - b := b.(*Expr) - return a.Compare(b) - case *SomeDecl: - b := b.(*SomeDecl) - return a.Compare(b) - case *Every: - b := b.(*Every) - return a.Compare(b) - case *With: - b := b.(*With) 
- return a.Compare(b) - case Body: - b := b.(Body) - return a.Compare(b) - case *Head: - b := b.(*Head) - return a.Compare(b) - case *Rule: - b := b.(*Rule) - return a.Compare(b) - case Args: - b := b.(Args) - return termSliceCompare(a, b) - case *Import: - b := b.(*Import) - return a.Compare(b) - case *Package: - b := b.(*Package) - return a.Compare(b) - case *Annotations: - b := b.(*Annotations) - return a.Compare(b) - case *Module: - b := b.(*Module) - return a.Compare(b) - } - panic(fmt.Sprintf("illegal value: %T", a)) -} - -type termSlice []*Term - -func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 } -func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s termSlice) Len() int { return len(s) } - -func sortOrder(x interface{}) int { - switch x.(type) { - case Null: - return 0 - case Boolean: - return 1 - case Number: - return 2 - case String: - return 3 - case Var: - return 4 - case Ref: - return 5 - case *Array: - return 6 - case Object: - return 7 - case Set: - return 8 - case *ArrayComprehension: - return 9 - case *ObjectComprehension: - return 10 - case *SetComprehension: - return 11 - case Call: - return 12 - case Args: - return 13 - case *Expr: - return 100 - case *SomeDecl: - return 101 - case *Every: - return 102 - case *With: - return 110 - case *Head: - return 120 - case Body: - return 200 - case *Rule: - return 1000 - case *Import: - return 1001 - case *Package: - return 1002 - case *Annotations: - return 1003 - case *Module: - return 10000 - } - panic(fmt.Sprintf("illegal value: %T", x)) -} - -func importsCompare(a, b []*Import) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func annotationsCompare(a, b []*Annotations) int { - minLen := len(a) - if len(b) < minLen { - minLen = 
len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func rulesCompare(a, b []*Rule) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func termSliceCompare(a, b []*Term) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } else if len(b) < len(a) { - return 1 - } - return 0 -} - -func withSliceCompare(a, b []*With) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } else if len(b) < len(a) { - return 1 - } - return 0 +func Compare(a, b any) int { + return v1.Compare(a, b) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/compile.go b/vendor/github.com/open-policy-agent/opa/ast/compile.go index 9025f862b2..5a3daa910a 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/compile.go +++ b/vendor/github.com/open-policy-agent/opa/ast/compile.go @@ -5,5882 +5,123 @@ package ast import ( - "errors" - "fmt" - "io" - "sort" - "strconv" - "strings" - - "github.com/open-policy-agent/opa/ast/location" - "github.com/open-policy-agent/opa/internal/debug" - "github.com/open-policy-agent/opa/internal/gojsonschema" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // CompileErrorLimitDefault is the default number errors a compiler will allow before // exiting. 
const CompileErrorLimitDefault = 10 -var errLimitReached = NewError(CompileErr, nil, "error limit reached") - // Compiler contains the state of a compilation process. -type Compiler struct { - - // Errors contains errors that occurred during the compilation process. - // If there are one or more errors, the compilation process is considered - // "failed". - Errors Errors - - // Modules contains the compiled modules. The compiled modules are the - // output of the compilation process. If the compilation process failed, - // there is no guarantee about the state of the modules. - Modules map[string]*Module - - // ModuleTree organizes the modules into a tree where each node is keyed by - // an element in the module's package path. E.g., given modules containing - // the following package directives: "a", "a.b", "a.c", and "a.b", the - // resulting module tree would be: - // - // root - // | - // +--- data (no modules) - // | - // +--- a (1 module) - // | - // +--- b (2 modules) - // | - // +--- c (1 module) - // - ModuleTree *ModuleTreeNode - - // RuleTree organizes rules into a tree where each node is keyed by an - // element in the rule's path. The rule path is the concatenation of the - // containing package and the stringified rule name. 
E.g., given the - // following module: - // - // package ex - // p[1] { true } - // p[2] { true } - // q = true - // a.b.c = 3 - // - // root - // | - // +--- data (no rules) - // | - // +--- ex (no rules) - // | - // +--- p (2 rules) - // | - // +--- q (1 rule) - // | - // +--- a - // | - // +--- b - // | - // +--- c (1 rule) - // - // Another example with general refs containing vars at arbitrary locations: - // - // package ex - // a.b[x].d { x := "c" } # R1 - // a.b.c[x] { x := "d" } # R2 - // a.b[x][y] { x := "c"; y := "d" } # R3 - // p := true # R4 - // - // root - // | - // +--- data (no rules) - // | - // +--- ex (no rules) - // | - // +--- a - // | | - // | +--- b (R1, R3) - // | | - // | +--- c (R2) - // | - // +--- p (R4) - RuleTree *TreeNode - - // Graph contains dependencies between rules. An edge (u,v) is added to the - // graph if rule 'u' refers to the virtual document defined by 'v'. - Graph *Graph - - // TypeEnv holds type information for values inferred by the compiler. - TypeEnv *TypeEnv - - // RewrittenVars is a mapping of variables that have been rewritten - // with the key being the generated name and value being the original. - RewrittenVars map[Var]Var - - // Capabliities required by the modules that were compiled. 
- Required *Capabilities - - localvargen *localVarGenerator - moduleLoader ModuleLoader - ruleIndices *util.HashMap - stages []stage - maxErrs int - sorted []string // list of sorted module names - pathExists func([]string) (bool, error) - after map[string][]CompilerStageDefinition - metrics metrics.Metrics - capabilities *Capabilities // user-supplied capabilities - imports map[string][]*Import // saved imports from stripping - builtins map[string]*Builtin // universe of built-in functions - customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities) - unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities) - deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions - enablePrintStatements bool // indicates if print statements should be elided (default) - comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index - initialized bool // indicates if init() has been called - debug debug.Debug // emits debug information produced during compilation - schemaSet *SchemaSet // user-supplied schemas for input and data documents - inputType types.Type // global input type retrieved from schema set - annotationSet *AnnotationSet // hierarchical set of annotations - strict bool // enforce strict compilation checks - keepModules bool // whether to keep the unprocessed, parse modules (below) - parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true - useTypeCheckAnnotations bool // whether to provide annotated information (schemas) to the type checker - allowUndefinedFuncCalls bool // don't error on calls to unknown functions. - evalMode CompilerEvalMode // - rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing. 
-} +type Compiler = v1.Compiler // CompilerStage defines the interface for stages in the compiler. -type CompilerStage func(*Compiler) *Error +type CompilerStage = v1.CompilerStage // CompilerEvalMode allows toggling certain stages that are only // needed for certain modes, Concretely, only "topdown" mode will // have the compiler build comprehension and rule indices. -type CompilerEvalMode int +type CompilerEvalMode = v1.CompilerEvalMode const ( // EvalModeTopdown (default) instructs the compiler to build rule // and comprehension indices used by topdown evaluation. - EvalModeTopdown CompilerEvalMode = iota + EvalModeTopdown = v1.EvalModeTopdown // EvalModeIR makes the compiler skip the stages for comprehension // and rule indices. - EvalModeIR + EvalModeIR = v1.EvalModeIR ) // CompilerStageDefinition defines a compiler stage -type CompilerStageDefinition struct { - Name string - MetricName string - Stage CompilerStage -} +type CompilerStageDefinition = v1.CompilerStageDefinition // RulesOptions defines the options for retrieving rules by Ref from the // compiler. -type RulesOptions struct { - // IncludeHiddenModules determines if the result contains hidden modules, - // currently only the "system" namespace, i.e. "data.system.*". - IncludeHiddenModules bool -} +type RulesOptions = v1.RulesOptions // QueryContext contains contextual information for running an ad-hoc query. // // Ad-hoc queries can be run in the context of a package and imports may be // included to provide concise access to data. -type QueryContext struct { - Package *Package - Imports []*Import -} +type QueryContext = v1.QueryContext // NewQueryContext returns a new QueryContext object. func NewQueryContext() *QueryContext { - return &QueryContext{} -} - -// WithPackage sets the pkg on qc. -func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext { - if qc == nil { - qc = NewQueryContext() - } - qc.Package = pkg - return qc -} - -// WithImports sets the imports on qc. 
-func (qc *QueryContext) WithImports(imports []*Import) *QueryContext { - if qc == nil { - qc = NewQueryContext() - } - qc.Imports = imports - return qc -} - -// Copy returns a deep copy of qc. -func (qc *QueryContext) Copy() *QueryContext { - if qc == nil { - return nil - } - cpy := *qc - if cpy.Package != nil { - cpy.Package = qc.Package.Copy() - } - cpy.Imports = make([]*Import, len(qc.Imports)) - for i := range qc.Imports { - cpy.Imports[i] = qc.Imports[i].Copy() - } - return &cpy + return v1.NewQueryContext() } // QueryCompiler defines the interface for compiling ad-hoc queries. -type QueryCompiler interface { - - // Compile should be called to compile ad-hoc queries. The return value is - // the compiled version of the query. - Compile(q Body) (Body, error) - - // TypeEnv returns the type environment built after running type checking - // on the query. - TypeEnv() *TypeEnv - - // WithContext sets the QueryContext on the QueryCompiler. Subsequent calls - // to Compile will take the QueryContext into account. - WithContext(qctx *QueryContext) QueryCompiler - - // WithEnablePrintStatements enables print statements in queries compiled - // with the QueryCompiler. - WithEnablePrintStatements(yes bool) QueryCompiler - - // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not - // allow inside of queries. By default the query compiler inherits the - // compiler's unsafe built-in functions. This function allows callers to - // override that set. If an empty (non-nil) map is provided, all built-ins - // are allowed. - WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler - - // WithStageAfter registers a stage to run during query compilation after - // the named stage. - WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler - - // RewrittenVars maps generated vars in the compiled query to vars from the - // parsed query. For example, given the query "input := 1" the rewritten - // query would be "__local0__ = 1". 
The mapping would then be {__local0__: input}. - RewrittenVars() map[Var]Var - - // ComprehensionIndex returns an index data structure for the given comprehension - // term. If no index is found, returns nil. - ComprehensionIndex(term *Term) *ComprehensionIndex - - // WithStrict enables strict mode for the query compiler. - WithStrict(strict bool) QueryCompiler -} +type QueryCompiler = v1.QueryCompiler // QueryCompilerStage defines the interface for stages in the query compiler. -type QueryCompilerStage func(QueryCompiler, Body) (Body, error) +type QueryCompilerStage = v1.QueryCompilerStage // QueryCompilerStageDefinition defines a QueryCompiler stage -type QueryCompilerStageDefinition struct { - Name string - MetricName string - Stage QueryCompilerStage -} - -type stage struct { - name string - metricName string - f func() -} +type QueryCompilerStageDefinition = v1.QueryCompilerStageDefinition // NewCompiler returns a new empty compiler. func NewCompiler() *Compiler { - - c := &Compiler{ - Modules: map[string]*Module{}, - RewrittenVars: map[Var]Var{}, - Required: &Capabilities{}, - ruleIndices: util.NewHashMap(func(a, b util.T) bool { - r1, r2 := a.(Ref), b.(Ref) - return r1.Equal(r2) - }, func(x util.T) int { - return x.(Ref).Hash() - }), - maxErrs: CompileErrorLimitDefault, - after: map[string][]CompilerStageDefinition{}, - unsafeBuiltinsMap: map[string]struct{}{}, - deprecatedBuiltinsMap: map[string]struct{}{}, - comprehensionIndices: map[*Term]*ComprehensionIndex{}, - debug: debug.Discard(), - } - - c.ModuleTree = NewModuleTree(nil) - c.RuleTree = NewRuleTree(c.ModuleTree) - - c.stages = []stage{ - // Reference resolution should run first as it may be used to lazily - // load additional modules. If any stages run before resolution, they - // need to be re-run after resolution. 
- {"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs}, - // The local variable generator must be initialized after references are - // resolved and the dynamic module loader has run but before subsequent - // stages that need to generate variables. - {"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen}, - {"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs}, - {"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides}, - {"CheckDuplicateImports", "compile_stage_check_duplicate_imports", c.checkDuplicateImports}, - {"RemoveImports", "compile_stage_remove_imports", c.removeImports}, - {"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree}, - {"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs - {"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars}, - {"CheckVoidCalls", "compile_stage_check_void_calls", c.checkVoidCalls}, - {"RewritePrintCalls", "compile_stage_rewrite_print_calls", c.rewritePrintCalls}, - {"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms}, - {"ParseMetadataBlocks", "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks}, - {"SetAnnotationSet", "compile_stage_set_annotationset", c.setAnnotationSet}, - {"RewriteRegoMetadataCalls", "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls}, - {"SetGraph", "compile_stage_set_graph", c.setGraph}, - {"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms}, - {"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead}, - {"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers}, - {"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts}, - {"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs}, - {"CheckSafetyRuleHeads", 
"compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads}, - {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies}, - {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals}, - {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms}, - {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms - {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion}, - {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion - {"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins}, - {"CheckDeprecatedBuiltins", "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins}, - {"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices}, - {"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices}, - {"BuildRequiredCapabilities", "compile_stage_build_required_capabilities", c.buildRequiredCapabilities}, - } - - return c -} - -// SetErrorLimit sets the number of errors the compiler can encounter before it -// quits. Zero or a negative number indicates no limit. -func (c *Compiler) SetErrorLimit(limit int) *Compiler { - c.maxErrs = limit - return c -} - -// WithEnablePrintStatements enables print statements inside of modules compiled -// by the compiler. If print statements are not enabled, calls to print() are -// erased at compile-time. -func (c *Compiler) WithEnablePrintStatements(yes bool) *Compiler { - c.enablePrintStatements = yes - return c -} - -// WithPathConflictsCheck enables base-virtual document conflict -// detection. The compiler will check that rules don't overlap with -// paths that exist as determined by the provided callable. 
-func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler { - c.pathExists = fn - return c -} - -// WithStageAfter registers a stage to run during compilation after -// the named stage. -func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler { - c.after[after] = append(c.after[after], stage) - return c -} - -// WithMetrics will set a metrics.Metrics and be used for profiling -// the Compiler instance. -func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler { - c.metrics = metrics - return c -} - -// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller -// to specify the set of built-in functions available to the policy. In the future, capabilities -// may be able to restrict access to other language features. Capabilities allow callers to check -// if policies are compatible with a particular version of OPA. If policies are a compiled for a -// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them -// successfully. -func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler { - c.capabilities = capabilities - return c -} - -// Capabilities returns the capabilities enabled during compilation. -func (c *Compiler) Capabilities() *Capabilities { - return c.capabilities -} - -// WithDebug sets where debug messages are written to. Passing `nil` has no -// effect. -func (c *Compiler) WithDebug(sink io.Writer) *Compiler { - if sink != nil { - c.debug = debug.New(sink) - } - return c -} - -// WithBuiltins is deprecated. Use WithCapabilities instead. -func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler { - c.customBuiltins = make(map[string]*Builtin) - for k, v := range builtins { - c.customBuiltins[k] = v - } - return c -} - -// WithUnsafeBuiltins is deprecated. Use WithCapabilities instead. 
-func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler { - for name := range unsafeBuiltins { - c.unsafeBuiltinsMap[name] = struct{}{} - } - return c -} - -// WithStrict enables strict mode in the compiler. -func (c *Compiler) WithStrict(strict bool) *Compiler { - c.strict = strict - return c -} - -// WithKeepModules enables retaining unprocessed modules in the compiler. -// Note that the modules aren't copied on the way in or out -- so when -// accessing them via ParsedModules(), mutations will occur in the module -// map that was passed into Compile().` -func (c *Compiler) WithKeepModules(y bool) *Compiler { - c.keepModules = y - return c -} - -// WithUseTypeCheckAnnotations use schema annotations during type checking -func (c *Compiler) WithUseTypeCheckAnnotations(enabled bool) *Compiler { - c.useTypeCheckAnnotations = enabled - return c -} - -func (c *Compiler) WithAllowUndefinedFunctionCalls(allow bool) *Compiler { - c.allowUndefinedFuncCalls = allow - return c -} - -// WithEvalMode allows setting the CompilerEvalMode of the compiler -func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler { - c.evalMode = e - return c -} - -// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables, -// so they can be accessed by tracing. -func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler { - c.rewriteTestRulesForTracing = rewrite - return c -} - -// ParsedModules returns the parsed, unprocessed modules from the compiler. -// It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`. -// The map includes all modules loaded via the ModuleLoader, if one was used. -func (c *Compiler) ParsedModules() map[string]*Module { - return c.parsedModules -} - -func (c *Compiler) QueryCompiler() QueryCompiler { - c.init() - c0 := *c - return newQueryCompiler(&c0) -} - -// Compile runs the compilation process on the input modules. 
The compiled -// version of the modules and associated data structures are stored on the -// compiler. If the compilation process fails for any reason, the compiler will -// contain a slice of errors. -func (c *Compiler) Compile(modules map[string]*Module) { - - c.init() - - c.Modules = make(map[string]*Module, len(modules)) - c.sorted = make([]string, 0, len(modules)) - - if c.keepModules { - c.parsedModules = make(map[string]*Module, len(modules)) - } else { - c.parsedModules = nil - } - - for k, v := range modules { - c.Modules[k] = v.Copy() - c.sorted = append(c.sorted, k) - if c.parsedModules != nil { - c.parsedModules[k] = v - } - } - - sort.Strings(c.sorted) - - c.compile() -} - -// WithSchemas sets a schemaSet to the compiler -func (c *Compiler) WithSchemas(schemas *SchemaSet) *Compiler { - c.schemaSet = schemas - return c -} - -// Failed returns true if a compilation error has been encountered. -func (c *Compiler) Failed() bool { - return len(c.Errors) > 0 -} - -// ComprehensionIndex returns a data structure specifying how to index comprehension -// results so that callers do not have to recompute the comprehension more than once. -// If no index is found, returns nil. -func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex { - return c.comprehensionIndices[term] -} - -// GetArity returns the number of args a function referred to by ref takes. If -// ref refers to built-in function, the built-in declaration is consulted, -// otherwise, the ref is used to perform a ruleset lookup. -func (c *Compiler) GetArity(ref Ref) int { - if bi := c.builtins[ref.String()]; bi != nil { - return len(bi.Decl.FuncArgs().Args) - } - rules := c.GetRulesExact(ref) - if len(rules) == 0 { - return -1 - } - return len(rules[0].Head.Args) -} - -// GetRulesExact returns a slice of rules referred to by the reference. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[k] = v { ... } # rule1 -// p[k1] = v1 { ... 
} # rule2 -// -// The following calls yield the rules on the right. -// -// GetRulesExact("data.a.b.c.p") => [rule1, rule2] -// GetRulesExact("data.a.b.c.p.x") => nil -// GetRulesExact("data.a.b.c") => nil -func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) { - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - } - - return extractRules(node.Values) -} - -// GetRulesForVirtualDocument returns a slice of rules that produce the virtual -// document referred to by the reference. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[k] = v { ... } # rule1 -// p[k1] = v1 { ... } # rule2 -// -// The following calls yield the rules on the right. -// -// GetRulesForVirtualDocument("data.a.b.c.p") => [rule1, rule2] -// GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2] -// GetRulesForVirtualDocument("data.a.b.c") => nil -func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) { - - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - if len(node.Values) > 0 { - return extractRules(node.Values) - } - } - - return extractRules(node.Values) -} - -// GetRulesWithPrefix returns a slice of rules that share the prefix ref. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[x] = y { ... } # rule1 -// p[k] = v { ... } # rule2 -// q { ... } # rule3 -// -// The following calls yield the rules on the right. -// -// GetRulesWithPrefix("data.a.b.c.p") => [rule1, rule2] -// GetRulesWithPrefix("data.a.b.c.p.a") => nil -// GetRulesWithPrefix("data.a.b.c") => [rule1, rule2, rule3] -func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) { - - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - } - - var acc func(node *TreeNode) - - acc = func(node *TreeNode) { - rules = append(rules, extractRules(node.Values)...) 
- for _, child := range node.Children { - if child.Hide { - continue - } - acc(child) - } - } - - acc(node) - - return rules -} - -func extractRules(s []util.T) []*Rule { - rules := make([]*Rule, len(s)) - for i := range s { - rules[i] = s[i].(*Rule) - } - return rules -} - -// GetRules returns a slice of rules that are referred to by ref. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[x] = y { q[x] = y; ... } # rule1 -// q[x] = y { ... } # rule2 -// -// The following calls yield the rules on the right. -// -// GetRules("data.a.b.c.p") => [rule1] -// GetRules("data.a.b.c.p.x") => [rule1] -// GetRules("data.a.b.c.q") => [rule2] -// GetRules("data.a.b.c") => [rule1, rule2] -// GetRules("data.a.b.d") => nil -func (c *Compiler) GetRules(ref Ref) (rules []*Rule) { - - set := map[*Rule]struct{}{} - - for _, rule := range c.GetRulesForVirtualDocument(ref) { - set[rule] = struct{}{} - } - - for _, rule := range c.GetRulesWithPrefix(ref) { - set[rule] = struct{}{} - } - - for rule := range set { - rules = append(rules, rule) - } - - return rules -} - -// GetRulesDynamic returns a slice of rules that could be referred to by a ref. -// -// Deprecated: use GetRulesDynamicWithOpts -func (c *Compiler) GetRulesDynamic(ref Ref) []*Rule { - return c.GetRulesDynamicWithOpts(ref, RulesOptions{}) -} - -// GetRulesDynamicWithOpts returns a slice of rules that could be referred to by -// a ref. -// When parts of the ref are statically known, we use that information to narrow -// down which rules the ref could refer to, but in the most general case this -// will be an over-approximation. -// -// E.g., given the following modules: -// -// package a.b.c -// -// r1 = 1 # rule1 -// -// and: -// -// package a.d.c -// -// r2 = 2 # rule2 -// -// The following calls yield the rules on the right. 
-// -// GetRulesDynamicWithOpts("data.a[x].c[y]", opts) => [rule1, rule2] -// GetRulesDynamicWithOpts("data.a[x].c.r2", opts) => [rule2] -// GetRulesDynamicWithOpts("data.a.b[x][y]", opts) => [rule1] -// -// Using the RulesOptions parameter, the inclusion of hidden modules can be -// controlled: -// -// With -// -// package system.main -// -// r3 = 3 # rule3 -// -// We'd get this result: -// -// GetRulesDynamicWithOpts("data[x]", RulesOptions{IncludeHiddenModules: true}) => [rule1, rule2, rule3] -// -// Without the options, it would be excluded. -func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule { - node := c.RuleTree - - set := map[*Rule]struct{}{} - var walk func(node *TreeNode, i int) - walk = func(node *TreeNode, i int) { - switch { - case i >= len(ref): - // We've reached the end of the reference and want to collect everything - // under this "prefix". - node.DepthFirst(func(descendant *TreeNode) bool { - insertRules(set, descendant.Values) - if opts.IncludeHiddenModules { - return false - } - return descendant.Hide - }) - - case i == 0 || IsConstant(ref[i].Value): - // The head of the ref is always grounded. In case another part of the - // ref is also grounded, we can lookup the exact child. If it's not found - // we can immediately return... - if child := node.Child(ref[i].Value); child != nil { - if len(child.Values) > 0 { - // Add any rules at this position - insertRules(set, child.Values) - } - // There might still be "sub-rules" contributing key-value "overrides" for e.g. partial object rules, continue walking - walk(child, i+1) - } else { - return - } - - default: - // This part of the ref is a dynamic term. We can't know what it refers - // to and will just need to try all of the children. 
- for _, child := range node.Children { - if child.Hide && !opts.IncludeHiddenModules { - continue - } - insertRules(set, child.Values) - walk(child, i+1) - } - } - } - - walk(node, 0) - rules := make([]*Rule, 0, len(set)) - for rule := range set { - rules = append(rules, rule) - } - return rules -} - -// Utility: add all rule values to the set. -func insertRules(set map[*Rule]struct{}, rules []util.T) { - for _, rule := range rules { - set[rule.(*Rule)] = struct{}{} - } -} - -// RuleIndex returns a RuleIndex built for the rule set referred to by path. -// The path must refer to the rule set exactly, i.e., given a rule set at path -// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a -// RuleIndex built for the rule. -func (c *Compiler) RuleIndex(path Ref) RuleIndex { - r, ok := c.ruleIndices.Get(path) - if !ok { - return nil - } - return r.(RuleIndex) -} - -// PassesTypeCheck determines whether the given body passes type checking -func (c *Compiler) PassesTypeCheck(body Body) bool { - checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType) - env := c.TypeEnv - _, errs := checker.CheckBody(env, body) - return len(errs) == 0 -} - -// PassesTypeCheckRules determines whether the given rules passes type checking -func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors { - elems := []util.T{} - - for _, rule := range rules { - elems = append(elems, rule) - } - - // Load the global input schema if one was provided. 
- if c.schemaSet != nil { - if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { - - var allowNet []string - if c.capabilities != nil { - allowNet = c.capabilities.AllowNet - } - - tpe, err := loadSchema(schema, allowNet) - if err != nil { - return Errors{NewError(TypeErr, nil, err.Error())} - } - c.inputType = tpe - } - } - - var as *AnnotationSet - if c.useTypeCheckAnnotations { - as = c.annotationSet - } - - checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType) - - if c.TypeEnv == nil { - if c.capabilities == nil { - c.capabilities = CapabilitiesForThisVersion() - } - - c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins)) - - for _, bi := range c.capabilities.Builtins { - c.builtins[bi.Name] = bi - } - - for name, bi := range c.customBuiltins { - c.builtins[name] = bi - } - - c.TypeEnv = checker.Env(c.builtins) - } - - _, errs := checker.CheckTypes(c.TypeEnv, elems, as) - return errs + return v1.NewCompiler().WithDefaultRegoVersion(DefaultRegoVersion) } // ModuleLoader defines the interface that callers can implement to enable lazy // loading of modules during compilation. -type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error) - -// WithModuleLoader sets f as the ModuleLoader on the compiler. -// -// The compiler will invoke the ModuleLoader after resolving all references in -// the current set of input modules. The ModuleLoader can return a new -// collection of parsed modules that are to be included in the compilation -// process. This process will repeat until the ModuleLoader returns an empty -// collection or an error. If an error is returned, compilation will stop -// immediately. 
-func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler { - c.moduleLoader = f - return c -} - -func (c *Compiler) counterAdd(name string, n uint64) { - if c.metrics == nil { - return - } - c.metrics.Counter(name).Add(n) -} - -func (c *Compiler) buildRuleIndices() { - - c.RuleTree.DepthFirst(func(node *TreeNode) bool { - if len(node.Values) == 0 { - return false - } - rules := extractRules(node.Values) - hasNonGroundRef := false - for _, r := range rules { - hasNonGroundRef = !r.Head.Ref().IsGround() - } - if hasNonGroundRef { - // Collect children to ensure that all rules within the extent of a rule with a general ref - // are found on the same index. E.g. the following rules should be indexed under data.a.b.c: - // - // package a - // b.c[x].e := 1 { x := input.x } - // b.c.d := 2 - // b.c.d2.e[x] := 3 { x := input.x } - for _, child := range node.Children { - child.DepthFirst(func(c *TreeNode) bool { - rules = append(rules, extractRules(c.Values)...) - return false - }) - } - } - - index := newBaseDocEqIndex(func(ref Ref) bool { - return isVirtual(c.RuleTree, ref.GroundPrefix()) - }) - if index.Build(rules) { - c.ruleIndices.Put(rules[0].Ref().GroundPrefix(), index) - } - return hasNonGroundRef // currently, we don't allow those branches to go deeper - }) - -} - -func (c *Compiler) buildComprehensionIndices() { - for _, name := range c.sorted { - WalkRules(c.Modules[name], func(r *Rule) bool { - candidates := r.Head.Args.Vars() - candidates.Update(ReservedVars) - n := buildComprehensionIndices(c.debug, c.GetArity, candidates, c.RewrittenVars, r.Body, c.comprehensionIndices) - c.counterAdd(compileStageComprehensionIndexBuild, n) - return false - }) - } -} +type ModuleLoader = v1.ModuleLoader -// buildRequiredCapabilities updates the required capabilities on the compiler -// to include any keyword and feature dependencies present in the modules. The -// built-in function dependencies will have already been added by the type -// checker. 
-func (c *Compiler) buildRequiredCapabilities() { - - features := map[string]struct{}{} - - // extract required keywords from modules - keywords := map[string]struct{}{} - futureKeywordsPrefix := Ref{FutureRootDocument, StringTerm("keywords")} - for _, name := range c.sorted { - for _, imp := range c.imports[name] { - path := imp.Path.Value.(Ref) - switch { - case path.Equal(RegoV1CompatibleRef): - features[FeatureRegoV1Import] = struct{}{} - case path.HasPrefix(futureKeywordsPrefix): - if len(path) == 2 { - for kw := range futureKeywords { - keywords[kw] = struct{}{} - } - } else { - keywords[string(path[2].Value.(String))] = struct{}{} - } - } - } - } - - c.Required.FutureKeywords = stringMapToSortedSlice(keywords) - - // extract required features from modules - - for _, name := range c.sorted { - for _, rule := range c.Modules[name].Rules { - refLen := len(rule.Head.Reference) - if refLen >= 3 { - if refLen > len(rule.Head.Reference.ConstantPrefix()) { - features[FeatureRefHeads] = struct{}{} - } else { - features[FeatureRefHeadStringPrefixes] = struct{}{} - } - } - } - } - - c.Required.Features = stringMapToSortedSlice(features) - - for i, bi := range c.Required.Builtins { - c.Required.Builtins[i] = bi.Minimal() - } -} - -func stringMapToSortedSlice(xs map[string]struct{}) []string { - if len(xs) == 0 { - return nil - } - s := make([]string, 0, len(xs)) - for k := range xs { - s = append(s, k) - } - sort.Strings(s) - return s -} - -// checkRecursion ensures that there are no recursive definitions, i.e., there are -// no cycles in the Graph. 
-func (c *Compiler) checkRecursion() { - eq := func(a, b util.T) bool { - return a.(*Rule) == b.(*Rule) - } - - c.RuleTree.DepthFirst(func(node *TreeNode) bool { - for _, rule := range node.Values { - for node := rule.(*Rule); node != nil; node = node.Else { - c.checkSelfPath(node.Loc(), eq, node, node) - } - } - return false - }) -} - -func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) { - tr := NewGraphTraversal(c.Graph) - if p := util.DFSPath(tr, eq, a, b); len(p) > 0 { - n := make([]string, 0, len(p)) - for _, x := range p { - n = append(n, astNodeToString(x)) - } - c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> "))) - } -} - -func astNodeToString(x interface{}) string { - return x.(*Rule).Ref().String() -} - -// checkRuleConflicts ensures that rules definitions are not in conflict. -func (c *Compiler) checkRuleConflicts() { - rw := rewriteVarsInRef(c.RewrittenVars) - - c.RuleTree.DepthFirst(func(node *TreeNode) bool { - if len(node.Values) == 0 { - return false // go deeper - } - - kinds := make(map[RuleKind]struct{}, len(node.Values)) - defaultRules := 0 - completeRules := 0 - partialRules := 0 - arities := make(map[int]struct{}, len(node.Values)) - name := "" - var conflicts []Ref - - for _, rule := range node.Values { - r := rule.(*Rule) - ref := r.Ref() - name = rw(ref.Copy()).String() // varRewriter operates in-place - kinds[r.Head.RuleKind()] = struct{}{} - arities[len(r.Head.Args)] = struct{}{} - if r.Default { - defaultRules++ - } - - // Single-value rules may not have any other rules in their extent. - // Rules with vars in their ref are allowed to have rules inside their extent. - // Only the ground portion (terms before the first var term) of a rule's ref is considered when determining - // whether it's inside the extent of another (c.RuleTree is organized this way already). 
- // These pairs are invalid: - // - // data.p.q.r { true } # data.p.q is { "r": true } - // data.p.q.r.s { true } - // - // data.p.q.r { true } - // data.p.q.r[s].t { s = input.key } - // - // But this is allowed: - // - // data.p.q.r { true } - // data.p.q[r].s.t { r = input.key } - // - // data.p[r] := x { r = input.key; x = input.bar } - // data.p.q[r] := x { r = input.key; x = input.bar } - // - // data.p.q[r] { r := input.r } - // data.p.q.r.s { true } - // - // data.p.q[r] = 1 { r := "r" } - // data.p.q.s = 2 - // - // data.p[q][r] { q := input.q; r := input.r } - // data.p.q.r { true } - // - // data.p.q[r] { r := input.r } - // data.p[q].r { q := input.q } - // - // data.p.q[r][s] { r := input.r; s := input.s } - // data.p[q].r.s { q := input.q } - - if r.Ref().IsGround() && len(node.Children) > 0 { - conflicts = node.flattenChildren() - } - - if r.Head.RuleKind() == SingleValue && r.Head.Ref().IsGround() { - completeRules++ - } else { - partialRules++ - } - } - - switch { - case conflicts != nil: - c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule %v conflicts with %v", name, conflicts)) - - case len(kinds) > 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1): - c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name)) - - case defaultRules > 1: - c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "multiple default rules %s found", name)) - } - - return false - }) +// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting +// variables during the safety check. This has to be exported because it's relied on +// by the copy propagation implementation in topdown. +var SafetyCheckVisitorParams = v1.SafetyCheckVisitorParams - if c.pathExists != nil { - for _, err := range CheckPathConflicts(c, c.pathExists) { - c.err(err) - } - } +// ComprehensionIndex specifies how the comprehension term can be indexed. The keys +// tell the evaluator what variables to use for indexing. 
In the future, the index +// could be expanded with more information that would allow the evaluator to index +// a larger fragment of comprehensions (e.g., by closing over variables in the outer +// query.) +type ComprehensionIndex = v1.ComprehensionIndex - // NOTE(sr): depthfirst might better use sorted for stable errs? - c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool { - for _, mod := range node.Modules { - for _, rule := range mod.Rules { - ref := rule.Head.Ref().GroundPrefix() - // Rules with a dynamic portion in their ref are exempted, as a conflict within the dynamic portion - // can only be detected at eval-time. - if len(ref) < len(rule.Head.Ref()) { - continue - } +// ModuleTreeNode represents a node in the module tree. The module +// tree is keyed by the package path. +type ModuleTreeNode = v1.ModuleTreeNode - childNode, tail := node.find(ref) - if childNode != nil && len(tail) == 0 { - for _, childMod := range childNode.Modules { - // Avoid recursively checking a module for equality unless we know it's a possible self-match. - if childMod.Equal(mod) { - continue // don't self-conflict - } - msg := fmt.Sprintf("%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc()) - c.err(NewError(TypeErr, mod.Package.Loc(), msg)) - } - } - } - } - return false - }) -} +// TreeNode represents a node in the rule tree. The rule tree is keyed by +// rule path. +type TreeNode = v1.TreeNode -func (c *Compiler) checkUndefinedFuncs() { - for _, name := range c.sorted { - m := c.Modules[name] - for _, err := range checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars) { - c.err(err) - } - } +// NewRuleTree returns a new TreeNode that represents the root +// of the rule tree populated with the given rules. 
+func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { + return v1.NewRuleTree(mtree) } -func checkUndefinedFuncs(env *TypeEnv, x interface{}, arity func(Ref) int, rwVars map[Var]Var) Errors { - - var errs Errors - - WalkExprs(x, func(expr *Expr) bool { - if !expr.IsCall() { - return false - } - ref := expr.Operator() - if arity := arity(ref); arity >= 0 { - operands := len(expr.Operands()) - if expr.Generated { // an output var was added - if !expr.IsEquality() && operands != arity+1 { - ref = rewriteVarsInRef(rwVars)(ref) - errs = append(errs, arityMismatchError(env, ref, expr, arity, operands-1)) - return true - } - } else { // either output var or not - if operands != arity && operands != arity+1 { - ref = rewriteVarsInRef(rwVars)(ref) - errs = append(errs, arityMismatchError(env, ref, expr, arity, operands)) - return true - } - } - return false - } - ref = rewriteVarsInRef(rwVars)(ref) - errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref)) - return true - }) - - return errs -} +// Graph represents the graph of dependencies between rules. +type Graph = v1.Graph -func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error { - if want, ok := env.Get(f).(*types.Function); ok { // generate richer error for built-in functions - have := make([]types.Type, len(expr.Operands())) - for i, op := range expr.Operands() { - have[i] = env.Get(op) - } - return newArgError(expr.Loc(), f, "arity mismatch", have, want.NamedFuncArgs()) - } - if act != 1 { - return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d arguments", f, exp, act) - } - return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d argument", f, exp, act) +// NewGraph returns a new Graph based on modules. The list function must return +// the rules referred to directly by the ref. 
+func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph { + return v1.NewGraph(modules, list) } -// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target -// positions of built-in expressions will be bound when evaluating the rule from left -// to right, re-ordering as necessary. -func (c *Compiler) checkSafetyRuleBodies() { - for _, name := range c.sorted { - m := c.Modules[name] - WalkRules(m, func(r *Rule) bool { - safe := ReservedVars.Copy() - safe.Update(r.Head.Args.Vars()) - r.Body = c.checkBodySafety(safe, r.Body) - return false - }) - } -} +// GraphTraversal is a Traversal that understands the dependency graph +type GraphTraversal = v1.GraphTraversal -func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body { - reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b) - if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 { - for _, err := range errs { - c.err(err) - } - return b - } - return reordered +// NewGraphTraversal returns a Traversal for the dependency graph +func NewGraphTraversal(graph *Graph) *GraphTraversal { + return v1.NewGraphTraversal(graph) } -// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting -// variables during the safety check. This has to be exported because it's relied on -// by the copy propagation implementation in topdown. -var SafetyCheckVisitorParams = VarVisitorParams{ - SkipRefCallHead: true, - SkipClosures: true, +// OutputVarsFromBody returns all variables which are the "output" for +// the given body. For safety checks this means that they would be +// made safe by the body. +func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { + return v1.OutputVarsFromBody(c, body, safe) } -// checkSafetyRuleHeads ensures that variables appearing in the head of a -// rule also appear in the body. 
-func (c *Compiler) checkSafetyRuleHeads() { - - for _, name := range c.sorted { - m := c.Modules[name] - WalkRules(m, func(r *Rule) bool { - safe := r.Body.Vars(SafetyCheckVisitorParams) - safe.Update(r.Head.Args.Vars()) - unsafe := r.Head.Vars().Diff(safe) - for v := range unsafe { - if w, ok := c.RewrittenVars[v]; ok { - v = w - } - if !v.IsGenerated() { - c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v)) - } - } - return false - }) - } -} - -func compileSchema(goSchema interface{}, allowNet []string) (*gojsonschema.Schema, error) { - gojsonschema.SetAllowNet(allowNet) - - var refLoader gojsonschema.JSONLoader - sl := gojsonschema.NewSchemaLoader() - - if goSchema != nil { - refLoader = gojsonschema.NewGoLoader(goSchema) - } else { - return nil, fmt.Errorf("no schema as input to compile") - } - schemasCompiled, err := sl.Compile(refLoader) - if err != nil { - return nil, fmt.Errorf("unable to compile the schema: %w", err) - } - return schemasCompiled, nil -} - -func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema, error) { - if len(schemas) == 0 { - return nil, nil - } - var result = schemas[0] - - for i := range schemas { - if len(schemas[i].PropertiesChildren) > 0 { - if !schemas[i].Types.Contains("object") { - if err := schemas[i].Types.Add("object"); err != nil { - return nil, fmt.Errorf("unable to set the type in schemas") - } - } - } else if len(schemas[i].ItemsChildren) > 0 { - if !schemas[i].Types.Contains("array") { - if err := schemas[i].Types.Add("array"); err != nil { - return nil, fmt.Errorf("unable to set the type in schemas") - } - } - } - } - - for i := 1; i < len(schemas); i++ { - if result.Types.String() != schemas[i].Types.String() { - return nil, fmt.Errorf("unable to merge these schemas: type mismatch: %v and %v", result.Types.String(), schemas[i].Types.String()) - } else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && 
len(schemas[i].PropertiesChildren) > 0 { - result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...) - } else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 { - for j := 0; j < len(schemas[i].ItemsChildren); j++ { - if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) { - result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j]) - } - if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() { - return nil, fmt.Errorf("unable to merge these schemas") - } - } - } - } - return result, nil -} - -type schemaParser struct { - definitionCache map[string]*cachedDef -} - -type cachedDef struct { - properties []*types.StaticProperty -} - -func newSchemaParser() *schemaParser { - return &schemaParser{ - definitionCache: map[string]*cachedDef{}, - } -} - -func (parser *schemaParser) parseSchema(schema interface{}) (types.Type, error) { - return parser.parseSchemaWithPropertyKey(schema, "") -} - -func (parser *schemaParser) parseSchemaWithPropertyKey(schema interface{}, propertyKey string) (types.Type, error) { - subSchema, ok := schema.(*gojsonschema.SubSchema) - if !ok { - return nil, fmt.Errorf("unexpected schema type %v", subSchema) - } - - // Handle referenced schemas, returns directly when a $ref is found - if subSchema.RefSchema != nil { - if existing, ok := parser.definitionCache[subSchema.Ref.String()]; ok { - return types.NewObject(existing.properties, nil), nil - } - return parser.parseSchemaWithPropertyKey(subSchema.RefSchema, subSchema.Ref.String()) - } - - // Handle anyOf - if subSchema.AnyOf != nil { - var orType types.Type - - // If there is a core schema, find its type first - if subSchema.Types.IsTyped() { - copySchema := *subSchema - copySchemaRef := ©Schema - copySchemaRef.AnyOf = nil - coreType, err := parser.parseSchema(copySchemaRef) - if err != 
nil { - return nil, fmt.Errorf("unexpected schema type %v: %w", subSchema, err) - } - - // Only add Object type with static props to orType - if objType, ok := coreType.(*types.Object); ok { - if objType.StaticProperties() != nil && objType.DynamicProperties() == nil { - orType = types.Or(orType, coreType) - } - } - } - - // Iterate through every property of AnyOf and add it to orType - for _, pSchema := range subSchema.AnyOf { - newtype, err := parser.parseSchema(pSchema) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) - } - orType = types.Or(newtype, orType) - } - - return orType, nil - } - - if subSchema.AllOf != nil { - subSchemaArray := subSchema.AllOf - allOfResult, err := mergeSchemas(subSchemaArray...) - if err != nil { - return nil, err - } - - if subSchema.Types.IsTyped() { - if (subSchema.Types.Contains("object") && allOfResult.Types.Contains("object")) || (subSchema.Types.Contains("array") && allOfResult.Types.Contains("array")) { - objectOrArrayResult, err := mergeSchemas(allOfResult, subSchema) - if err != nil { - return nil, err - } - return parser.parseSchema(objectOrArrayResult) - } else if subSchema.Types.String() != allOfResult.Types.String() { - return nil, fmt.Errorf("unable to merge these schemas") - } - } - return parser.parseSchema(allOfResult) - } - - if subSchema.Types.IsTyped() { - if subSchema.Types.Contains("boolean") { - return types.B, nil - - } else if subSchema.Types.Contains("string") { - return types.S, nil - - } else if subSchema.Types.Contains("integer") || subSchema.Types.Contains("number") { - return types.N, nil - - } else if subSchema.Types.Contains("object") { - if len(subSchema.PropertiesChildren) > 0 { - def := &cachedDef{ - properties: make([]*types.StaticProperty, 0, len(subSchema.PropertiesChildren)), - } - for _, pSchema := range subSchema.PropertiesChildren { - def.properties = append(def.properties, types.NewStaticProperty(pSchema.Property, nil)) - } - if propertyKey != "" { 
- parser.definitionCache[propertyKey] = def - } - for _, pSchema := range subSchema.PropertiesChildren { - newtype, err := parser.parseSchema(pSchema) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) - } - for i, prop := range def.properties { - if prop.Key == pSchema.Property { - def.properties[i].Value = newtype - break - } - } - } - return types.NewObject(def.properties, nil), nil - } - return types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), nil - - } else if subSchema.Types.Contains("array") { - if len(subSchema.ItemsChildren) > 0 { - if subSchema.ItemsChildrenIsSingleSchema { - iSchema := subSchema.ItemsChildren[0] - newtype, err := parser.parseSchema(iSchema) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v", iSchema) - } - return types.NewArray(nil, newtype), nil - } - newTypes := make([]types.Type, 0, len(subSchema.ItemsChildren)) - for i := 0; i != len(subSchema.ItemsChildren); i++ { - iSchema := subSchema.ItemsChildren[i] - newtype, err := parser.parseSchema(iSchema) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v", iSchema) - } - newTypes = append(newTypes, newtype) - } - return types.NewArray(newTypes, nil), nil - } - return types.NewArray(nil, types.A), nil - } - } - - // Assume types if not specified in schema - if len(subSchema.PropertiesChildren) > 0 { - if err := subSchema.Types.Add("object"); err == nil { - return parser.parseSchema(subSchema) - } - } else if len(subSchema.ItemsChildren) > 0 { - if err := subSchema.Types.Add("array"); err == nil { - return parser.parseSchema(subSchema) - } - } - - return types.A, nil -} - -func (c *Compiler) setAnnotationSet() { - // Sorting modules by name for stable error reporting - sorted := make([]*Module, 0, len(c.Modules)) - for _, mName := range c.sorted { - sorted = append(sorted, c.Modules[mName]) - } - - as, errs := BuildAnnotationSet(sorted) - for _, err := range errs { - c.err(err) - } - 
c.annotationSet = as -} - -// checkTypes runs the type checker on all rules. The type checker builds a -// TypeEnv that is stored on the compiler. -func (c *Compiler) checkTypes() { - // Recursion is caught in earlier step, so this cannot fail. - sorted, _ := c.Graph.Sort() - checker := newTypeChecker(). - WithAllowNet(c.capabilities.AllowNet). - WithSchemaSet(c.schemaSet). - WithInputType(c.inputType). - WithBuiltins(c.builtins). - WithRequiredCapabilities(c.Required). - WithVarRewriter(rewriteVarsInRef(c.RewrittenVars)). - WithAllowUndefinedFunctionCalls(c.allowUndefinedFuncCalls) - var as *AnnotationSet - if c.useTypeCheckAnnotations { - as = c.annotationSet - } - env, errs := checker.CheckTypes(c.TypeEnv, sorted, as) - for _, err := range errs { - c.err(err) - } - c.TypeEnv = env -} - -func (c *Compiler) checkUnsafeBuiltins() { - for _, name := range c.sorted { - errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name]) - for _, err := range errs { - c.err(err) - } - } -} - -func (c *Compiler) checkDeprecatedBuiltins() { - for _, name := range c.sorted { - mod := c.Modules[name] - if c.strict || mod.regoV1Compatible() { - errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, mod) - for _, err := range errs { - c.err(err) - } - } - } -} - -func (c *Compiler) runStage(metricName string, f func()) { - if c.metrics != nil { - c.metrics.Timer(metricName).Start() - defer c.metrics.Timer(metricName).Stop() - } - f() -} - -func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error { - if c.metrics != nil { - c.metrics.Timer(metricName).Start() - defer c.metrics.Timer(metricName).Stop() - } - return s(c) -} - -func (c *Compiler) compile() { - - defer func() { - if r := recover(); r != nil && r != errLimitReached { - panic(r) - } - }() - - for _, s := range c.stages { - if c.evalMode == EvalModeIR { - switch s.name { - case "BuildRuleIndices", "BuildComprehensionIndices": - continue // skip these stages - } - } - - if c.allowUndefinedFuncCalls 
&& (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") { - continue - } - - c.runStage(s.metricName, s.f) - if c.Failed() { - return - } - for _, a := range c.after[s.name] { - if err := c.runStageAfter(a.MetricName, a.Stage); err != nil { - c.err(err) - return - } - } - } -} - -func (c *Compiler) init() { - - if c.initialized { - return - } - - if c.capabilities == nil { - c.capabilities = CapabilitiesForThisVersion() - } - - c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins)) - - for _, bi := range c.capabilities.Builtins { - c.builtins[bi.Name] = bi - if bi.IsDeprecated() { - c.deprecatedBuiltinsMap[bi.Name] = struct{}{} - } - } - - for name, bi := range c.customBuiltins { - c.builtins[name] = bi - } - - // Load the global input schema if one was provided. - if c.schemaSet != nil { - if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { - tpe, err := loadSchema(schema, c.capabilities.AllowNet) - if err != nil { - c.err(NewError(TypeErr, nil, err.Error())) - } else { - c.inputType = tpe - } - } - } - - c.TypeEnv = newTypeChecker(). - WithSchemaSet(c.schemaSet). - WithInputType(c.inputType). 
- Env(c.builtins) - - c.initialized = true -} - -func (c *Compiler) err(err *Error) { - if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs { - c.Errors = append(c.Errors, errLimitReached) - panic(errLimitReached) - } - c.Errors = append(c.Errors, err) -} - -func (c *Compiler) getExports() *util.HashMap { - - rules := util.NewHashMap(func(a, b util.T) bool { - return a.(Ref).Equal(b.(Ref)) - }, func(v util.T) int { - return v.(Ref).Hash() - }) - - for _, name := range c.sorted { - mod := c.Modules[name] - - for _, rule := range mod.Rules { - hashMapAdd(rules, mod.Package.Path, rule.Head.Ref().GroundPrefix()) - } - } - - return rules -} - -func hashMapAdd(rules *util.HashMap, pkg, rule Ref) { - prev, ok := rules.Get(pkg) - if !ok { - rules.Put(pkg, []Ref{rule}) - return - } - for _, p := range prev.([]Ref) { - if p.Equal(rule) { - return - } - } - rules.Put(pkg, append(prev.([]Ref), rule)) -} - -func (c *Compiler) GetAnnotationSet() *AnnotationSet { - return c.annotationSet -} - -func (c *Compiler) checkDuplicateImports() { - modules := make([]*Module, 0, len(c.Modules)) - - for _, name := range c.sorted { - mod := c.Modules[name] - if c.strict || mod.regoV1Compatible() { - modules = append(modules, mod) - } - } - - errs := checkDuplicateImports(modules) - for _, err := range errs { - c.err(err) - } -} - -func (c *Compiler) checkKeywordOverrides() { - for _, name := range c.sorted { - mod := c.Modules[name] - if c.strict || mod.regoV1Compatible() { - errs := checkRootDocumentOverrides(mod) - for _, err := range errs { - c.err(err) - } - } - } -} - -// resolveAllRefs resolves references in expressions to their fully qualified values. -// -// For instance, given the following module: -// -// package a.b -// import data.foo.bar -// p[x] { bar[_] = x } -// -// The reference "bar[_]" would be resolved to "data.foo.bar[_]". 
-// -// Ref rules are resolved, too: -// -// package a.b -// q { c.d.e == 1 } -// c.d[e] := 1 if e := "e" -// -// The reference "c.d.e" would be resolved to "data.a.b.c.d.e". -func (c *Compiler) resolveAllRefs() { - - rules := c.getExports() - - for _, name := range c.sorted { - mod := c.Modules[name] - - var ruleExports []Ref - if x, ok := rules.Get(mod.Package.Path); ok { - ruleExports = x.([]Ref) - } - - globals := getGlobals(mod.Package, ruleExports, mod.Imports) - - WalkRules(mod, func(rule *Rule) bool { - err := resolveRefsInRule(globals, rule) - if err != nil { - c.err(NewError(CompileErr, rule.Location, err.Error())) - } - return false - }) - - if c.strict { // check for unused imports - for _, imp := range mod.Imports { - path := imp.Path.Value.(Ref) - if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { - continue // ignore future and rego imports - } - - for v, u := range globals { - if v.Equal(imp.Name()) && !u.used { - c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String())) - } - } - } - } - } - - if c.moduleLoader != nil { - - parsed, err := c.moduleLoader(c.Modules) - if err != nil { - c.err(NewError(CompileErr, nil, err.Error())) - return - } - - if len(parsed) == 0 { - return - } - - for id, module := range parsed { - c.Modules[id] = module.Copy() - c.sorted = append(c.sorted, id) - if c.parsedModules != nil { - c.parsedModules[id] = module - } - } - - sort.Strings(c.sorted) - c.resolveAllRefs() - } -} - -func (c *Compiler) removeImports() { - c.imports = make(map[string][]*Import, len(c.Modules)) - for name := range c.Modules { - c.imports[name] = c.Modules[name].Imports - c.Modules[name].Imports = nil - } -} - -func (c *Compiler) initLocalVarGen() { - c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules) -} - -func (c *Compiler) rewriteComprehensionTerms() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - _, _ = rewriteComprehensionTerms(f, 
mod) // ignore error - } -} - -func (c *Compiler) rewriteExprTerms() { - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { - rewriteExprTermsInHead(c.localvargen, rule) - rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body) - return false - }) - } -} - -func (c *Compiler) rewriteRuleHeadRefs() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - WalkRules(c.Modules[name], func(rule *Rule) bool { - - ref := rule.Head.Ref() - // NOTE(sr): We're backfilling Refs here -- all parser code paths would have them, but - // it's possible to construct Module{} instances from Golang code, so we need - // to accommodate for that, too. - if len(rule.Head.Reference) == 0 { - rule.Head.Reference = ref - } - - cannotSpeakStringPrefixRefs := true - cannotSpeakGeneralRefs := true - for _, f := range c.capabilities.Features { - switch f { - case FeatureRefHeadStringPrefixes: - cannotSpeakStringPrefixRefs = false - case FeatureRefHeads: - cannotSpeakGeneralRefs = false - } - } - - if cannotSpeakStringPrefixRefs && cannotSpeakGeneralRefs && rule.Head.Name == "" { - c.err(NewError(CompileErr, rule.Loc(), "rule heads with refs are not supported: %v", rule.Head.Reference)) - return true - } - - for i := 1; i < len(ref); i++ { - if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last - if _, ok := ref[i].Value.(String); !ok { - c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference)) - continue - } - } - - // Rewrite so that any non-scalar elements in the rule's ref are vars: - // p.q.r[y.z] { ... } => p.q.r[__local0__] { __local0__ = y.z } - // p.q[a.b][c.d] { ... } => p.q[__local0__] { __local0__ = a.b; __local1__ = c.d } - // because that's what the RuleTree knows how to deal with. 
- if _, ok := ref[i].Value.(Var); !ok && !IsScalar(ref[i].Value) { - expr := f.Generate(ref[i]) - if i == len(ref)-1 && rule.Head.Key.Equal(ref[i]) { - rule.Head.Key = expr.Operand(0) - } - rule.Head.Reference[i] = expr.Operand(0) - rule.Body.Append(expr) - } - } - - return true - }) - } -} - -func (c *Compiler) checkVoidCalls() { - for _, name := range c.sorted { - mod := c.Modules[name] - for _, err := range checkVoidCalls(c.TypeEnv, mod) { - c.err(err) - } - } -} - -func (c *Compiler) rewritePrintCalls() { - var modified bool - if !c.enablePrintStatements { - for _, name := range c.sorted { - if erasePrintCalls(c.Modules[name]) { - modified = true - } - } - } else { - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(r *Rule) bool { - safe := r.Head.Args.Vars() - safe.Update(ReservedVars) - vis := func(b Body) bool { - modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, safe, b) - if modrec { - modified = true - } - for _, err := range errs { - c.err(err) - } - return false - } - WalkBodies(r.Head, vis) - WalkBodies(r.Body, vis) - return false - }) - } - } - if modified { - c.Required.addBuiltinSorted(Print) - } -} - -// checkVoidCalls returns errors for any expressions that treat void function -// calls as values. The only void functions in Rego are specific built-ins like -// print(). -func checkVoidCalls(env *TypeEnv, x interface{}) Errors { - var errs Errors - WalkTerms(x, func(x *Term) bool { - if call, ok := x.Value.(Call); ok { - if tpe, ok := env.Get(call[0]).(*types.Function); ok && tpe.Result() == nil { - errs = append(errs, NewError(TypeErr, x.Loc(), "%v used as value", call)) - } - } - return false - }) - return errs -} - -// rewritePrintCalls will rewrite the body so that print operands are captured -// in local variables and their evaluation occurs within a comprehension. -// Wrapping the terms inside of a comprehension ensures that undefined values do -// not short-circuit evaluation. 
-// -// For example, given the following print statement: -// -// print("the value of x is:", input.x) -// -// The expression would be rewritten to: -// -// print({__local0__ | __local0__ = "the value of x is:"}, {__local1__ | __local1__ = input.x}) -func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals VarSet, body Body) (bool, Errors) { - - var errs Errors - var modified bool - - // Visit comprehension bodies recursively to ensure print statements inside - // those bodies only close over variables that are safe. - for i := range body { - if ContainsClosures(body[i]) { - safe := outputVarsForBody(body[:i], getArity, globals) - safe.Update(globals) - WalkClosures(body[i], func(x interface{}) bool { - var modrec bool - var errsrec Errors - switch x := x.(type) { - case *SetComprehension: - modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) - case *ArrayComprehension: - modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) - case *ObjectComprehension: - modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) - case *Every: - safe.Update(x.KeyValueVars()) - modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) - } - if modrec { - modified = true - } - errs = append(errs, errsrec...) 
- return true - }) - if len(errs) > 0 { - return false, errs - } - } - } - - for i := range body { - - if !isPrintCall(body[i]) { - continue - } - - modified = true - - var errs Errors - safe := outputVarsForBody(body[:i], getArity, globals) - safe.Update(globals) - args := body[i].Operands() - - for j := range args { - vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) - vis.Walk(args[j]) - unsafe := vis.Vars().Diff(safe) - for _, v := range unsafe.Sorted() { - errs = append(errs, NewError(CompileErr, args[j].Loc(), "var %v is undeclared", v)) - } - } - - if len(errs) > 0 { - return false, errs - } - - arr := NewArray() - - for j := range args { - x := NewTerm(gen.Generate()).SetLocation(args[j].Loc()) - capture := Equality.Expr(x, args[j]).SetLocation(args[j].Loc()) - arr = arr.Append(SetComprehensionTerm(x, NewBody(capture)).SetLocation(args[j].Loc())) - } - - body.Set(NewExpr([]*Term{ - NewTerm(InternalPrint.Ref()).SetLocation(body[i].Loc()), - NewTerm(arr).SetLocation(body[i].Loc()), - }).SetLocation(body[i].Loc()), i) - } - - return modified, nil -} - -func erasePrintCalls(node interface{}) bool { - var modified bool - NewGenericVisitor(func(x interface{}) bool { - var modrec bool - switch x := x.(type) { - case *Rule: - modrec, x.Body = erasePrintCallsInBody(x.Body) - case *ArrayComprehension: - modrec, x.Body = erasePrintCallsInBody(x.Body) - case *SetComprehension: - modrec, x.Body = erasePrintCallsInBody(x.Body) - case *ObjectComprehension: - modrec, x.Body = erasePrintCallsInBody(x.Body) - case *Every: - modrec, x.Body = erasePrintCallsInBody(x.Body) - } - if modrec { - modified = true - } - return false - }).Walk(node) - return modified -} - -func erasePrintCallsInBody(x Body) (bool, Body) { - - if !containsPrintCall(x) { - return false, x - } - - var cpy Body - - for i := range x { - - // Recursively visit any comprehensions contained in this expression. 
- erasePrintCalls(x[i]) - - if !isPrintCall(x[i]) { - cpy.Append(x[i]) - } - } - - if len(cpy) == 0 { - term := BooleanTerm(true).SetLocation(x.Loc()) - expr := NewExpr(term).SetLocation(x.Loc()) - cpy.Append(expr) - } - - return true, cpy -} - -func containsPrintCall(x interface{}) bool { - var found bool - WalkExprs(x, func(expr *Expr) bool { - if !found { - if isPrintCall(expr) { - found = true - } - } - return found - }) - return found -} - -func isPrintCall(x *Expr) bool { - return x.IsCall() && x.Operator().Equal(Print.Ref()) -} - -// rewriteRefsInHead will rewrite rules so that the head does not contain any -// terms that require evaluation (e.g., refs or comprehensions). If the key or -// value contains one or more of these terms, the key or value will be moved -// into the body and assigned to a new variable. The new variable will replace -// the key or value in the head. -// -// For instance, given the following rule: -// -// p[{"foo": data.foo[i]}] { i < 100 } -// -// The rule would be re-written as: -// -// p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} } -func (c *Compiler) rewriteRefsInHead() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { - if requiresEval(rule.Head.Key) { - expr := f.Generate(rule.Head.Key) - rule.Head.Key = expr.Operand(0) - rule.Body.Append(expr) - } - if requiresEval(rule.Head.Value) { - expr := f.Generate(rule.Head.Value) - rule.Head.Value = expr.Operand(0) - rule.Body.Append(expr) - } - for i := 0; i < len(rule.Head.Args); i++ { - if requiresEval(rule.Head.Args[i]) { - expr := f.Generate(rule.Head.Args[i]) - rule.Head.Args[i] = expr.Operand(0) - rule.Body.Append(expr) - } - } - return false - }) - } -} - -func (c *Compiler) rewriteEquals() { - modified := false - for _, name := range c.sorted { - mod := c.Modules[name] - modified = rewriteEquals(mod) || modified - } - if modified { - c.Required.addBuiltinSorted(Equal) - } 
-} - -func (c *Compiler) rewriteDynamicTerms() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { - rule.Body = rewriteDynamics(f, rule.Body) - return false - }) - } -} - -// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise -// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var. -// For example, given the following module: -// -// package test -// -// p.q contains v if { -// some v in numbers.range(1, 3) -// } -// -// p.r := "foo" -// -// test_rule { -// p == { -// "q": {4, 5, 6} -// } -// } -// -// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact. -// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting. -func (c *Compiler) rewriteTestRuleEqualities() { - if !c.rewriteTestRulesForTracing { - return - } - - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { - if strings.HasPrefix(string(rule.Head.Name), "test_") { - rule.Body = rewriteTestEqualities(f, rule.Body) - } - return false - }) - } -} - -func (c *Compiler) parseMetadataBlocks() { - // Only parse annotations if rego.metadata built-ins are called - regoMetadataCalled := false - for _, name := range c.sorted { - mod := c.Modules[name] - WalkExprs(mod, func(expr *Expr) bool { - if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) { - regoMetadataCalled = true - } - return regoMetadataCalled - }) - - if regoMetadataCalled { - break - } - } - - if regoMetadataCalled { - // NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module - for _, name := range c.sorted { - mod := c.Modules[name] - - 
if len(mod.Annotations) == 0 { - var errs Errors - mod.Annotations, errs = parseAnnotations(mod.Comments) - errs = append(errs, attachAnnotationsNodes(mod)...) - for _, err := range errs { - c.err(err) - } - - attachRuleAnnotations(mod) - } - } - } -} - -func (c *Compiler) rewriteRegoMetadataCalls() { - eqFactory := newEqualityFactory(c.localvargen) - - _, chainFuncAllowed := c.builtins[RegoMetadataChain.Name] - _, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name] - - for _, name := range c.sorted { - mod := c.Modules[name] - - WalkRules(mod, func(rule *Rule) bool { - var firstChainCall *Expr - var firstRuleCall *Expr - - WalkExprs(rule, func(expr *Expr) bool { - if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) { - firstChainCall = expr - } else if ruleFuncAllowed && firstRuleCall == nil && isRegoMetadataRuleCall(expr) { - firstRuleCall = expr - } - return firstChainCall != nil && firstRuleCall != nil - }) - - chainCalled := firstChainCall != nil - ruleCalled := firstRuleCall != nil - - if chainCalled || ruleCalled { - body := make(Body, 0, len(rule.Body)+2) - - var metadataChainVar Var - if chainCalled { - // Create and inject metadata chain for rule - - chain, err := createMetadataChain(c.annotationSet.Chain(rule)) - if err != nil { - c.err(err) - return false - } - - chain.Location = firstChainCall.Location - eq := eqFactory.Generate(chain) - metadataChainVar = eq.Operands()[0].Value.(Var) - body.Append(eq) - } - - var metadataRuleVar Var - if ruleCalled { - // Create and inject metadata for rule - - var metadataRuleTerm *Term - - a := getPrimaryRuleAnnotations(c.annotationSet, rule) - if a != nil { - annotObj, err := a.toObject() - if err != nil { - c.err(err) - return false - } - metadataRuleTerm = NewTerm(*annotObj) - } else { - // If rule has no annotations, assign an empty object - metadataRuleTerm = ObjectTerm() - } - - metadataRuleTerm.Location = firstRuleCall.Location - eq := eqFactory.Generate(metadataRuleTerm) - 
metadataRuleVar = eq.Operands()[0].Value.(Var) - body.Append(eq) - } - - for _, expr := range rule.Body { - body.Append(expr) - } - rule.Body = body - - vis := func(b Body) bool { - for _, err := range rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars) { - c.err(err) - } - return false - } - WalkBodies(rule.Head, vis) - WalkBodies(rule.Body, vis) - } - - return false - }) - } -} - -func getPrimaryRuleAnnotations(as *AnnotationSet, rule *Rule) *Annotations { - annots := as.GetRuleScope(rule) - - if len(annots) == 0 { - return nil - } - - // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward - sort.SliceStable(annots, func(i, j int) bool { - return annots[i].Location.Compare(annots[j].Location) > 0 - }) - - return annots[0] -} - -func rewriteRegoMetadataCalls(metadataChainVar *Var, metadataRuleVar *Var, body Body, rewrittenVars *map[Var]Var) Errors { - var errs Errors - - WalkClosures(body, func(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension: - errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) - case *SetComprehension: - errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) - case *ObjectComprehension: - errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) - case *Every: - errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) - } - return true - }) - - for i := range body { - expr := body[i] - var metadataVar Var - - if metadataChainVar != nil && isRegoMetadataChainCall(expr) { - metadataVar = *metadataChainVar - } else if metadataRuleVar != nil && isRegoMetadataRuleCall(expr) { - metadataVar = *metadataRuleVar - } else { - continue - } - - // NOTE(johanfylling): An alternative strategy would be to walk the body and replace all operands[0] - // usages with *metadataChainVar - operands := expr.Operands() - 
var newExpr *Expr - if len(operands) > 0 { // There is an output var to rewrite - rewrittenVar := operands[0] - newExpr = Equality.Expr(rewrittenVar, NewTerm(metadataVar)) - } else { // No output var, just rewrite expr to metadataVar - newExpr = NewExpr(NewTerm(metadataVar)) - } - - newExpr.Generated = true - newExpr.Location = expr.Location - body.Set(newExpr, i) - } - - return errs -} - -func isRegoMetadataChainCall(x *Expr) bool { - return x.IsCall() && x.Operator().Equal(RegoMetadataChain.Ref()) -} - -func isRegoMetadataRuleCall(x *Expr) bool { - return x.IsCall() && x.Operator().Equal(RegoMetadataRule.Ref()) -} - -func createMetadataChain(chain []*AnnotationsRef) (*Term, *Error) { - - metaArray := NewArray() - for _, link := range chain { - p := link.Path.toArray(). - Slice(1, -1) // Dropping leading 'data' element of path - obj := NewObject( - Item(StringTerm("path"), NewTerm(p)), - ) - if link.Annotations != nil { - annotObj, err := link.Annotations.toObject() - if err != nil { - return nil, err - } - obj.Insert(StringTerm("annotations"), NewTerm(*annotObj)) - } - metaArray = metaArray.Append(NewTerm(obj)) - } - - return NewTerm(metaArray), nil -} - -func (c *Compiler) rewriteLocalVars() { - - var assignment bool - - for _, name := range c.sorted { - mod := c.Modules[name] - gen := c.localvargen - - WalkRules(mod, func(rule *Rule) bool { - argsStack := newLocalDeclaredVars() - - args := NewVarVisitor() - if c.strict { - args.Walk(rule.Head.Args) - } - unusedArgs := args.Vars() - - c.rewriteLocalArgVars(gen, argsStack, rule) - - // Rewrite local vars in each else-branch of the rule. - // Note: this is done instead of a walk so that we can capture any unused function arguments - // across else-branches. 
- for rule := rule; rule != nil; rule = rule.Else { - stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen) - if stack.assignment { - assignment = true - } - - for arg := range unusedArgs { - if stack.Count(arg) > 1 { - delete(unusedArgs, arg) - } - } - - for _, err := range errs { - c.err(err) - } - } - - if c.strict { - // Report an error for each unused function argument - for arg := range unusedArgs { - if !arg.IsWildcard() { - c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg)) - } - } - } - - return true - }) - } - - if assignment { - c.Required.addBuiltinSorted(Assign) - } -} - -func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsStack *localDeclaredVars, gen *localVarGenerator) (*localDeclaredVars, Errors) { - // Rewrite assignments contained in head of rule. Assignments can - // occur in rule head if they're inside a comprehension. Note, - // assigned vars in comprehensions in the head will be rewritten - // first to preserve scoping rules. For example: - // - // p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 } - // - // This behaviour is consistent scoping inside the body. For example: - // - // p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] } - nestedXform := &rewriteNestedHeadVarLocalTransform{ - gen: gen, - RewrittenVars: c.RewrittenVars, - strict: c.strict, - } - - NewGenericVisitor(nestedXform.Visit).Walk(rule.Head) - - for _, err := range nestedXform.errs { - c.err(err) - } - - // Rewrite assignments in body. 
- used := NewVarSet() - - for _, t := range rule.Head.Ref()[1:] { - used.Update(t.Vars()) - } - - if rule.Head.Key != nil { - used.Update(rule.Head.Key.Vars()) - } - - if rule.Head.Value != nil { - valueVars := rule.Head.Value.Vars() - used.Update(valueVars) - for arg := range unusedArgs { - if valueVars.Contains(arg) { - delete(unusedArgs, arg) - } - } - } - - stack := argsStack.Copy() - - body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body, c.strict) - - // For rewritten vars use the collection of all variables that - // were in the stack at some point in time. - for k, v := range stack.rewritten { - c.RewrittenVars[k] = v - } - - rule.Body = body - - // Rewrite vars in head that refer to locally declared vars in the body. - localXform := rewriteHeadVarLocalTransform{declared: declared} - - for i := range rule.Head.Args { - rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i]) - } - - for i := 1; i < len(rule.Head.Ref()); i++ { - rule.Head.Reference[i], _ = transformTerm(localXform, rule.Head.Ref()[i]) - } - if rule.Head.Key != nil { - rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key) - } - - if rule.Head.Value != nil { - rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value) - } - return stack, errs -} - -type rewriteNestedHeadVarLocalTransform struct { - gen *localVarGenerator - errs Errors - RewrittenVars map[Var]Var - strict bool -} - -func (xform *rewriteNestedHeadVarLocalTransform) Visit(x interface{}) bool { - - if term, ok := x.(*Term); ok { - - stop := false - stack := newLocalDeclaredVars() - - switch x := term.Value.(type) { - case *object: - cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { - kcpy := k.Copy() - NewGenericVisitor(xform.Visit).Walk(kcpy) - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return kcpy, vcpy, nil - }) - term.Value = cpy - stop = true - case *set: - cpy, _ := x.Map(func(v *Term) (*Term, error) { - vcpy := v.Copy() - 
NewGenericVisitor(xform.Visit).Walk(vcpy) - return vcpy, nil - }) - term.Value = cpy - stop = true - case *ArrayComprehension: - xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs, xform.strict) - stop = true - case *SetComprehension: - xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs, xform.strict) - stop = true - case *ObjectComprehension: - xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict) - stop = true - } - - for k, v := range stack.rewritten { - xform.RewrittenVars[k] = v - } - - return stop - } - - return false -} - -type rewriteHeadVarLocalTransform struct { - declared map[Var]Var -} - -func (xform rewriteHeadVarLocalTransform) Transform(x interface{}) (interface{}, error) { - if v, ok := x.(Var); ok { - if gv, ok := xform.declared[v]; ok { - return gv, nil - } - } - return x, nil -} - -func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) { - - vis := &ruleArgLocalRewriter{ - stack: stack, - gen: gen, - } - - for i := range rule.Head.Args { - Walk(vis, rule.Head.Args[i]) - } - - for i := range vis.errs { - c.err(vis.errs[i]) - } -} - -type ruleArgLocalRewriter struct { - stack *localDeclaredVars - gen *localVarGenerator - errs []*Error -} - -func (vis *ruleArgLocalRewriter) Visit(x interface{}) Visitor { - - t, ok := x.(*Term) - if !ok { - return vis - } - - switch v := t.Value.(type) { - case Var: - gv, ok := vis.stack.Declared(v) - if ok { - vis.stack.Seen(v) - } else { - gv = vis.gen.Generate() - vis.stack.Insert(v, gv, argVar) - } - t.Value = gv - return nil - case *object: - if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) { - vcpy := v.Copy() - Walk(vis, vcpy) - return k, vcpy, nil - }); err != nil { - vis.errs = append(vis.errs, NewError(CompileErr, t.Location, err.Error())) - } else { - t.Value = cpy - } - return nil - case Null, Boolean, Number, String, *ArrayComprehension, 
*SetComprehension, *ObjectComprehension, Set: - // Scalars are no-ops. Comprehensions are handled above. Sets must not - // contain variables. - return nil - case Call: - vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls")) - return nil - default: - // Recurse on refs and arrays. Any embedded - // variables can be rewritten. - return vis - } -} - -func (c *Compiler) rewriteWithModifiers() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - t := NewGenericTransformer(func(x interface{}) (interface{}, error) { - body, ok := x.(Body) - if !ok { - return x, nil - } - body, err := rewriteWithModifiersInBody(c, c.unsafeBuiltinsMap, f, body) - if err != nil { - c.err(err) - } - - return body, nil - }) - _, _ = Transform(t, mod) // ignore error - } -} - -func (c *Compiler) setModuleTree() { - c.ModuleTree = NewModuleTree(c.Modules) -} - -func (c *Compiler) setRuleTree() { - c.RuleTree = NewRuleTree(c.ModuleTree) -} - -func (c *Compiler) setGraph() { - list := func(r Ref) []*Rule { - return c.GetRulesDynamicWithOpts(r, RulesOptions{IncludeHiddenModules: true}) - } - c.Graph = NewGraph(c.Modules, list) -} - -type queryCompiler struct { - compiler *Compiler - qctx *QueryContext - typeEnv *TypeEnv - rewritten map[Var]Var - after map[string][]QueryCompilerStageDefinition - unsafeBuiltins map[string]struct{} - comprehensionIndices map[*Term]*ComprehensionIndex - enablePrintStatements bool -} - -func newQueryCompiler(compiler *Compiler) QueryCompiler { - qc := &queryCompiler{ - compiler: compiler, - qctx: nil, - after: map[string][]QueryCompilerStageDefinition{}, - comprehensionIndices: map[*Term]*ComprehensionIndex{}, - } - return qc -} - -func (qc *queryCompiler) WithStrict(strict bool) QueryCompiler { - qc.compiler.WithStrict(strict) - return qc -} - -func (qc *queryCompiler) WithEnablePrintStatements(yes bool) QueryCompiler { - qc.enablePrintStatements = yes - return qc -} - 
-func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler { - qc.qctx = qctx - return qc -} - -func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler { - qc.after[after] = append(qc.after[after], stage) - return qc -} - -func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler { - qc.unsafeBuiltins = unsafe - return qc -} - -func (qc *queryCompiler) RewrittenVars() map[Var]Var { - return qc.rewritten -} - -func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex { - if result, ok := qc.comprehensionIndices[term]; ok { - return result - } else if result, ok := qc.compiler.comprehensionIndices[term]; ok { - return result - } - return nil -} - -func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) { - if qc.compiler.metrics != nil { - qc.compiler.metrics.Timer(metricName).Start() - defer qc.compiler.metrics.Timer(metricName).Stop() - } - return s(qctx, query) -} - -func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) { - if qc.compiler.metrics != nil { - qc.compiler.metrics.Timer(metricName).Start() - defer qc.compiler.metrics.Timer(metricName).Stop() - } - return s(qc, query) -} - -type queryStage = struct { - name string - metricName string - f func(*QueryContext, Body) (Body, error) -} - -func (qc *queryCompiler) Compile(query Body) (Body, error) { - if len(query) == 0 { - return nil, Errors{NewError(CompileErr, nil, "empty query cannot be compiled")} - } - - query = query.Copy() - - stages := []queryStage{ - {"CheckKeywordOverrides", "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides}, - {"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs}, - {"RewriteLocalVars", "query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars}, - {"CheckVoidCalls", 
"query_compile_stage_check_void_calls", qc.checkVoidCalls}, - {"RewritePrintCalls", "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls}, - {"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms}, - {"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms}, - {"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers}, - {"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs}, - {"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety}, - {"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms}, - {"CheckTypes", "query_compile_stage_check_types", qc.checkTypes}, - {"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins}, - {"CheckDeprecatedBuiltins", "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins}, - } - if qc.compiler.evalMode == EvalModeTopdown { - stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices}) - } - - qctx := qc.qctx.Copy() - - for _, s := range stages { - var err error - query, err = qc.runStage(s.metricName, qctx, query, s.f) - if err != nil { - return nil, qc.applyErrorLimit(err) - } - for _, s := range qc.after[s.name] { - query, err = qc.runStageAfter(s.MetricName, query, s.Stage) - if err != nil { - return nil, qc.applyErrorLimit(err) - } - } - } - - return query, nil -} - -func (qc *queryCompiler) TypeEnv() *TypeEnv { - return qc.typeEnv -} - -func (qc *queryCompiler) applyErrorLimit(err error) error { - var errs Errors - if errors.As(err, &errs) { - if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs { - err = append(errs[:qc.compiler.maxErrs], errLimitReached) - } - } - return err -} - -func (qc *queryCompiler) checkKeywordOverrides(_ *QueryContext, body Body) (Body, error) { - if 
qc.compiler.strict { - if errs := checkRootDocumentOverrides(body); len(errs) > 0 { - return nil, errs - } - } - return body, nil -} - -func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error) { - - var globals map[Var]*usedRef - - if qctx != nil { - pkg := qctx.Package - // Query compiler ought to generate a package if one was not provided and one or more imports were provided. - // The generated package name could even be an empty string to avoid conflicts (it doesn't have to be valid syntactically) - if pkg == nil && len(qctx.Imports) > 0 { - pkg = &Package{Path: RefTerm(VarTerm("")).Value.(Ref)} - } - if pkg != nil { - var ruleExports []Ref - rules := qc.compiler.getExports() - if exist, ok := rules.Get(pkg.Path); ok { - ruleExports = exist.([]Ref) - } - - globals = getGlobals(qctx.Package, ruleExports, qctx.Imports) - qctx.Imports = nil - } - } - - ignore := &declaredVarStack{declaredVars(body)} - - return resolveRefsInBody(globals, ignore, body), nil -} - -func (qc *queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - f := newEqualityFactory(gen) - node, err := rewriteComprehensionTerms(f, body) - if err != nil { - return nil, err - } - return node.(Body), nil -} - -func (qc *queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - f := newEqualityFactory(gen) - return rewriteDynamics(f, body), nil -} - -func (qc *queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - return rewriteExprTermsInBody(gen, body), nil -} - -func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - stack := newLocalDeclaredVars() - body, _, err := rewriteLocalVars(gen, stack, nil, body, qc.compiler.strict) - if len(err) != 0 { - return nil, err - } - qc.rewritten = make(map[Var]Var, 
len(stack.rewritten)) - for k, v := range stack.rewritten { - // The vars returned during the rewrite will include all seen vars, - // even if they're not declared with an assignment operation. We don't - // want to include these inside the rewritten set though. - qc.rewritten[k] = v - } - return body, nil -} - -func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) { - if !qc.enablePrintStatements { - _, cpy := erasePrintCallsInBody(body) - return cpy, nil - } - gen := newLocalVarGenerator("q", body) - if _, errs := rewritePrintCalls(gen, qc.compiler.GetArity, ReservedVars, body); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkVoidCalls(_ *QueryContext, body Body) (Body, error) { - if errs := checkVoidCalls(qc.compiler.TypeEnv, body); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) { - if errs := checkUndefinedFuncs(qc.compiler.TypeEnv, body, qc.compiler.GetArity, qc.rewritten); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) { - safe := ReservedVars.Copy() - reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body) - if errs := safetyErrorSlice(unsafe, qc.RewrittenVars()); len(errs) > 0 { - return nil, errs - } - return reordered, nil -} - -func (qc *queryCompiler) checkTypes(_ *QueryContext, body Body) (Body, error) { - var errs Errors - checker := newTypeChecker(). - WithSchemaSet(qc.compiler.schemaSet). - WithInputType(qc.compiler.inputType). 
- WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars)) - qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body) - if len(errs) > 0 { - return nil, errs - } - - return body, nil -} - -func (qc *queryCompiler) checkUnsafeBuiltins(_ *QueryContext, body Body) (Body, error) { - errs := checkUnsafeBuiltins(qc.unsafeBuiltinsMap(), body) - if len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) unsafeBuiltinsMap() map[string]struct{} { - if qc.unsafeBuiltins != nil { - return qc.unsafeBuiltins - } - return qc.compiler.unsafeBuiltinsMap -} - -func (qc *queryCompiler) checkDeprecatedBuiltins(_ *QueryContext, body Body) (Body, error) { - if qc.compiler.strict { - errs := checkDeprecatedBuiltins(qc.compiler.deprecatedBuiltinsMap, body) - if len(errs) > 0 { - return nil, errs - } - } - return body, nil -} - -func (qc *queryCompiler) rewriteWithModifiers(_ *QueryContext, body Body) (Body, error) { - f := newEqualityFactory(newLocalVarGenerator("q", body)) - body, err := rewriteWithModifiersInBody(qc.compiler, qc.unsafeBuiltinsMap(), f, body) - if err != nil { - return nil, Errors{err} - } - return body, nil -} - -func (qc *queryCompiler) buildComprehensionIndices(_ *QueryContext, body Body) (Body, error) { - // NOTE(tsandall): The query compiler does not have a metrics object so we - // cannot record index metrics currently. - _ = buildComprehensionIndices(qc.compiler.debug, qc.compiler.GetArity, ReservedVars, qc.RewrittenVars(), body, qc.comprehensionIndices) - return body, nil -} - -// ComprehensionIndex specifies how the comprehension term can be indexed. The keys -// tell the evaluator what variables to use for indexing. In the future, the index -// could be expanded with more information that would allow the evaluator to index -// a larger fragment of comprehensions (e.g., by closing over variables in the outer -// query.) 
-type ComprehensionIndex struct { - Term *Term - Keys []*Term -} - -func (ci *ComprehensionIndex) String() string { - if ci == nil { - return "" - } - return fmt.Sprintf("", NewArray(ci.Keys...)) -} - -func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node interface{}, result map[*Term]*ComprehensionIndex) uint64 { - var n uint64 - cpy := candidates.Copy() - WalkBodies(node, func(b Body) bool { - for _, expr := range b { - index := getComprehensionIndex(dbg, arity, cpy, rwVars, expr) - if index != nil { - result[index.Term] = index - n++ - } - // Any variables appearing in the expressions leading up to the comprehension - // are fair-game to be used as index keys. - cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true})) - } - return false - }) - return n -} - -func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex { - - // Ignore everything except = expressions. Extract - // the comprehension term from the expression. - if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 { - // No debug message, these are assumed to be known hinderances - // to comprehension indexing. - return nil - } - - var term *Term - - lhs, rhs := expr.Operand(0), expr.Operand(1) - - if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) { - term = rhs - } else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) { - term = lhs - } - - if term == nil { - // no debug for this, it's the ordinary "nothing to do here" case - return nil - } - - // Ignore comprehensions that contain expressions that close over variables - // in the outer body if those variables are not also output variables in the - // comprehension body. In other words, ignore comprehensions that we cannot - // safely evaluate without bindings from the outer body. 
For example: - // - // x = [1] - // [true | data.y[z] = x] # safe to evaluate w/o outer body - // [true | data.y[z] = x[0]] # NOT safe to evaluate because 'x' would be unsafe. - // - // By identifying output variables in the body we also know what to index on by - // intersecting with candidate variables from the outer query. - // - // For example: - // - // x = data.foo[_] - // _ = [y | data.bar[y] = x] # index on 'x' - // - // This query goes from O(data.foo*data.bar) to O(data.foo+data.bar). - var body Body - - switch x := term.Value.(type) { - case *ArrayComprehension: - body = x.Body - case *SetComprehension: - body = x.Body - case *ObjectComprehension: - body = x.Body - } - - outputs := outputVarsForBody(body, arity, ReservedVars) - unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars) - - if len(unsafe) > 0 { - dbg.Printf("%s: comprehension index: unsafe vars: %v", expr.Location, unsafe) - return nil - } - - // Similarly, ignore comprehensions that contain references with output variables - // that intersect with the candidates. Indexing these comprehensions could worsen - // performance. - regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates) - regressionVis.Walk(body) - if regressionVis.worse { - dbg.Printf("%s: comprehension index: output vars intersect candidates", expr.Location) - return nil - } - - // Check if any nested comprehensions close over candidates. If any intersection is found - // the comprehension cannot be cached because it would require closing over the candidates - // which the evaluator does not support today. - nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates) - nestedVis.Walk(body) - if nestedVis.found { - dbg.Printf("%s: comprehension index: nested comprehensions close over candidates", expr.Location) - return nil - } - - // Make a sorted set of variable names that will serve as the index key set. - // Sort to ensure deterministic indexing. 
In future this could be relaxed - // if we can decide that one ordering is better than another. If the set is - // empty, there is no indexing to do. - indexVars := candidates.Intersect(outputs) - if len(indexVars) == 0 { - dbg.Printf("%s: comprehension index: no index vars", expr.Location) - return nil - } - - result := make([]*Term, 0, len(indexVars)) - - for v := range indexVars { - result = append(result, NewTerm(v)) - } - - sort.Slice(result, func(i, j int) bool { - return result[i].Value.Compare(result[j].Value) < 0 - }) - - debugRes := make([]*Term, len(result)) - for i, r := range result { - if o, ok := rwVars[r.Value.(Var)]; ok { - debugRes[i] = NewTerm(o) - } else { - debugRes[i] = r - } - } - dbg.Printf("%s: comprehension index: built with keys: %v", expr.Location, debugRes) - return &ComprehensionIndex{Term: term, Keys: result} -} - -type comprehensionIndexRegressionCheckVisitor struct { - candidates VarSet - seen VarSet - worse bool -} - -// TODO(tsandall): Improve this so that users can either supply this list explicitly -// or the information is maintained on the built-in function declaration. What we really -// need to know is whether the built-in function allows callers to push down output -// values or not. It's unlikely that anything outside of OPA does this today so this -// solution is fine for now. 
-var comprehensionIndexBlacklist = map[string]int{ - WalkBuiltin.Name: len(WalkBuiltin.Decl.FuncArgs().Args), -} - -func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor { - return &comprehensionIndexRegressionCheckVisitor{ - candidates: candidates, - seen: NewVarSet(), - } -} - -func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x interface{}) { - NewGenericVisitor(vis.visit).Walk(x) -} - -func (vis *comprehensionIndexRegressionCheckVisitor) visit(x interface{}) bool { - if !vis.worse { - switch x := x.(type) { - case *Expr: - operands := x.Operands() - if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) { - vis.assertEmptyIntersection(operands[pos].Vars()) - } - case Ref: - vis.assertEmptyIntersection(x.OutputVars()) - case Var: - vis.seen.Add(x) - // Always skip comprehensions. We do not have to visit their bodies here. - case *ArrayComprehension, *SetComprehension, *ObjectComprehension: - return true - } - } - return vis.worse -} - -func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) { - for v := range vs { - if vis.candidates.Contains(v) && !vis.seen.Contains(v) { - vis.worse = true - return - } - } -} - -type comprehensionIndexNestedCandidateVisitor struct { - candidates VarSet - found bool -} - -func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor { - return &comprehensionIndexNestedCandidateVisitor{ - candidates: candidates, - } -} - -func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x interface{}) { - NewGenericVisitor(vis.visit).Walk(x) -} - -func (vis *comprehensionIndexNestedCandidateVisitor) visit(x interface{}) bool { - - if vis.found { - return true - } - - if v, ok := x.(Value); ok && IsComprehension(v) { - varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) - varVis.Walk(v) - vis.found = 
len(varVis.Vars().Intersect(vis.candidates)) > 0 - return true - } - - return false -} - -// ModuleTreeNode represents a node in the module tree. The module -// tree is keyed by the package path. -type ModuleTreeNode struct { - Key Value - Modules []*Module - Children map[Value]*ModuleTreeNode - Hide bool -} - -func (n *ModuleTreeNode) String() string { - var rules []string - for _, m := range n.Modules { - for _, r := range m.Rules { - rules = append(rules, r.Head.String()) - } - } - return fmt.Sprintf("", n.Key, n.Children, rules, n.Hide) -} - -// NewModuleTree returns a new ModuleTreeNode that represents the root -// of the module tree populated with the given modules. -func NewModuleTree(mods map[string]*Module) *ModuleTreeNode { - root := &ModuleTreeNode{ - Children: map[Value]*ModuleTreeNode{}, - } - names := make([]string, 0, len(mods)) - for name := range mods { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - m := mods[name] - node := root - for i, x := range m.Package.Path { - c, ok := node.Children[x.Value] - if !ok { - var hide bool - if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 { - hide = true - } - c = &ModuleTreeNode{ - Key: x.Value, - Children: map[Value]*ModuleTreeNode{}, - Hide: hide, - } - node.Children[x.Value] = c - } - node = c - } - node.Modules = append(node.Modules, m) - } - return root -} - -// Size returns the number of modules in the tree. -func (n *ModuleTreeNode) Size() int { - s := len(n.Modules) - for _, c := range n.Children { - s += c.Size() - } - return s -} - -// Child returns n's child with key k. -func (n *ModuleTreeNode) child(k Value) *ModuleTreeNode { - switch k.(type) { - case String, Var: - return n.Children[k] - } - return nil -} - -// Find dereferences ref along the tree. ref[0] is converted to a String -// for convenience. 
-func (n *ModuleTreeNode) find(ref Ref) (*ModuleTreeNode, Ref) { - if v, ok := ref[0].Value.(Var); ok { - ref = Ref{StringTerm(string(v))}.Concat(ref[1:]) - } - node := n - for i, r := range ref { - next := node.child(r.Value) - if next == nil { - tail := make(Ref, len(ref)-i) - tail[0] = VarTerm(string(ref[i].Value.(String))) - copy(tail[1:], ref[i+1:]) - return node, tail - } - node = next - } - return node, nil -} - -// DepthFirst performs a depth-first traversal of the module tree rooted at n. -// If f returns true, traversal will not continue to the children of n. -func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) { - if f(n) { - return - } - for _, node := range n.Children { - node.DepthFirst(f) - } -} - -// TreeNode represents a node in the rule tree. The rule tree is keyed by -// rule path. -type TreeNode struct { - Key Value - Values []util.T - Children map[Value]*TreeNode - Sorted []Value - Hide bool -} - -func (n *TreeNode) String() string { - return fmt.Sprintf("", n.Key, n.Values, n.Sorted, n.Hide) -} - -// NewRuleTree returns a new TreeNode that represents the root -// of the rule tree populated with the given rules. 
-func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { - root := TreeNode{ - Key: mtree.Key, - } - - mtree.DepthFirst(func(m *ModuleTreeNode) bool { - for _, mod := range m.Modules { - if len(mod.Rules) == 0 { - root.add(mod.Package.Path, nil) - } - for _, rule := range mod.Rules { - root.add(rule.Ref().GroundPrefix(), rule) - } - } - return false - }) - - // ensure that data.system's TreeNode is hidden - node, tail := root.find(DefaultRootRef.Append(NewTerm(SystemDocumentKey))) - if len(tail) == 0 { // found - node.Hide = true - } - - root.DepthFirst(func(x *TreeNode) bool { - x.sort() - return false - }) - - return &root -} - -func (n *TreeNode) add(path Ref, rule *Rule) { - node, tail := n.find(path) - if len(tail) > 0 { - sub := treeNodeFromRef(tail, rule) - if node.Children == nil { - node.Children = make(map[Value]*TreeNode, 1) - } - node.Children[sub.Key] = sub - node.Sorted = append(node.Sorted, sub.Key) - } else { - if rule != nil { - node.Values = append(node.Values, rule) - } - } -} - -// Size returns the number of rules in the tree. -func (n *TreeNode) Size() int { - s := len(n.Values) - for _, c := range n.Children { - s += c.Size() - } - return s -} - -// Child returns n's child with key k. -func (n *TreeNode) Child(k Value) *TreeNode { - switch k.(type) { - case Ref, Call: - return nil - default: - return n.Children[k] - } -} - -// Find dereferences ref along the tree -func (n *TreeNode) Find(ref Ref) *TreeNode { - node := n - for _, r := range ref { - node = node.Child(r.Value) - if node == nil { - return nil - } - } - return node -} - -// Iteratively dereferences ref along the node's subtree. -// - If matching fails immediately, the tail will contain the full ref. -// - Partial matching will result in a tail of non-zero length. -// - A complete match will result in a 0 length tail. 
-func (n *TreeNode) find(ref Ref) (*TreeNode, Ref) { - node := n - for i := range ref { - next := node.Child(ref[i].Value) - if next == nil { - tail := make(Ref, len(ref)-i) - copy(tail, ref[i:]) - return node, tail - } - node = next - } - return node, nil -} - -// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If -// f returns true, traversal will not continue to the children of n. -func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) { - if f(n) { - return - } - for _, node := range n.Children { - node.DepthFirst(f) - } -} - -func (n *TreeNode) sort() { - sort.Slice(n.Sorted, func(i, j int) bool { - return n.Sorted[i].Compare(n.Sorted[j]) < 0 - }) -} - -func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode { - depth := len(ref) - 1 - key := ref[depth].Value - node := &TreeNode{ - Key: key, - Children: nil, - } - if rule != nil { - node.Values = []util.T{rule} - } - - for i := len(ref) - 2; i >= 0; i-- { - key := ref[i].Value - node = &TreeNode{ - Key: key, - Children: map[Value]*TreeNode{ref[i+1].Value: node}, - Sorted: []Value{ref[i+1].Value}, - } - } - return node -} - -// flattenChildren flattens all children's rule refs into a sorted array. -func (n *TreeNode) flattenChildren() []Ref { - ret := newRefSet() - for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away - sub.DepthFirst(func(x *TreeNode) bool { - for _, r := range x.Values { - rule := r.(*Rule) - ret.AddPrefix(rule.Ref()) - } - return false - }) - } - - sort.Slice(ret.s, func(i, j int) bool { - return ret.s[i].Compare(ret.s[j]) < 0 - }) - return ret.s -} - -// Graph represents the graph of dependencies between rules. -type Graph struct { - adj map[util.T]map[util.T]struct{} - radj map[util.T]map[util.T]struct{} - nodes map[util.T]struct{} - sorted []util.T -} - -// NewGraph returns a new Graph based on modules. The list function must return -// the rules referred to directly by the ref. 
-func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph { - - graph := &Graph{ - adj: map[util.T]map[util.T]struct{}{}, - radj: map[util.T]map[util.T]struct{}{}, - nodes: map[util.T]struct{}{}, - sorted: nil, - } - - // Create visitor to walk a rule AST and add edges to the rule graph for - // each dependency. - vis := func(a *Rule) *GenericVisitor { - stop := false - return NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case Ref: - for _, b := range list(x) { - for node := b; node != nil; node = node.Else { - graph.addDependency(a, node) - } - } - case *Rule: - if stop { - // Do not recurse into else clauses (which will be handled - // by the outer visitor.) - return true - } - stop = true - } - return false - }) - } - - // Walk over all rules, add them to graph, and build adjacency lists. - for _, module := range modules { - WalkRules(module, func(a *Rule) bool { - graph.addNode(a) - vis(a).Walk(a) - return false - }) - } - - return graph -} - -// Dependencies returns the set of rules that x depends on. -func (g *Graph) Dependencies(x util.T) map[util.T]struct{} { - return g.adj[x] -} - -// Dependents returns the set of rules that depend on x. -func (g *Graph) Dependents(x util.T) map[util.T]struct{} { - return g.radj[x] -} - -// Sort returns a slice of rules sorted by dependencies. If a cycle is found, -// ok is set to false. 
-func (g *Graph) Sort() (sorted []util.T, ok bool) { - if g.sorted != nil { - return g.sorted, true - } - - sorter := &graphSort{ - sorted: make([]util.T, 0, len(g.nodes)), - deps: g.Dependencies, - marked: map[util.T]struct{}{}, - temp: map[util.T]struct{}{}, - } - - for node := range g.nodes { - if !sorter.Visit(node) { - return nil, false - } - } - - g.sorted = sorter.sorted - return g.sorted, true -} - -func (g *Graph) addDependency(u util.T, v util.T) { - - if _, ok := g.nodes[u]; !ok { - g.addNode(u) - } - - if _, ok := g.nodes[v]; !ok { - g.addNode(v) - } - - edges, ok := g.adj[u] - if !ok { - edges = map[util.T]struct{}{} - g.adj[u] = edges - } - - edges[v] = struct{}{} - - edges, ok = g.radj[v] - if !ok { - edges = map[util.T]struct{}{} - g.radj[v] = edges - } - - edges[u] = struct{}{} -} - -func (g *Graph) addNode(n util.T) { - g.nodes[n] = struct{}{} -} - -type graphSort struct { - sorted []util.T - deps func(util.T) map[util.T]struct{} - marked map[util.T]struct{} - temp map[util.T]struct{} -} - -func (sort *graphSort) Marked(node util.T) bool { - _, marked := sort.marked[node] - return marked -} - -func (sort *graphSort) Visit(node util.T) (ok bool) { - if _, ok := sort.temp[node]; ok { - return false - } - if sort.Marked(node) { - return true - } - sort.temp[node] = struct{}{} - for other := range sort.deps(node) { - if !sort.Visit(other) { - return false - } - } - sort.marked[node] = struct{}{} - delete(sort.temp, node) - sort.sorted = append(sort.sorted, node) - return true -} - -// GraphTraversal is a Traversal that understands the dependency graph -type GraphTraversal struct { - graph *Graph - visited map[util.T]struct{} -} - -// NewGraphTraversal returns a Traversal for the dependency graph -func NewGraphTraversal(graph *Graph) *GraphTraversal { - return &GraphTraversal{ - graph: graph, - visited: map[util.T]struct{}{}, - } -} - -// Edges lists all dependency connections for a given node -func (g *GraphTraversal) Edges(x util.T) []util.T { - r := 
[]util.T{} - for v := range g.graph.Dependencies(x) { - r = append(r, v) - } - return r -} - -// Visited returns whether a node has been visited, setting a node to visited if not -func (g *GraphTraversal) Visited(u util.T) bool { - _, ok := g.visited[u] - g.visited[u] = struct{}{} - return ok -} - -type unsafePair struct { - Expr *Expr - Vars VarSet -} - -type unsafeVarLoc struct { - Var Var - Loc *Location -} - -type unsafeVars map[*Expr]VarSet - -func (vs unsafeVars) Add(e *Expr, v Var) { - if u, ok := vs[e]; ok { - u[v] = struct{}{} - } else { - vs[e] = VarSet{v: struct{}{}} - } -} - -func (vs unsafeVars) Set(e *Expr, s VarSet) { - vs[e] = s -} - -func (vs unsafeVars) Update(o unsafeVars) { - for k, v := range o { - if _, ok := vs[k]; !ok { - vs[k] = VarSet{} - } - vs[k].Update(v) - } -} - -func (vs unsafeVars) Vars() (result []unsafeVarLoc) { - - locs := map[Var]*Location{} - - // If var appears in multiple sets then pick first by location. - for expr, vars := range vs { - for v := range vars { - if locs[v].Compare(expr.Location) > 0 { - locs[v] = expr.Location - } - } - } - - for v, loc := range locs { - result = append(result, unsafeVarLoc{ - Var: v, - Loc: loc, - }) - } - - sort.Slice(result, func(i, j int) bool { - return result[i].Loc.Compare(result[j].Loc) < 0 - }) - - return result -} - -func (vs unsafeVars) Slice() (result []unsafePair) { - for expr, vs := range vs { - result = append(result, unsafePair{ - Expr: expr, - Vars: vs, - }) - } - return -} - -// reorderBodyForSafety returns a copy of the body ordered such that -// left to right evaluation of the body will not encounter unbound variables -// in input positions or negated expressions. -// -// Expressions are added to the re-ordered body as soon as they are considered -// safe. If multiple expressions become safe in the same pass, they are added -// in their original order. This results in minimal re-ordering of the body. 
-// -// If the body cannot be reordered to ensure safety, the second return value -// contains a mapping of expressions to unsafe variables in those expressions. -func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) { - - bodyVars := body.Vars(SafetyCheckVisitorParams) - reordered := make(Body, 0, len(body)) - safe := VarSet{} - unsafe := unsafeVars{} - - for _, e := range body { - for v := range e.Vars(SafetyCheckVisitorParams) { - if globals.Contains(v) { - safe.Add(v) - } else { - unsafe.Add(e, v) - } - } - } - - for { - n := len(reordered) - - for _, e := range body { - if reordered.Contains(e) { - continue - } - - ovs := outputVarsForExpr(e, arity, safe) - - // check closures: is this expression closing over variables that - // haven't been made safe by what's already included in `reordered`? - vs := unsafeVarsInClosures(e) - cv := vs.Intersect(bodyVars).Diff(globals) - uv := cv.Diff(outputVarsForBody(reordered, arity, safe)) - - if len(uv) > 0 { - if uv.Equal(ovs) { // special case "closure-self" - continue - } - unsafe.Set(e, uv) - } - - for v := range unsafe[e] { - if ovs.Contains(v) || safe.Contains(v) { - delete(unsafe[e], v) - } - } - - if len(unsafe[e]) == 0 { - delete(unsafe, e) - reordered.Append(e) - safe.Update(ovs) // this expression's outputs are safe - } - } - - if len(reordered) == n { // fixed point, could not add any expr of body - break - } - } - - // Recursively visit closures and perform the safety checks on them. - // Update the globals at each expression to include the variables that could - // be closed over. 
- g := globals.Copy() - for i, e := range reordered { - if i > 0 { - g.Update(reordered[i-1].Vars(SafetyCheckVisitorParams)) - } - xform := &bodySafetyTransformer{ - builtins: builtins, - arity: arity, - current: e, - globals: g, - unsafe: unsafe, - } - NewGenericVisitor(xform.Visit).Walk(e) - } - - return reordered, unsafe -} - -type bodySafetyTransformer struct { - builtins map[string]*Builtin - arity func(Ref) int - current *Expr - globals VarSet - unsafe unsafeVars -} - -func (xform *bodySafetyTransformer) Visit(x interface{}) bool { - switch term := x.(type) { - case *Term: - switch x := term.Value.(type) { - case *object: - cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { - kcpy := k.Copy() - NewGenericVisitor(xform.Visit).Walk(kcpy) - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return kcpy, vcpy, nil - }) - term.Value = cpy - return true - case *set: - cpy, _ := x.Map(func(v *Term) (*Term, error) { - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return vcpy, nil - }) - term.Value = cpy - return true - case *ArrayComprehension: - xform.reorderArrayComprehensionSafety(x) - return true - case *ObjectComprehension: - xform.reorderObjectComprehensionSafety(x) - return true - case *SetComprehension: - xform.reorderSetComprehensionSafety(x) - return true - } - case *Expr: - if ev, ok := term.Terms.(*Every); ok { - xform.globals.Update(ev.KeyValueVars()) - ev.Body = xform.reorderComprehensionSafety(NewVarSet(), ev.Body) - return true - } - } - return false -} - -func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body { - bv := body.Vars(SafetyCheckVisitorParams) - bv.Update(xform.globals) - uv := tv.Diff(bv) - for v := range uv { - xform.unsafe.Add(xform.current, v) - } - - r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body) - if len(u) == 0 { - return r - } - - xform.unsafe.Update(u) - return body -} - -func (xform *bodySafetyTransformer) 
reorderArrayComprehensionSafety(ac *ArrayComprehension) { - ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body) -} - -func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) { - tv := oc.Key.Vars() - tv.Update(oc.Value.Vars()) - oc.Body = xform.reorderComprehensionSafety(tv, oc.Body) -} - -func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) { - sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body) -} - -// unsafeVarsInClosures collects vars that are contained in closures within -// this expression. -func unsafeVarsInClosures(e *Expr) VarSet { - vs := VarSet{} - WalkClosures(e, func(x interface{}) bool { - vis := &VarVisitor{vars: vs} - if ev, ok := x.(*Every); ok { - vis.Walk(ev.Body) - return true - } - vis.Walk(x) - return true - }) - return vs -} - -// OutputVarsFromBody returns all variables which are the "output" for -// the given body. For safety checks this means that they would be -// made safe by the body. -func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { - return outputVarsForBody(body, c.GetArity, safe) -} - -func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet { - o := safe.Copy() - for _, e := range body { - o.Update(outputVarsForExpr(e, arity, o)) - } - return o.Diff(safe) -} - -// OutputVarsFromExpr returns all variables which are the "output" for -// the given expression. For safety checks this means that they would be -// made safe by the expr. -func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { - return outputVarsForExpr(expr, c.GetArity, safe) -} - -func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet) VarSet { - - // Negated expressions must be safe. - if expr.Negated { - return VarSet{} - } - - // With modifier inputs must be safe. 
- for _, with := range expr.With { - vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) - vis.Walk(with) - vars := vis.Vars() - unsafe := vars.Diff(safe) - if len(unsafe) > 0 { - return VarSet{} - } - } - - switch terms := expr.Terms.(type) { - case *Term: - return outputVarsForTerms(expr, safe) - case []*Term: - if expr.IsEquality() { - return outputVarsForExprEq(expr, safe) - } - - operator, ok := terms[0].Value.(Ref) - if !ok { - return VarSet{} - } - - ar := arity(operator) - if ar < 0 { - return VarSet{} - } - - return outputVarsForExprCall(expr, ar, safe, terms) - case *Every: - return outputVarsForTerms(terms.Domain, safe) - default: - panic("illegal expression") - } -} - -func outputVarsForExprEq(expr *Expr, safe VarSet) VarSet { - - if !validEqAssignArgCount(expr) { - return safe - } - - output := outputVarsForTerms(expr, safe) - output.Update(safe) - output.Update(Unify(output, expr.Operand(0), expr.Operand(1))) - - return output.Diff(safe) -} - -func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term) VarSet { - - output := outputVarsForTerms(expr, safe) - - numInputTerms := arity + 1 - if numInputTerms >= len(terms) { - return output - } - - params := VarVisitorParams{ - SkipClosures: true, - SkipSets: true, - SkipObjectKeys: true, - SkipRefHead: true, - } - vis := NewVarVisitor().WithParams(params) - vis.Walk(Args(terms[:numInputTerms])) - unsafe := vis.Vars().Diff(output).Diff(safe) - - if len(unsafe) > 0 { - return VarSet{} - } - - vis = NewVarVisitor().WithParams(params) - vis.Walk(Args(terms[numInputTerms:])) - output.Update(vis.vars) - return output -} - -func outputVarsForTerms(expr interface{}, safe VarSet) VarSet { - output := VarSet{} - WalkTerms(expr, func(x *Term) bool { - switch r := x.Value.(type) { - case *SetComprehension, *ArrayComprehension, *ObjectComprehension: - return true - case Ref: - if !isRefSafe(r, safe) { - return true - } - output.Update(r.OutputVars()) - return false - } - return false - }) - 
return output -} - -type equalityFactory struct { - gen *localVarGenerator -} - -func newEqualityFactory(gen *localVarGenerator) *equalityFactory { - return &equalityFactory{gen} -} - -func (f *equalityFactory) Generate(other *Term) *Expr { - term := NewTerm(f.gen.Generate()).SetLocation(other.Location) - expr := Equality.Expr(term, other) - expr.Generated = true - expr.Location = other.Location - return expr -} - -type localVarGenerator struct { - exclude VarSet - suffix string - next int -} - -func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator { - exclude := NewVarSet() - vis := &VarVisitor{vars: exclude} - for _, key := range sorted { - vis.Walk(modules[key]) - } - return &localVarGenerator{exclude: exclude, next: 0} -} - -func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator { - exclude := NewVarSet() - vis := &VarVisitor{vars: exclude} - vis.Walk(node) - return &localVarGenerator{exclude: exclude, suffix: suffix, next: 0} -} - -func (l *localVarGenerator) Generate() Var { - for { - result := Var("__local" + l.suffix + strconv.Itoa(l.next) + "__") - l.next++ - if !l.exclude.Contains(result) { - return result - } - } -} - -func getGlobals(pkg *Package, rules []Ref, imports []*Import) map[Var]*usedRef { - - globals := make(map[Var]*usedRef, len(rules)) // NB: might grow bigger with imports - - // Populate globals with exports within the package. - for _, ref := range rules { - v := ref[0].Value.(Var) - globals[v] = &usedRef{ref: pkg.Path.Append(StringTerm(string(v)))} - } - - // Populate globals with imports. 
- for _, imp := range imports { - path := imp.Path.Value.(Ref) - if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { - continue // ignore future and rego imports - } - globals[imp.Name()] = &usedRef{ref: path} - } - - return globals -} - -func requiresEval(x *Term) bool { - if x == nil { - return false - } - return ContainsRefs(x) || ContainsComprehensions(x) -} - -func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref { - - r := Ref{} - for i, x := range ref { - switch v := x.Value.(type) { - case Var: - if g, ok := globals[v]; ok && !ignore.Contains(v) { - cpy := g.ref.Copy() - for i := range cpy { - cpy[i].SetLocation(x.Location) - } - if i == 0 { - r = cpy - } else { - r = append(r, NewTerm(cpy).SetLocation(x.Location)) - } - g.used = true - } else { - r = append(r, x) - } - case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - r = append(r, resolveRefsInTerm(globals, ignore, x)) - default: - r = append(r, x) - } - } - - return r -} - -type usedRef struct { - ref Ref - used bool -} - -func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error { - ignore := &declaredVarStack{} - - vars := NewVarSet() - var vis *GenericVisitor - var err error - - // Walk args to collect vars and transform body so that callers can shadow - // root documents. - vis = NewGenericVisitor(func(x interface{}) bool { - if err != nil { - return true - } - switch x := x.(type) { - case Var: - vars.Add(x) - - // Object keys cannot be pattern matched so only walk values. - case *object: - x.Foreach(func(_, v *Term) { - vis.Walk(v) - }) - - // Skip terms that could contain vars that cannot be pattern matched. - case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - return true - - case *Term: - if _, ok := x.Value.(Ref); ok { - if RootDocumentRefs.Contains(x) { - // We could support args named input, data, etc. 
however - // this would require rewriting terms in the head and body. - // Preventing root document shadowing is simpler, and - // arguably, will prevent confusing names from being used. - // NOTE: this check is also performed as part of strict-mode in - // checkRootDocumentOverrides. - err = fmt.Errorf("args must not shadow %v (use a different variable name)", x) - return true - } - } - } - return false - }) - - vis.Walk(rule.Head.Args) - - if err != nil { - return err - } - - ignore.Push(vars) - ignore.Push(declaredVars(rule.Body)) - - ref := rule.Head.Ref() - for i := 1; i < len(ref); i++ { - ref[i] = resolveRefsInTerm(globals, ignore, ref[i]) - } - if rule.Head.Key != nil { - rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key) - } - - if rule.Head.Value != nil { - rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value) - } - - rule.Body = resolveRefsInBody(globals, ignore, rule.Body) - return nil -} - -func resolveRefsInBody(globals map[Var]*usedRef, ignore *declaredVarStack, body Body) Body { - r := make([]*Expr, 0, len(body)) - for _, expr := range body { - r = append(r, resolveRefsInExpr(globals, ignore, expr)) - } - return r -} - -func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr *Expr) *Expr { - cpy := *expr - switch ts := expr.Terms.(type) { - case *Term: - cpy.Terms = resolveRefsInTerm(globals, ignore, ts) - case []*Term: - buf := make([]*Term, len(ts)) - for i := 0; i < len(ts); i++ { - buf[i] = resolveRefsInTerm(globals, ignore, ts[i]) - } - cpy.Terms = buf - case *SomeDecl: - if val, ok := ts.Symbols[0].Value.(Call); ok { - cpy.Terms = &SomeDecl{Symbols: []*Term{CallTerm(resolveRefsInTermSlice(globals, ignore, val)...)}} - } - case *Every: - locals := NewVarSet() - if ts.Key != nil { - locals.Update(ts.Key.Vars()) - } - locals.Update(ts.Value.Vars()) - ignore.Push(locals) - cpy.Terms = &Every{ - Key: ts.Key.Copy(), // TODO(sr): do more? - Value: ts.Value.Copy(), // TODO(sr): do more? 
- Domain: resolveRefsInTerm(globals, ignore, ts.Domain), - Body: resolveRefsInBody(globals, ignore, ts.Body), - } - ignore.Pop() - } - for _, w := range cpy.With { - w.Target = resolveRefsInTerm(globals, ignore, w.Target) - w.Value = resolveRefsInTerm(globals, ignore, w.Value) - } - return &cpy -} - -func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term *Term) *Term { - switch v := term.Value.(type) { - case Var: - if g, ok := globals[v]; ok && !ignore.Contains(v) { - cpy := g.ref.Copy() - for i := range cpy { - cpy[i].SetLocation(term.Location) - } - g.used = true - return NewTerm(cpy).SetLocation(term.Location) - } - return term - case Ref: - fqn := resolveRef(globals, ignore, v) - cpy := *term - cpy.Value = fqn - return &cpy - case *object: - cpy := *term - cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) { - k = resolveRefsInTerm(globals, ignore, k) - v = resolveRefsInTerm(globals, ignore, v) - return k, v, nil - }) - return &cpy - case *Array: - cpy := *term - cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...) 
- return &cpy - case Call: - cpy := *term - cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v)) - return &cpy - case Set: - s, _ := v.Map(func(e *Term) (*Term, error) { - return resolveRefsInTerm(globals, ignore, e), nil - }) - cpy := *term - cpy.Value = s - return &cpy - case *ArrayComprehension: - ac := &ArrayComprehension{} - ignore.Push(declaredVars(v.Body)) - ac.Term = resolveRefsInTerm(globals, ignore, v.Term) - ac.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = ac - ignore.Pop() - return &cpy - case *ObjectComprehension: - oc := &ObjectComprehension{} - ignore.Push(declaredVars(v.Body)) - oc.Key = resolveRefsInTerm(globals, ignore, v.Key) - oc.Value = resolveRefsInTerm(globals, ignore, v.Value) - oc.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = oc - ignore.Pop() - return &cpy - case *SetComprehension: - sc := &SetComprehension{} - ignore.Push(declaredVars(v.Body)) - sc.Term = resolveRefsInTerm(globals, ignore, v.Term) - sc.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = sc - ignore.Pop() - return &cpy - default: - return term - } -} - -func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term { - cpy := make([]*Term, terms.Len()) - for i := 0; i < terms.Len(); i++ { - cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i)) - } - return cpy -} - -func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term { - cpy := make([]*Term, len(terms)) - for i := 0; i < len(terms); i++ { - cpy[i] = resolveRefsInTerm(globals, ignore, terms[i]) - } - return cpy -} - -type declaredVarStack []VarSet - -func (s declaredVarStack) Contains(v Var) bool { - for i := len(s) - 1; i >= 0; i-- { - if _, ok := s[i][v]; ok { - return ok - } - } - return false -} - -func (s declaredVarStack) Add(v Var) { - s[len(s)-1].Add(v) -} - -func (s *declaredVarStack) Push(vs VarSet) { - *s = append(*s, 
vs) -} - -func (s *declaredVarStack) Pop() { - curr := *s - *s = curr[:len(curr)-1] -} - -func declaredVars(x interface{}) VarSet { - vars := NewVarSet() - vis := NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case *Expr: - if x.IsAssignment() && validEqAssignArgCount(x) { - WalkVars(x.Operand(0), func(v Var) bool { - vars.Add(v) - return false - }) - } else if decl, ok := x.Terms.(*SomeDecl); ok { - for i := range decl.Symbols { - switch val := decl.Symbols[i].Value.(type) { - case Var: - vars.Add(val) - case Call: - args := val[1:] - if len(args) == 3 { // some x, y in xs - WalkVars(args[1], func(v Var) bool { - vars.Add(v) - return false - }) - } - // some x in xs - WalkVars(args[0], func(v Var) bool { - vars.Add(v) - return false - }) - } - } - } - case *ArrayComprehension, *SetComprehension, *ObjectComprehension: - return true - } - return false - }) - vis.Walk(x) - return vars -} - -// rewriteComprehensionTerms will rewrite comprehensions so that the term part -// is bound to a variable in the body. This allows any type of term to be used -// in the term part (even if the term requires evaluation.) 
-// -// For instance, given the following comprehension: -// -// [x[0] | x = y[_]; y = [1,2,3]] -// -// The comprehension would be rewritten as: -// -// [__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]] -func rewriteComprehensionTerms(f *equalityFactory, node interface{}) (interface{}, error) { - return TransformComprehensions(node, func(x interface{}) (Value, error) { - switch x := x.(type) { - case *ArrayComprehension: - if requiresEval(x.Term) { - expr := f.Generate(x.Term) - x.Term = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - case *SetComprehension: - if requiresEval(x.Term) { - expr := f.Generate(x.Term) - x.Term = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - case *ObjectComprehension: - if requiresEval(x.Key) { - expr := f.Generate(x.Key) - x.Key = expr.Operand(0) - x.Body.Append(expr) - } - if requiresEval(x.Value) { - expr := f.Generate(x.Value) - x.Value = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - } - panic("illegal type") - }) -} - -// rewriteEquals will rewrite exprs under x as unification calls instead of == -// calls. For example: -// -// data.foo == data.bar is rewritten as data.foo = data.bar -// -// This stage should only run the safety check (since == is a built-in with no -// outputs, so the inputs must not be marked as safe.) -// -// This stage is not executed by the query compiler by default because when -// callers specify == instead of = they expect to receive a true/false/undefined -// result back whereas with = the result is only ever true/undefined. For -// partial evaluation cases we do want to rewrite == to = to simplify the -// result. 
-func rewriteEquals(x interface{}) (modified bool) { - doubleEq := Equal.Ref() - unifyOp := Equality.Ref() - t := NewGenericTransformer(func(x interface{}) (interface{}, error) { - if x, ok := x.(*Expr); ok && x.IsCall() { - operator := x.Operator() - if operator.Equal(doubleEq) && len(x.Operands()) == 2 { - modified = true - x.SetOperator(NewTerm(unifyOp)) - } - } - return x, nil - }) - _, _ = Transform(t, x) // ignore error - return modified -} - -func rewriteTestEqualities(f *equalityFactory, body Body) Body { - result := make(Body, 0, len(body)) - for _, expr := range body { - // We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before - // reaching the negation check. - if !expr.Negated && !expr.Generated { - switch { - case expr.IsEquality(): - terms := expr.Terms.([]*Term) - result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result) - result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result) - case expr.IsEvery(): - // We rewrite equalities inside of every-bodies as a fail here will be the cause of the test-rule fail. - // Failures inside other expressions with closures, such as comprehensions, won't cause the test-rule to fail, so we skip those. - every := expr.Terms.(*Every) - every.Body = rewriteTestEqualities(f, every.Body) - } - } - result = appendExpr(result, expr) - } - return result -} - -func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch term.Value.(type) { - case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension: - generated := f.Generate(term) - generated.With = original.With - result.Append(generated) - connectGeneratedExprs(original, generated) - return result, result[len(result)-1].Operand(0) - } - return result, term -} - -// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and -// comprehensions) are bound to vars earlier in the query. 
This translation -// results in eager evaluation. -// -// For instance, given the following query: -// -// foo(data.bar) = 1 -// -// The rewritten version will be: -// -// __local0__ = data.bar; foo(__local0__) = 1 -func rewriteDynamics(f *equalityFactory, body Body) Body { - result := make(Body, 0, len(body)) - for _, expr := range body { - switch { - case expr.IsEquality(): - result = rewriteDynamicsEqExpr(f, expr, result) - case expr.IsCall(): - result = rewriteDynamicsCallExpr(f, expr, result) - case expr.IsEvery(): - result = rewriteDynamicsEveryExpr(f, expr, result) - default: - result = rewriteDynamicsTermExpr(f, expr, result) - } - } - return result -} - -func appendExpr(body Body, expr *Expr) Body { - body.Append(expr) - return body -} - -func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body { - if !validEqAssignArgCount(expr) { - return appendExpr(result, expr) - } - terms := expr.Terms.([]*Term) - result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result) - result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result) - return appendExpr(result, expr) -} - -func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body { - terms := expr.Terms.([]*Term) - for i := 1; i < len(terms); i++ { - result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result) - } - return appendExpr(result, expr) -} - -func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body { - ev := expr.Terms.(*Every) - result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result) - ev.Body = rewriteDynamics(f, ev.Body) - return appendExpr(result, expr) -} - -func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body { - term := expr.Terms.(*Term) - result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result) - return appendExpr(result, expr) -} - -func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch v := term.Value.(type) { - 
case Ref: - for i := 1; i < len(v); i++ { - result, v[i] = rewriteDynamicsOne(original, f, v[i], result) - } - case *ArrayComprehension: - v.Body = rewriteDynamics(f, v.Body) - case *SetComprehension: - v.Body = rewriteDynamics(f, v.Body) - case *ObjectComprehension: - v.Body = rewriteDynamics(f, v.Body) - default: - result, term = rewriteDynamicsOne(original, f, term, result) - } - return result, term -} - -func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch v := term.Value.(type) { - case Ref: - for i := 1; i < len(v); i++ { - result, v[i] = rewriteDynamicsOne(original, f, v[i], result) - } - generated := f.Generate(term) - generated.With = original.With - result.Append(generated) - connectGeneratedExprs(original, generated) - return result, result[len(result)-1].Operand(0) - case *Array: - for i := 0; i < v.Len(); i++ { - var t *Term - result, t = rewriteDynamicsOne(original, f, v.Elem(i), result) - v.set(i, t) - } - return result, term - case *object: - cpy := NewObject() - v.Foreach(func(key, value *Term) { - result, key = rewriteDynamicsOne(original, f, key, result) - result, value = rewriteDynamicsOne(original, f, value, result) - cpy.Insert(key, value) - }) - return result, NewTerm(cpy).SetLocation(term.Location) - case Set: - cpy := NewSet() - for _, term := range v.Slice() { - var rw *Term - result, rw = rewriteDynamicsOne(original, f, term, result) - cpy.Add(rw) - } - return result, NewTerm(cpy).SetLocation(term.Location) - case *ArrayComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - case *SetComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - case 
*ObjectComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - } - return result, term -} - -func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) { - body = rewriteDynamics(f, body) - generated := f.Generate(term) - generated.With = original.With - return body, generated -} - -func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) { - for i := range rule.Head.Args { - support, output := expandExprTerm(gen, rule.Head.Args[i]) - for j := range support { - rule.Body.Append(support[j]) - } - rule.Head.Args[i] = output - } - if rule.Head.Key != nil { - support, output := expandExprTerm(gen, rule.Head.Key) - for i := range support { - rule.Body.Append(support[i]) - } - rule.Head.Key = output - } - if rule.Head.Value != nil { - support, output := expandExprTerm(gen, rule.Head.Value) - for i := range support { - rule.Body.Append(support[i]) - } - rule.Head.Value = output - } -} - -func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body { - cpy := make(Body, 0, len(body)) - for i := 0; i < len(body); i++ { - for _, expr := range expandExpr(gen, body[i]) { - cpy.Append(expr) - } - } - return cpy -} - -func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { - for i := range expr.With { - extras, value := expandExprTerm(gen, expr.With[i].Value) - expr.With[i].Value = value - result = append(result, extras...) - } - switch terms := expr.Terms.(type) { - case *Term: - extras, term := expandExprTerm(gen, terms) - if len(expr.With) > 0 { - for i := range extras { - extras[i].With = expr.With - } - } - result = append(result, extras...) 
- expr.Terms = term - result = append(result, expr) - case []*Term: - for i := 1; i < len(terms); i++ { - var extras []*Expr - extras, terms[i] = expandExprTerm(gen, terms[i]) - connectGeneratedExprs(expr, extras...) - if len(expr.With) > 0 { - for i := range extras { - extras[i].With = expr.With - } - } - result = append(result, extras...) - } - result = append(result, expr) - case *Every: - var extras []*Expr - - term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location) - eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location) - eq.Generated = true - eq.With = expr.With - extras = expandExpr(gen, eq) - terms.Domain = term - - terms.Body = rewriteExprTermsInBody(gen, terms.Body) - result = append(result, extras...) - result = append(result, expr) - } - return -} - -func connectGeneratedExprs(parent *Expr, children ...*Expr) { - for _, child := range children { - child.generatedFrom = parent - parent.generates = append(parent.generates, child) - } -} - -func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) { - output = term - switch v := term.Value.(type) { - case Call: - for i := 1; i < len(v); i++ { - var extras []*Expr - extras, v[i] = expandExprTerm(gen, v[i]) - support = append(support, extras...) - } - output = NewTerm(gen.Generate()).SetLocation(term.Location) - expr := v.MakeExpr(output).SetLocation(term.Location) - expr.Generated = true - support = append(support, expr) - case Ref: - support = expandExprRef(gen, v) - case *Array: - support = expandExprTermArray(gen, v) - case *object: - cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { - extras1, expandedKey := expandExprTerm(gen, k) - extras2, expandedValue := expandExprTerm(gen, v) - support = append(support, extras1...) - support = append(support, extras2...) 
- return expandedKey, expandedValue, nil - }) - output = NewTerm(cpy).SetLocation(term.Location) - case Set: - cpy, _ := v.Map(func(x *Term) (*Term, error) { - extras, expanded := expandExprTerm(gen, x) - support = append(support, extras...) - return expanded, nil - }) - output = NewTerm(cpy).SetLocation(term.Location) - case *ArrayComprehension: - support, term := expandExprTerm(gen, v.Term) - for i := range support { - v.Body.Append(support[i]) - } - v.Term = term - v.Body = rewriteExprTermsInBody(gen, v.Body) - case *SetComprehension: - support, term := expandExprTerm(gen, v.Term) - for i := range support { - v.Body.Append(support[i]) - } - v.Term = term - v.Body = rewriteExprTermsInBody(gen, v.Body) - case *ObjectComprehension: - support, key := expandExprTerm(gen, v.Key) - for i := range support { - v.Body.Append(support[i]) - } - v.Key = key - support, value := expandExprTerm(gen, v.Value) - for i := range support { - v.Body.Append(support[i]) - } - v.Value = value - v.Body = rewriteExprTermsInBody(gen, v.Body) - } - return -} - -func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) { - // Start by calling a normal expandExprTerm on all terms. - support = expandExprTermSlice(gen, v) - - // Rewrite references in order to support indirect references. We rewrite - // e.g. - // - // [1, 2, 3][i] - // - // to - // - // __local_var = [1, 2, 3] - // __local_var[i] - // - // to support these. This only impacts the reference subject, i.e. the - // first item in the slice. 
- var subject = v[0] - switch subject.Value.(type) { - case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - f := newEqualityFactory(gen) - assignToLocal := f.Generate(subject) - support = append(support, assignToLocal) - v[0] = assignToLocal.Operand(0) - } - return -} - -func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) { - for i := 0; i < arr.Len(); i++ { - extras, v := expandExprTerm(gen, arr.Elem(i)) - arr.set(i, v) - support = append(support, extras...) - } - return -} - -func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) { - for i := 0; i < len(v); i++ { - var extras []*Expr - extras, v[i] = expandExprTerm(gen, v[i]) - support = append(support, extras...) - } - return -} - -type localDeclaredVars struct { - vars []*declaredVarSet - - // rewritten contains a mapping of *all* user-defined variables - // that have been rewritten whereas vars contains the state - // from the current query (not any nested queries, and all vars - // seen). 
- rewritten map[Var]Var - - // indicates if an assignment (:= operator) has been seen *ever* - assignment bool -} - -type varOccurrence int - -const ( - newVar varOccurrence = iota - argVar - seenVar - assignedVar - declaredVar -) - -type declaredVarSet struct { - vs map[Var]Var - reverse map[Var]Var - occurrence map[Var]varOccurrence - count map[Var]int -} - -func newDeclaredVarSet() *declaredVarSet { - return &declaredVarSet{ - vs: map[Var]Var{}, - reverse: map[Var]Var{}, - occurrence: map[Var]varOccurrence{}, - count: map[Var]int{}, - } -} - -func newLocalDeclaredVars() *localDeclaredVars { - return &localDeclaredVars{ - vars: []*declaredVarSet{newDeclaredVarSet()}, - rewritten: map[Var]Var{}, - } -} - -func (s *localDeclaredVars) Copy() *localDeclaredVars { - stack := &localDeclaredVars{ - vars: []*declaredVarSet{}, - rewritten: map[Var]Var{}, - } - - for i := range s.vars { - stack.vars = append(stack.vars, newDeclaredVarSet()) - for k, v := range s.vars[i].vs { - stack.vars[0].vs[k] = v - } - for k, v := range s.vars[i].reverse { - stack.vars[0].reverse[k] = v - } - for k, v := range s.vars[i].count { - stack.vars[0].count[k] = v - } - for k, v := range s.vars[i].occurrence { - stack.vars[0].occurrence[k] = v - } - } - - for k, v := range s.rewritten { - stack.rewritten[k] = v - } - - return stack -} - -func (s *localDeclaredVars) Push() { - s.vars = append(s.vars, newDeclaredVarSet()) -} - -func (s *localDeclaredVars) Pop() *declaredVarSet { - sl := s.vars - curr := sl[len(sl)-1] - s.vars = sl[:len(sl)-1] - return curr -} - -func (s localDeclaredVars) Peek() *declaredVarSet { - return s.vars[len(s.vars)-1] -} - -func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) { - elem := s.vars[len(s.vars)-1] - elem.vs[x] = y - elem.reverse[y] = x - elem.occurrence[x] = occurrence - - elem.count[x] = 1 - - // If the variable has been rewritten (where x != y, with y being - // the generated value), store it in the map of rewritten vars. 
- // Assume that the generated values are unique for the compilation. - if !x.Equal(y) { - s.rewritten[y] = x - } -} - -func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) { - for i := len(s.vars) - 1; i >= 0; i-- { - if y, ok = s.vars[i].vs[x]; ok { - return - } - } - return -} - -// Occurrence returns a flag that indicates whether x has occurred in the -// current scope. -func (s localDeclaredVars) Occurrence(x Var) varOccurrence { - return s.vars[len(s.vars)-1].occurrence[x] -} - -// GlobalOccurrence returns a flag that indicates whether x has occurred in the -// global scope. -func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) { - for i := len(s.vars) - 1; i >= 0; i-- { - if occ, ok := s.vars[i].occurrence[x]; ok { - return occ, true - } - } - return newVar, false -} - -// Seen marks x as seen by incrementing its counter -func (s localDeclaredVars) Seen(x Var) { - for i := len(s.vars) - 1; i >= 0; i-- { - dvs := s.vars[i] - if c, ok := dvs.count[x]; ok { - dvs.count[x] = c + 1 - return - } - } - - s.vars[len(s.vars)-1].count[x] = 1 -} - -// Count returns how many times x has been seen -func (s localDeclaredVars) Count(x Var) int { - for i := len(s.vars) - 1; i >= 0; i-- { - if c, ok := s.vars[i].count[x]; ok { - return c - } - } - - return 0 -} - -// rewriteLocalVars rewrites bodies to remove assignment/declaration -// expressions. For example: -// -// a := 1; p[a] -// -// Is rewritten to: -// -// __local0__ = 1; p[__local0__] -// -// During rewriting, assignees are validated to prevent use before declaration. 
-func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, strict bool) (Body, map[Var]Var, Errors) { - var errs Errors - body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs, strict) - return body, stack.Peek().vs, errs -} - -func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors, strict bool) (Body, Errors) { - - var cpy Body - - for i := range body { - var expr *Expr - switch { - case body[i].IsAssignment(): - stack.assignment = true - expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs, strict) - case body[i].IsSome(): - expr, errs = rewriteSomeDeclStatement(g, stack, body[i], errs, strict) - case body[i].IsEvery(): - expr, errs = rewriteEveryStatement(g, stack, body[i], errs, strict) - default: - expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs, strict) - } - if expr != nil { - cpy.Append(expr) - } - } - - // If the body only contained a var statement it will be empty at this - // point. Append true to the body to ensure that it's non-empty (zero length - // bodies are not supported.) - if len(cpy) == 0 { - cpy.Append(NewExpr(BooleanTerm(true))) - } - - errs = checkUnusedAssignedVars(body, stack, used, errs, strict) - return cpy, checkUnusedDeclaredVars(body, stack, used, cpy, errs) -} - -func checkUnusedAssignedVars(body Body, stack *localDeclaredVars, used VarSet, errs Errors, strict bool) Errors { - - if !strict || len(errs) > 0 { - return errs - } - - dvs := stack.Peek() - unused := NewVarSet() - - for v, occ := range dvs.occurrence { - // A var that was assigned in this scope must have been seen (used) more than once (the time of assignment) in - // the same, or nested, scope to be counted as used. 
- if !v.IsWildcard() && stack.Count(v) <= 1 && occ == assignedVar { - unused.Add(dvs.vs[v]) - } - } - - rewrittenUsed := NewVarSet() - for v := range used { - if gv, ok := stack.Declared(v); ok { - rewrittenUsed.Add(gv) - } else { - rewrittenUsed.Add(v) - } - } - - unused = unused.Diff(rewrittenUsed) - - for _, gv := range unused.Sorted() { - found := false - for i := range body { - if body[i].Vars(VarVisitorParams{}).Contains(gv) { - errs = append(errs, NewError(CompileErr, body[i].Loc(), "assigned var %v unused", dvs.reverse[gv])) - found = true - break - } - } - if !found { - errs = append(errs, NewError(CompileErr, body[0].Loc(), "assigned var %v unused", dvs.reverse[gv])) - } - } - - return errs -} - -func checkUnusedDeclaredVars(body Body, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors { - - // NOTE(tsandall): Do not generate more errors if there are existing - // declaration errors. - if len(errs) > 0 { - return errs - } - - dvs := stack.Peek() - declared := NewVarSet() - - for v, occ := range dvs.occurrence { - if occ == declaredVar { - declared.Add(dvs.vs[v]) - } - } - - bodyvars := cpy.Vars(VarVisitorParams{}) - - for v := range used { - if gv, ok := stack.Declared(v); ok { - bodyvars.Add(gv) - } else { - bodyvars.Add(v) - } - } - - unused := declared.Diff(bodyvars).Diff(used) - - for _, gv := range unused.Sorted() { - rv := dvs.reverse[gv] - if !rv.IsGenerated() { - // Scan through body exprs, looking for a match between the - // bad var's original name, and each expr's declared vars. - foundUnusedVarByName := false - for i := range body { - varsDeclaredInExpr := declaredVars(body[i]) - if varsDeclaredInExpr.Contains(dvs.reverse[gv]) { - // TODO(philipc): Clean up the offset logic here when the parser - // reports more accurate locations. - errs = append(errs, NewError(CompileErr, body[i].Loc(), "declared var %v unused", dvs.reverse[gv])) - foundUnusedVarByName = true - break - } - } - // Default error location returned. 
- if !foundUnusedVarByName { - errs = append(errs, NewError(CompileErr, body[0].Loc(), "declared var %v unused", dvs.reverse[gv])) - } - } - } - - return errs -} - -func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - e := expr.Copy() - every := e.Terms.(*Every) - - errs = rewriteDeclaredVarsInTermRecursive(g, stack, every.Domain, errs, strict) - - stack.Push() - defer stack.Pop() - - // if the key exists, rewrite - if every.Key != nil { - if v := every.Key.Value.(Var); !v.IsWildcard() { - gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) - if err != nil { - return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) - } - every.Key.Value = gv - } - } else { // if the key doesn't exist, add dummy local - every.Key = NewTerm(g.Generate()) - } - - // value is always present - if v := every.Value.Value.(Var); !v.IsWildcard() { - gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) - if err != nil { - return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) - } - every.Value.Value = gv - } - - used := NewVarSet() - every.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, every.Body, errs, strict) - - return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) -} - -func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - e := expr.Copy() - decl := e.Terms.(*SomeDecl) - for i := range decl.Symbols { - switch v := decl.Symbols[i].Value.(type) { - case Var: - if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil { - return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) - } - case Call: - var key, val, container *Term - switch len(v) { - case 4: // member3 - key = v[1] - val = v[2] - container = v[3] - case 3: // member - key = NewTerm(g.Generate()) - val = v[1] - container = v[2] - } - - var rhs *Term - switch c := container.Value.(type) { - 
case Ref: - rhs = RefTerm(append(c, key)...) - default: - rhs = RefTerm(container, key) - } - e.Terms = []*Term{ - RefTerm(VarTerm(Equality.Name)), val, rhs, - } - - for _, v0 := range outputVarsForExprEq(e, container.Vars()).Sorted() { - if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil { - return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) - } - } - return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) - } - } - return nil, errs -} - -func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - vis := NewGenericVisitor(func(x interface{}) bool { - var stop bool - switch x := x.(type) { - case *Term: - stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs, strict) - case *With: - stop, errs = true, rewriteDeclaredVarsInWithRecursive(g, stack, x, errs, strict) - } - return stop - }) - vis.Walk(expr) - return expr, errs -} - -func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - - if expr.Negated { - errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression")) - return expr, errs - } - - numErrsBefore := len(errs) - - if !validEqAssignArgCount(expr) { - return expr, errs - } - - // Rewrite terms on right hand side capture seen vars and recursively - // process comprehensions before left hand side is processed. Also - // rewrite with modifier. - errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs, strict) - - for _, w := range expr.With { - errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) - } - - // Rewrite vars on left hand side with unique names. Catch redeclaration - // and invalid term types here. 
- var vis func(t *Term) bool - - vis = func(t *Term) bool { - switch v := t.Value.(type) { - case Var: - if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil { - errs = append(errs, NewError(CompileErr, t.Location, err.Error())) - } else { - t.Value = gv - } - return true - case *Array: - return false - case *object: - v.Foreach(func(_, v *Term) { - WalkTerms(v, vis) - }) - return true - case Ref: - if RootDocumentRefs.Contains(t) { - if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil { - errs = append(errs, NewError(CompileErr, t.Location, err.Error())) - } else { - t.Value = gv - } - return true - } - } - errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", TypeName(t.Value))) - return true - } - - WalkTerms(expr.Operand(0), vis) - - if len(errs) == numErrsBefore { - loc := expr.Operator()[0].Location - expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc)) - } - - return expr, errs -} - -func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) (bool, Errors) { - switch v := term.Value.(type) { - case Var: - if gv, ok := stack.Declared(v); ok { - term.Value = gv - stack.Seen(v) - } else if stack.Occurrence(v) == newVar { - stack.Insert(v, v, seenVar) - } - case Ref: - if RootDocumentRefs.Contains(term) { - x := v[0].Value.(Var) - if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar { - gv, _ := stack.Declared(x) - term.Value = gv - } - - return true, errs - } - return false, errs - case Call: - ref := v[0] - WalkVars(ref, func(v Var) bool { - if gv, ok := stack.Declared(v); ok && !gv.Equal(v) { - // We will rewrite the ref of a function call, which is never ok since we don't have first-class functions. 
- errs = append(errs, NewError(CompileErr, term.Location, "called function %s shadowed", ref)) - return true - } - return false - }) - return false, errs - case *object: - cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { - kcpy := k.Copy() - errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs, strict) - return kcpy, v, nil - }) - term.Value = cpy - case Set: - cpy, _ := v.Map(func(elem *Term) (*Term, error) { - elemcpy := elem.Copy() - errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs, strict) - return elemcpy, nil - }) - term.Value = cpy - case *ArrayComprehension: - errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs, strict) - case *SetComprehension: - errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs, strict) - case *ObjectComprehension: - errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs, strict) - default: - return false, errs - } - return true, errs -} - -func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) Errors { - WalkTerms(term, func(t *Term) bool { - var stop bool - stop, errs = rewriteDeclaredVarsInTerm(g, stack, t, errs, strict) - return stop - }) - return errs -} - -func rewriteDeclaredVarsInWithRecursive(g *localVarGenerator, stack *localDeclaredVars, w *With, errs Errors, strict bool) Errors { - // NOTE(sr): `with input as` and `with input.a.b.c as` are deliberately skipped here: `input` could - // have been shadowed by a local variable/argument but should NOT be replaced in the `with` target. - // - // We cannot drop `input` from the stack since it's conceivable to do `with input[input] as` where - // the second input is meant to be the local var. It's a terrible idea, but when you're shadowing - // `input` those might be your thing. 
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Target, errs, strict) - if sdwInput, ok := stack.Declared(InputRootDocument.Value.(Var)); ok { // Was "input" shadowed... - switch value := w.Target.Value.(type) { - case Var: - if sdwInput.Equal(value) { // ...and replaced? If so, fix it - w.Target.Value = InputRootRef - } - case Ref: - if sdwInput.Equal(value[0].Value.(Var)) { - w.Target.Value.(Ref)[0].Value = InputRootDocument.Value - } - } - } - // No special handling of the `with` value - return rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) -} - -func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Term.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Term.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Key.Vars()) - used.Update(v.Value.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, 
occ varOccurrence) (gv Var, err error) { - switch stack.Occurrence(v) { - case seenVar: - return gv, fmt.Errorf("var %v referenced above", v) - case assignedVar: - return gv, fmt.Errorf("var %v assigned above", v) - case declaredVar: - return gv, fmt.Errorf("var %v declared above", v) - case argVar: - return gv, fmt.Errorf("arg %v redeclared", v) - } - gv = g.Generate() - stack.Insert(v, gv, occ) - return -} - -// rewriteWithModifiersInBody will rewrite the body so that with modifiers do -// not contain terms that require evaluation as values. If this function -// encounters an invalid with modifier target then it will raise an error. -func rewriteWithModifiersInBody(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, body Body) (Body, *Error) { - var result Body - for i := range body { - exprs, err := rewriteWithModifier(c, unsafeBuiltinsMap, f, body[i]) - if err != nil { - return nil, err - } - if len(exprs) > 0 { - for _, expr := range exprs { - result.Append(expr) - } - } else { - result.Append(body[i]) - } - } - return result, nil -} - -func rewriteWithModifier(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, expr *Expr) ([]*Expr, *Error) { - - var result []*Expr - for i := range expr.With { - eval, err := validateWith(c, unsafeBuiltinsMap, expr, i) - if err != nil { - return nil, err - } - - if eval { - eq := f.Generate(expr.With[i].Value) - result = append(result, eq) - expr.With[i].Value = eq.Operand(0) - } - } - - return append(result, expr), nil -} - -func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr, i int) (bool, *Error) { - target, value := expr.With[i].Target, expr.With[i].Value - - // Ensure that values that are built-ins are rewritten to Ref (not Var) - if v, ok := value.Value.(Var); ok { - if _, ok := c.builtins[v.String()]; ok { - value.Value = Ref([]*Term{NewTerm(v)}) - } - } - isBuiltinRefOrVar, err := isBuiltinRefOrVar(c.builtins, unsafeBuiltinsMap, target) - if err != 
nil { - return false, err - } - - isAllowedUnknownFuncCall := false - if c.allowUndefinedFuncCalls { - switch target.Value.(type) { - case Ref, Var: - isAllowedUnknownFuncCall = true - } - } - - switch { - case isDataRef(target): - ref := target.Value.(Ref) - targetNode := c.RuleTree - for i := 0; i < len(ref)-1; i++ { - child := targetNode.Child(ref[i].Value) - if child == nil { - break - } else if len(child.Values) > 0 { - return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)") - } - targetNode = child - } - - if targetNode != nil { - // NOTE(sr): at this point in the compiler stages, we don't have a fully-populated - // TypeEnv yet -- so we have to make do with this check to see if the replacement - // target is a function. It's probably wrong for arity-0 functions, but those are - // and edge case anyways. - if child := targetNode.Child(ref[len(ref)-1].Value); child != nil { - for _, v := range child.Values { - if len(v.(*Rule).Head.Args) > 0 { - if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { - return false, err // err may be nil - } - } - } - } - } - - // If the with-value is a ref to a function, but not a call, we can't rewrite it - if r, ok := value.Value.(Ref); ok { - // TODO: check that target ref doesn't exist? 
- if valueNode := c.RuleTree.Find(r); valueNode != nil { - for _, v := range valueNode.Values { - if len(v.(*Rule).Head.Args) > 0 { - return false, nil - } - } - } - } - case isInputRef(target): // ok, valid - case isBuiltinRefOrVar: - - // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc) - // are rewritten to their proper Ref convention - if v, ok := target.Value.(Var); ok { - target.Value = Ref([]*Term{NewTerm(v)}) - } - - targetRef := target.Value.(Ref) - bi := c.builtins[targetRef.String()] // safe because isBuiltinRefOrVar checked this - if err := validateWithBuiltinTarget(bi, targetRef, target.Loc()); err != nil { - return false, err - } - - if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { - return false, err // err may be nil - } - case isAllowedUnknownFuncCall: - // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in. - return false, nil - default: - return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument) - } - return requiresEval(value), nil -} - -func validateWithBuiltinTarget(bi *Builtin, target Ref, loc *location.Location) *Error { - switch bi.Name { - case Equality.Name, - RegoMetadataChain.Name, - RegoMetadataRule.Name: - return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of %q invalid", bi.Name) - } - - switch { - case target.HasPrefix(Ref([]*Term{VarTerm("internal")})): - return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of internal function %q invalid", target) - - case bi.Relation: - return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a relation") - - case bi.Decl.Result() == nil: - return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a 
void function") - } - return nil -} - -func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]struct{}, ruleTree *TreeNode, value *Term) (bool, *Error) { - if v, ok := value.Value.(Ref); ok { - if ruleTree.Find(v) != nil { // ref exists in rule tree - return true, nil - } - } - return isBuiltinRefOrVar(bs, unsafeMap, value) -} - -func isInputRef(term *Term) bool { - if ref, ok := term.Value.(Ref); ok { - if ref.HasPrefix(InputRootRef) { - return true - } - } - return false -} - -func isDataRef(term *Term) bool { - if ref, ok := term.Value.(Ref); ok { - if ref.HasPrefix(DefaultRootRef) { - return true - } - } - return false -} - -func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) { - switch v := term.Value.(type) { - case Ref, Var: - if _, ok := unsafeBuiltinsMap[v.String()]; ok { - return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v) - } - _, ok := bs[v.String()] - return ok, nil - } - return false, nil -} - -func isVirtual(node *TreeNode, ref Ref) bool { - for i := range ref { - child := node.Child(ref[i].Value) - if child == nil { - return false - } else if len(child.Values) > 0 { - return true - } - node = child - } - return true -} - -func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) { - - if len(unsafe) == 0 { - return - } - - for _, pair := range unsafe.Vars() { - v := pair.Var - if w, ok := rewritten[v]; ok { - v = w - } - if !v.IsGenerated() { - if _, ok := futureKeywords[string(v)]; ok { - result = append(result, NewError(UnsafeVarErr, pair.Loc, - "var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v)) - continue - } - result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", v)) - } - } - - if len(result) > 0 { - return - } - - // If the expression contains unsafe generated variables, report which - // expressions 
are unsafe instead of the variables that are unsafe (since - // the latter are not meaningful to the user.) - pairs := unsafe.Slice() - - sort.Slice(pairs, func(i, j int) bool { - return pairs[i].Expr.Location.Compare(pairs[j].Expr.Location) < 0 - }) - - // Report at most one error per generated variable. - seen := NewVarSet() - - for _, expr := range pairs { - before := len(seen) - for v := range expr.Vars { - if v.IsGenerated() { - seen.Add(v) - } - } - if len(seen) > before { - result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe")) - } - } - - return -} - -func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node interface{}) Errors { - errs := make(Errors, 0) - WalkExprs(node, func(x *Expr) bool { - if x.IsCall() { - operator := x.Operator().String() - if _, ok := unsafeBuiltinsMap[operator]; ok { - errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator)) - } - } - return false - }) - return errs -} - -func rewriteVarsInRef(vars ...map[Var]Var) varRewriter { - return func(node Ref) Ref { - i, _ := TransformVars(node, func(v Var) (Value, error) { - for _, m := range vars { - if u, ok := m[v]; ok { - return u, nil - } - } - return v, nil - }) - return i.(Ref) - } -} - -// NOTE(sr): This is duplicated with compile/compile.go; but moving it into another location -// would cause a circular dependency -- the refSet definition needs ast.Ref. If we make it -// public in the ast package, the compile package could take it from there, but it would also -// increase our public interface. Let's reconsider if we need it in a third place. -type refSet struct { - s []Ref -} - -func newRefSet(x ...Ref) *refSet { - result := &refSet{} - for i := range x { - result.AddPrefix(x[i]) - } - return result -} - -// ContainsPrefix returns true if r is prefixed by any of the existing refs in the set. 
-func (rs *refSet) ContainsPrefix(r Ref) bool { - for i := range rs.s { - if r.HasPrefix(rs.s[i]) { - return true - } - } - return false -} - -// AddPrefix inserts r into the set if r is not prefixed by any existing -// refs in the set. If any existing refs are prefixed by r, those existing -// refs are removed. -func (rs *refSet) AddPrefix(r Ref) { - if rs.ContainsPrefix(r) { - return - } - cpy := []Ref{r} - for i := range rs.s { - if !rs.s[i].HasPrefix(r) { - cpy = append(cpy, rs.s[i]) - } - } - rs.s = cpy -} - -// Sorted returns a sorted slice of terms for refs in the set. -func (rs *refSet) Sorted() []*Term { - terms := make([]*Term, len(rs.s)) - for i := range rs.s { - terms[i] = NewTerm(rs.s[i]) - } - sort.Slice(terms, func(i, j int) bool { - return terms[i].Value.Compare(terms[j].Value) < 0 - }) - return terms +// OutputVarsFromExpr returns all variables which are the "output" for +// the given expression. For safety checks this means that they would be +// made safe by the expr. +func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { + return v1.OutputVarsFromExpr(c, expr, safe) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go b/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go index dd48884f9d..37ede329ea 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go +++ b/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go @@ -4,41 +4,29 @@ package ast +import v1 "github.com/open-policy-agent/opa/v1/ast" + // CompileModules takes a set of Rego modules represented as strings and // compiles them for evaluation. The keys of the map are used as filenames. func CompileModules(modules map[string]string) (*Compiler, error) { - return CompileModulesWithOpt(modules, CompileOpts{}) + return CompileModulesWithOpt(modules, CompileOpts{ + ParserOptions: ParserOptions{ + RegoVersion: DefaultRegoVersion, + }, + }) } // CompileOpts defines a set of options for the compiler. 
-type CompileOpts struct { - EnablePrintStatements bool - ParserOptions ParserOptions -} +type CompileOpts = v1.CompileOpts // CompileModulesWithOpt takes a set of Rego modules represented as strings and // compiles them for evaluation. The keys of the map are used as filenames. func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) { - - parsed := make(map[string]*Module, len(modules)) - - for f, module := range modules { - var pm *Module - var err error - if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil { - return nil, err - } - parsed[f] = pm - } - - compiler := NewCompiler().WithEnablePrintStatements(opts.EnablePrintStatements) - compiler.Compile(parsed) - - if compiler.Failed() { - return nil, compiler.Errors + if opts.ParserOptions.RegoVersion == RegoUndefined { + opts.ParserOptions.RegoVersion = DefaultRegoVersion } - return compiler, nil + return v1.CompileModulesWithOpt(modules, opts) } // MustCompileModules compiles a set of Rego modules represented as strings. If diff --git a/vendor/github.com/open-policy-agent/opa/ast/conflicts.go b/vendor/github.com/open-policy-agent/opa/ast/conflicts.go index c2713ad576..10edce382c 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/conflicts.go +++ b/vendor/github.com/open-policy-agent/opa/ast/conflicts.go @@ -5,49 +5,11 @@ package ast import ( - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // CheckPathConflicts returns a set of errors indicating paths that // are in conflict with the result of the provided callable. func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors { - var errs Errors - - root := c.RuleTree.Child(DefaultRootDocument.Value) - if root == nil { - return nil - } - - for _, node := range root.Children { - errs = append(errs, checkDocumentConflicts(node, exists, nil)...) 
- } - - return errs -} - -func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors { - - switch key := node.Key.(type) { - case String: - path = append(path, string(key)) - default: // other key types cannot conflict with data - return nil - } - - if len(node.Values) > 0 { - s := strings.Join(path, "/") - if ok, err := exists(path); err != nil { - return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())} - } else if ok { - return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)} - } - } - - var errs Errors - - for _, child := range node.Children { - errs = append(errs, checkDocumentConflicts(child, exists, path)...) - } - - return errs + return v1.CheckPathConflicts(c, exists) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/doc.go b/vendor/github.com/open-policy-agent/opa/ast/doc.go index 62b04e301e..ba974e5ba6 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/doc.go +++ b/vendor/github.com/open-policy-agent/opa/ast/doc.go @@ -1,36 +1,8 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine. -// -// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc. -// -// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. 
The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows: -// -// Module -// | -// +--- Package (Reference) -// | -// +--- Imports -// | | -// | +--- Import (Term) -// | -// +--- Rules -// | -// +--- Rule -// | -// +--- Head -// | | -// | +--- Name (Variable) -// | | -// | +--- Key (Term) -// | | -// | +--- Value (Term) -// | -// +--- Body -// | -// +--- Expression (Term | Terms | Variable Declaration) -// -// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports. +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. package ast diff --git a/vendor/github.com/open-policy-agent/opa/ast/env.go b/vendor/github.com/open-policy-agent/opa/ast/env.go index c767aafefb..ef0ccf89ce 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/env.go +++ b/vendor/github.com/open-policy-agent/opa/ast/env.go @@ -5,522 +5,8 @@ package ast import ( - "fmt" - "strings" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // TypeEnv contains type info for static analysis such as type checking. -type TypeEnv struct { - tree *typeTreeNode - next *TypeEnv - newChecker func() *typeChecker -} - -// newTypeEnv returns an empty TypeEnv. The constructor is not exported because -// type environments should only be created by the type checker. 
-func newTypeEnv(f func() *typeChecker) *TypeEnv { - return &TypeEnv{ - tree: newTypeTree(), - newChecker: f, - } -} - -// Get returns the type of x. -func (env *TypeEnv) Get(x interface{}) types.Type { - - if term, ok := x.(*Term); ok { - x = term.Value - } - - switch x := x.(type) { - - // Scalars. - case Null: - return types.NewNull() - case Boolean: - return types.NewBoolean() - case Number: - return types.NewNumber() - case String: - return types.NewString() - - // Composites. - case *Array: - static := make([]types.Type, x.Len()) - for i := range static { - tpe := env.Get(x.Elem(i).Value) - static[i] = tpe - } - - var dynamic types.Type - if len(static) == 0 { - dynamic = types.A - } - - return types.NewArray(static, dynamic) - - case *lazyObj: - return env.Get(x.force()) - case *object: - static := []*types.StaticProperty{} - var dynamic *types.DynamicProperty - - x.Foreach(func(k, v *Term) { - if IsConstant(k.Value) { - kjson, err := JSON(k.Value) - if err == nil { - tpe := env.Get(v) - static = append(static, types.NewStaticProperty(kjson, tpe)) - return - } - } - // Can't handle it as a static property, fallback to dynamic - typeK := env.Get(k.Value) - typeV := env.Get(v.Value) - dynamic = types.NewDynamicProperty(typeK, typeV) - }) - - if len(static) == 0 && dynamic == nil { - dynamic = types.NewDynamicProperty(types.A, types.A) - } - - return types.NewObject(static, dynamic) - - case Set: - var tpe types.Type - x.Foreach(func(elem *Term) { - other := env.Get(elem.Value) - tpe = types.Or(tpe, other) - }) - if tpe == nil { - tpe = types.A - } - return types.NewSet(tpe) - - // Comprehensions. 
- case *ArrayComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewArray(nil, cpy.Get(x.Term)) - } - return nil - case *ObjectComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value))) - } - return nil - case *SetComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewSet(cpy.Get(x.Term)) - } - return nil - - // Refs. - case Ref: - return env.getRef(x) - - // Vars. - case Var: - if node := env.tree.Child(x); node != nil { - return node.Value() - } - if env.next != nil { - return env.next.Get(x) - } - return nil - - // Calls. - case Call: - return nil - - default: - panic("unreachable") - } -} - -func (env *TypeEnv) getRef(ref Ref) types.Type { - - node := env.tree.Child(ref[0].Value) - if node == nil { - return env.getRefFallback(ref) - } - - return env.getRefRec(node, ref, ref[1:]) -} - -func (env *TypeEnv) getRefFallback(ref Ref) types.Type { - - if env.next != nil { - return env.next.Get(ref) - } - - if RootDocumentNames.Contains(ref[0]) { - return types.A - } - - return nil -} - -func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type { - if len(tail) == 0 { - return env.getRefRecExtent(node) - } - - if node.Leaf() { - if node.children.Len() > 0 { - if child := node.Child(tail[0].Value); child != nil { - return env.getRefRec(child, ref, tail[1:]) - } - } - return selectRef(node.Value(), tail) - } - - if !IsConstant(tail[0].Value) { - return selectRef(env.getRefRecExtent(node), tail) - } - - child := node.Child(tail[0].Value) - if child == nil { - return env.getRefFallback(ref) - } - - return env.getRefRec(child, ref, tail[1:]) -} - -func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type { - - if node.Leaf() { - return node.Value() - } - - children := []*types.StaticProperty{} - - 
node.Children().Iter(func(k, v util.T) bool { - key := k.(Value) - child := v.(*typeTreeNode) - - tpe := env.getRefRecExtent(child) - - // NOTE(sr): Converting to Golang-native types here is an extension of what we did - // before -- only supporting strings. But since we cannot differentiate sets and arrays - // that way, we could reconsider. - switch key.(type) { - case String, Number, Boolean: // skip anything else - propKey, err := JSON(key) - if err != nil { - panic(fmt.Errorf("unreachable, ValueToInterface: %w", err)) - } - children = append(children, types.NewStaticProperty(propKey, tpe)) - } - return false - }) - - // TODO(tsandall): for now, these objects can have any dynamic properties - // because we don't have schema for base docs. Once schemas are supported - // we can improve this. - return types.NewObject(children, types.NewDynamicProperty(types.S, types.A)) -} - -func (env *TypeEnv) wrap() *TypeEnv { - cpy := *env - cpy.next = env - cpy.tree = newTypeTree() - return &cpy -} - -// typeTreeNode is used to store type information in a tree. 
-type typeTreeNode struct { - key Value - value types.Type - children *util.HashMap -} - -func newTypeTree() *typeTreeNode { - return &typeTreeNode{ - key: nil, - value: nil, - children: util.NewHashMap(valueEq, valueHash), - } -} - -func (n *typeTreeNode) Child(key Value) *typeTreeNode { - value, ok := n.children.Get(key) - if !ok { - return nil - } - return value.(*typeTreeNode) -} - -func (n *typeTreeNode) Children() *util.HashMap { - return n.children -} - -func (n *typeTreeNode) Get(path Ref) types.Type { - curr := n - for _, term := range path { - child, ok := curr.children.Get(term.Value) - if !ok { - return nil - } - curr = child.(*typeTreeNode) - } - return curr.Value() -} - -func (n *typeTreeNode) Leaf() bool { - return n.value != nil -} - -func (n *typeTreeNode) PutOne(key Value, tpe types.Type) { - c, ok := n.children.Get(key) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = key - n.children.Put(key, child) - } else { - child = c.(*typeTreeNode) - } - - child.value = tpe -} - -func (n *typeTreeNode) Put(path Ref, tpe types.Type) { - curr := n - for _, term := range path { - c, ok := curr.children.Get(term.Value) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = term.Value - curr.children.Put(child.key, child) - } else { - child = c.(*typeTreeNode) - } - - curr = child - } - curr.value = tpe -} - -// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path. -// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object. -// path must be ground. 
-func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) { - curr := n - for i, term := range path { - c, ok := curr.children.Get(term.Value) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = term.Value - curr.children.Put(child.key, child) - } else { - child = c.(*typeTreeNode) - - if child.value != nil && i+1 < len(path) { - // If child has an object value, merge the new value into it. - if o, ok := child.value.(*types.Object); ok { - var err error - child.value, err = insertIntoObject(o, path[i+1:], tpe, env) - if err != nil { - panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) - } - } - } - } - - curr = child - } - - curr.value = mergeTypes(curr.value, tpe) - - if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 { - // merge all leafs into the inserted object - leafs := curr.Leafs() - for p, t := range leafs { - var err error - curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env) - if err != nil { - panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) - } - } - } -} - -// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with an types.Or. -// If both are objects, the key types of their dynamic properties are joined with types.Or:s, and their value types -// are recursively merged (using mergeTypes). -// If 'a' and 'b' are both objects, and at least one of them have static properties, they are joined -// with an types.Or, instead of being merged. -// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa); AND both objects have no -// static properties, they are merged. -// If 'a' and 'b' are different types, they are joined with an types.Or. 
-func mergeTypes(a, b types.Type) types.Type { - if a == nil { - return b - } - - if b == nil { - return a - } - - switch a := a.(type) { - case *types.Object: - if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 { - if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 { - return types.Or(a, bObj) - } - - aDynProps := a.DynamicProperties() - bDynProps := bObj.DynamicProperties() - dynProps := types.NewDynamicProperty( - types.Or(aDynProps.Key, bDynProps.Key), - mergeTypes(aDynProps.Value, bDynProps.Value)) - return types.NewObject(nil, dynProps) - } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 { - // If a is an object type with no static components ... - for _, t := range bAny { - if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 { - // ... and b is a types.Any containing an object with no static components, we merge them. - aDynProps := a.DynamicProperties() - tDynProps := tObj.DynamicProperties() - tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key) - tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value) - return bAny - } - } - } - case *types.Set: - if bSet, ok := b.(*types.Set); ok { - return types.NewSet(types.Or(a.Of(), bSet.Of())) - } - case types.Any: - if _, ok := b.(types.Any); !ok { - return mergeTypes(b, a) - } - } - - return types.Or(a, b) -} - -func (n *typeTreeNode) String() string { - b := strings.Builder{} - - if k := n.key; k != nil { - b.WriteString(k.String()) - } else { - b.WriteString("-") - } - - if v := n.value; v != nil { - b.WriteString(": ") - b.WriteString(v.String()) - } - - n.children.Iter(func(_, v util.T) bool { - if child, ok := v.(*typeTreeNode); ok { - b.WriteString("\n\t+ ") - s := child.String() - s = strings.ReplaceAll(s, "\n", "\n\t") - b.WriteString(s) - } - return false - }) - - return b.String() -} - -func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) 
(*types.Object, error) { - if len(path) == 0 { - return o, nil - } - - key := env.Get(path[0].Value) - - if len(path) == 1 { - var dynamicProps *types.DynamicProperty - if dp := o.DynamicProperties(); dp != nil { - dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe)) - } else { - dynamicProps = types.NewDynamicProperty(key, tpe) - } - return types.NewObject(o.StaticProperties(), dynamicProps), nil - } - - child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env) - if err != nil { - return nil, err - } - - var dynamicProps *types.DynamicProperty - if dp := o.DynamicProperties(); dp != nil { - dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child)) - } else { - dynamicProps = types.NewDynamicProperty(key, child) - } - return types.NewObject(o.StaticProperties(), dynamicProps), nil -} - -func (n *typeTreeNode) Leafs() map[*Ref]types.Type { - leafs := map[*Ref]types.Type{} - n.children.Iter(func(_, v util.T) bool { - collectLeafs(v.(*typeTreeNode), nil, leafs) - return false - }) - return leafs -} - -func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { - nPath := append(path, NewTerm(n.key)) - if n.Leaf() { - leafs[&nPath] = n.Value() - return - } - n.children.Iter(func(_, v util.T) bool { - collectLeafs(v.(*typeTreeNode), nPath, leafs) - return false - }) -} - -func (n *typeTreeNode) Value() types.Type { - return n.value -} - -// selectConstant returns the attribute of the type referred to by the term. If -// the attribute type cannot be determined, nil is returned. -func selectConstant(tpe types.Type, term *Term) types.Type { - x, err := JSON(term.Value) - if err == nil { - return types.Select(tpe, x) - } - return nil -} - -// selectRef returns the type of the nested attribute referred to by ref. If -// the attribute type cannot be determined, nil is returned. 
If the ref -// contains vars or refs, then the returned type will be a union of the -// possible types. -func selectRef(tpe types.Type, ref Ref) types.Type { - - if tpe == nil || len(ref) == 0 { - return tpe - } - - head, tail := ref[0], ref[1:] - - switch head.Value.(type) { - case Var, Ref, *Array, Object, Set: - return selectRef(types.Values(tpe), tail) - default: - return selectRef(selectConstant(tpe, head), tail) - } -} +type TypeEnv = v1.TypeEnv diff --git a/vendor/github.com/open-policy-agent/opa/ast/errors.go b/vendor/github.com/open-policy-agent/opa/ast/errors.go index 066dfcdd68..722cfc0fb7 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/errors.go +++ b/vendor/github.com/open-policy-agent/opa/ast/errors.go @@ -5,119 +5,42 @@ package ast import ( - "fmt" - "sort" - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Errors represents a series of errors encountered during parsing, compiling, // etc. -type Errors []*Error - -func (e Errors) Error() string { - - if len(e) == 0 { - return "no error(s)" - } - - if len(e) == 1 { - return fmt.Sprintf("1 error occurred: %v", e[0].Error()) - } - - s := make([]string, len(e)) - for i, err := range e { - s[i] = err.Error() - } - - return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n")) -} - -// Sort sorts the error slice by location. If the locations are equal then the -// error message is compared. -func (e Errors) Sort() { - sort.Slice(e, func(i, j int) bool { - a := e[i] - b := e[j] - - if cmp := a.Location.Compare(b.Location); cmp != 0 { - return cmp < 0 - } - - return a.Error() < b.Error() - }) -} +type Errors = v1.Errors const ( // ParseErr indicates an unclassified parse error occurred. - ParseErr = "rego_parse_error" + ParseErr = v1.ParseErr // CompileErr indicates an unclassified compile error occurred. - CompileErr = "rego_compile_error" + CompileErr = v1.CompileErr // TypeErr indicates a type error was caught. 
- TypeErr = "rego_type_error" + TypeErr = v1.TypeErr // UnsafeVarErr indicates an unsafe variable was found during compilation. - UnsafeVarErr = "rego_unsafe_var_error" + UnsafeVarErr = v1.UnsafeVarErr // RecursionErr indicates recursion was found during compilation. - RecursionErr = "rego_recursion_error" + RecursionErr = v1.RecursionErr ) // IsError returns true if err is an AST error with code. func IsError(code string, err error) bool { - if err, ok := err.(*Error); ok { - return err.Code == code - } - return false + return v1.IsError(code, err) } // ErrorDetails defines the interface for detailed error messages. -type ErrorDetails interface { - Lines() []string -} +type ErrorDetails = v1.ErrorDetails // Error represents a single error caught during parsing, compiling, etc. -type Error struct { - Code string `json:"code"` - Message string `json:"message"` - Location *Location `json:"location,omitempty"` - Details ErrorDetails `json:"details,omitempty"` -} - -func (e *Error) Error() string { - - var prefix string - - if e.Location != nil { - - if len(e.Location.File) > 0 { - prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row) - } else { - prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col) - } - } - - msg := fmt.Sprintf("%v: %v", e.Code, e.Message) - - if len(prefix) > 0 { - msg = prefix + ": " + msg - } - - if e.Details != nil { - for _, line := range e.Details.Lines() { - msg += "\n\t" + line - } - } - - return msg -} +type Error = v1.Error // NewError returns a new Error object. -func NewError(code string, loc *Location, f string, a ...interface{}) *Error { - return &Error{ - Code: code, - Location: loc, - Message: fmt.Sprintf(f, a...), - } +func NewError(code string, loc *Location, f string, a ...any) *Error { + return v1.NewError(code, loc, f, a...) 
} diff --git a/vendor/github.com/open-policy-agent/opa/ast/index.go b/vendor/github.com/open-policy-agent/opa/ast/index.go index cb0cbea323..7e80bb7716 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/index.go +++ b/vendor/github.com/open-policy-agent/opa/ast/index.go @@ -5,904 +5,16 @@ package ast import ( - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // RuleIndex defines the interface for rule indices. -type RuleIndex interface { - - // Build tries to construct an index for the given rules. If the index was - // constructed, it returns true, otherwise false. - Build(rules []*Rule) bool - - // Lookup searches the index for rules that will match the provided - // resolver. If the resolver returns an error, it is returned via err. - Lookup(resolver ValueResolver) (*IndexResult, error) - - // AllRules traverses the index and returns all rules that will match - // the provided resolver without any optimizations (effectively with - // indexing disabled). If the resolver returns an error, it is returned - // via err. - AllRules(resolver ValueResolver) (*IndexResult, error) -} +type RuleIndex v1.RuleIndex // IndexResult contains the result of an index lookup. -type IndexResult struct { - Kind RuleKind - Rules []*Rule - Else map[*Rule][]*Rule - Default *Rule - EarlyExit bool - OnlyGroundRefs bool -} +type IndexResult = v1.IndexResult // NewIndexResult returns a new IndexResult object. func NewIndexResult(kind RuleKind) *IndexResult { - return &IndexResult{ - Kind: kind, - Else: map[*Rule][]*Rule{}, - } -} - -// Empty returns true if there are no rules to evaluate. 
-func (ir *IndexResult) Empty() bool { - return len(ir.Rules) == 0 && ir.Default == nil -} - -type baseDocEqIndex struct { - skipIndexing Set - isVirtual func(Ref) bool - root *trieNode - defaultRule *Rule - kind RuleKind - onlyGroundRefs bool -} - -func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex { - return &baseDocEqIndex{ - skipIndexing: NewSet(NewTerm(InternalPrint.Ref())), - isVirtual: isVirtual, - root: newTrieNodeImpl(), - onlyGroundRefs: true, - } -} - -func (i *baseDocEqIndex) Build(rules []*Rule) bool { - if len(rules) == 0 { - return false - } - - i.kind = rules[0].Head.RuleKind() - indices := newrefindices(i.isVirtual) - - // build indices for each rule. - for idx := range rules { - WalkRules(rules[idx], func(rule *Rule) bool { - if rule.Default { - i.defaultRule = rule - return false - } - if i.onlyGroundRefs { - i.onlyGroundRefs = rule.Head.Reference.IsGround() - } - var skip bool - for _, expr := range rule.Body { - if op := expr.OperatorTerm(); op != nil && i.skipIndexing.Contains(op) { - skip = true - break - } - } - if !skip { - for _, expr := range rule.Body { - indices.Update(rule, expr) - } - } - return false - }) - } - - // build trie out of indices. - for idx := range rules { - var prio int - WalkRules(rules[idx], func(rule *Rule) bool { - if rule.Default { - return false - } - node := i.root - if indices.Indexed(rule) { - for _, ref := range indices.Sorted() { - node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref)) - } - } - // Insert rule into trie with (insertion order, priority order) - // tuple. Retaining the insertion order allows us to return rules - // in the order they were passed to this function. 
- node.append([...]int{idx, prio}, rule) - prio++ - return false - }) - } - return true -} - -func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { - - tr := newTrieTraversalResult() - - err := i.root.Traverse(resolver, tr) - if err != nil { - return nil, err - } - - result := NewIndexResult(i.kind) - result.Default = i.defaultRule - result.OnlyGroundRefs = i.onlyGroundRefs - result.Rules = make([]*Rule, 0, len(tr.ordering)) - - for _, pos := range tr.ordering { - sort.Slice(tr.unordered[pos], func(i, j int) bool { - return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1] - }) - nodes := tr.unordered[pos] - root := nodes[0].rule - - result.Rules = append(result.Rules, root) - if len(nodes) > 1 { - result.Else[root] = make([]*Rule, len(nodes)-1) - for i := 1; i < len(nodes); i++ { - result.Else[root][i-1] = nodes[i].rule - } - } - } - - result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround() - - return result, nil -} - -func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) { - tr := newTrieTraversalResult() - - // Walk over the rule trie and accumulate _all_ rules - rw := &ruleWalker{result: tr} - i.root.Do(rw) - - result := NewIndexResult(i.kind) - result.Default = i.defaultRule - result.OnlyGroundRefs = i.onlyGroundRefs - result.Rules = make([]*Rule, 0, len(tr.ordering)) - - for _, pos := range tr.ordering { - sort.Slice(tr.unordered[pos], func(i, j int) bool { - return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1] - }) - nodes := tr.unordered[pos] - root := nodes[0].rule - result.Rules = append(result.Rules, root) - if len(nodes) > 1 { - result.Else[root] = make([]*Rule, len(nodes)-1) - for i := 1; i < len(nodes); i++ { - result.Else[root][i-1] = nodes[i].rule - } - } - } - - result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround() - - return result, nil -} - -type ruleWalker struct { - result *trieTraversalResult -} - -func (r *ruleWalker) Do(x interface{}) 
trieWalker { - tn := x.(*trieNode) - r.result.Add(tn) - return r -} - -type valueMapper struct { - Key string - MapValue func(Value) Value -} - -type refindex struct { - Ref Ref - Value Value - Mapper *valueMapper -} - -type refindices struct { - isVirtual func(Ref) bool - rules map[*Rule][]*refindex - frequency *util.HashMap - sorted []Ref -} - -func newrefindices(isVirtual func(Ref) bool) *refindices { - return &refindices{ - isVirtual: isVirtual, - rules: map[*Rule][]*refindex{}, - frequency: util.NewHashMap(func(a, b util.T) bool { - r1, r2 := a.(Ref), b.(Ref) - return r1.Equal(r2) - }, func(x util.T) int { - return x.(Ref).Hash() - }), - } -} - -// Update attempts to update the refindices for the given expression in the -// given rule. If the expression cannot be indexed the update does not affect -// the indices. -func (i *refindices) Update(rule *Rule, expr *Expr) { - - if expr.Negated { - return - } - - if len(expr.With) > 0 { - // NOTE(tsandall): In the future, we may need to consider expressions - // that have with statements applied to them. - return - } - - op := expr.Operator() - - switch { - case op.Equal(Equality.Ref()): - i.updateEq(rule, expr) - - case op.Equal(Equal.Ref()) && len(expr.Operands()) == 2: - // NOTE(tsandall): if equal() is called with more than two arguments the - // output value is being captured in which case the indexer cannot - // exclude the rule if the equal() call would return false (because the - // false value must still be produced.) - i.updateEq(rule, expr) - - case op.Equal(GlobMatch.Ref()) && len(expr.Operands()) == 3: - // NOTE(sr): Same as with equal() above -- 4 operands means the output - // of `glob.match` is captured and the rule can thus not be excluded. - i.updateGlobMatch(rule, expr) - } -} - -// Sorted returns a sorted list of references that the indices were built from. -// References that appear more frequently in the indexed rules are ordered -// before less frequently appearing references. 
-func (i *refindices) Sorted() []Ref { - - if i.sorted == nil { - counts := make([]int, 0, i.frequency.Len()) - i.sorted = make([]Ref, 0, i.frequency.Len()) - - i.frequency.Iter(func(k, v util.T) bool { - counts = append(counts, v.(int)) - i.sorted = append(i.sorted, k.(Ref)) - return false - }) - - sort.Slice(i.sorted, func(a, b int) bool { - if counts[a] > counts[b] { - return true - } else if counts[b] > counts[a] { - return false - } - return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0 - }) - } - - return i.sorted -} - -func (i *refindices) Indexed(rule *Rule) bool { - return len(i.rules[rule]) > 0 -} - -func (i *refindices) Value(rule *Rule, ref Ref) Value { - if index := i.index(rule, ref); index != nil { - return index.Value - } - return nil -} - -func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper { - if index := i.index(rule, ref); index != nil { - return index.Mapper - } - return nil -} - -func (i *refindices) updateEq(rule *Rule, expr *Expr) { - a, b := expr.Operand(0), expr.Operand(1) - args := rule.Head.Args - if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok { - i.insert(rule, idx) - return - } - if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok { - i.insert(rule, idx) - return - } -} - -func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { - args := rule.Head.Args - - delim, ok := globDelimiterToString(expr.Operand(1)) - if !ok { - return - } - - if arr := globPatternToArray(expr.Operand(0), delim); arr != nil { - // The 3rd operand of glob.match is the value to match. We assume the - // 3rd operand was a reference that has been rewritten and bound to a - // variable earlier in the query OR a function argument variable. 
- match := expr.Operand(2) - if _, ok := match.Value.(Var); ok { - var ref Ref - for _, other := range i.rules[rule] { - if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 { - ref = other.Ref - } - } - if ref == nil { - for j, arg := range args { - if arg.Equal(match) { - ref = Ref{FunctionArgRootDocument, IntNumberTerm(j)} - } - } - } - if ref != nil { - i.insert(rule, &refindex{ - Ref: ref, - Value: arr.Value, - Mapper: &valueMapper{ - Key: delim, - MapValue: func(v Value) Value { - if s, ok := v.(String); ok { - return stringSliceToArray(splitStringEscaped(string(s), delim)) - } - return v - }, - }, - }) - } - } - } -} - -func (i *refindices) insert(rule *Rule, index *refindex) { - - count, ok := i.frequency.Get(index.Ref) - if !ok { - count = 0 - } - - i.frequency.Put(index.Ref, count.(int)+1) - - for pos, other := range i.rules[rule] { - if other.Ref.Equal(index.Ref) { - i.rules[rule][pos] = index - return - } - } - - i.rules[rule] = append(i.rules[rule], index) -} - -func (i *refindices) index(rule *Rule, ref Ref) *refindex { - for _, index := range i.rules[rule] { - if index.Ref.Equal(ref) { - return index - } - } - return nil -} - -type trieWalker interface { - Do(x interface{}) trieWalker -} - -type trieTraversalResult struct { - unordered map[int][]*ruleNode - ordering []int - values Set -} - -func newTrieTraversalResult() *trieTraversalResult { - return &trieTraversalResult{ - unordered: map[int][]*ruleNode{}, - values: NewSet(), - } -} - -func (tr *trieTraversalResult) Add(t *trieNode) { - for _, node := range t.rules { - root := node.prio[0] - nodes, ok := tr.unordered[root] - if !ok { - tr.ordering = append(tr.ordering, root) - } - tr.unordered[root] = append(nodes, node) - } - if t.values != nil { - t.values.Foreach(func(v *Term) { tr.values.Add(v) }) - } -} - -type trieNode struct { - ref Ref - values Set - mappers []*valueMapper - next *trieNode - any *trieNode - undefined *trieNode - scalars *util.HashMap - array *trieNode 
- rules []*ruleNode -} - -func (node *trieNode) String() string { - var flags []string - flags = append(flags, fmt.Sprintf("self:%p", node)) - if len(node.ref) > 0 { - flags = append(flags, node.ref.String()) - } - if node.next != nil { - flags = append(flags, fmt.Sprintf("next:%p", node.next)) - } - if node.any != nil { - flags = append(flags, fmt.Sprintf("any:%p", node.any)) - } - if node.undefined != nil { - flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined)) - } - if node.array != nil { - flags = append(flags, fmt.Sprintf("array:%p", node.array)) - } - if node.scalars.Len() > 0 { - buf := make([]string, 0, node.scalars.Len()) - node.scalars.Iter(func(k, v util.T) bool { - key := k.(Value) - val := v.(*trieNode) - buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val)) - return false - }) - sort.Strings(buf) - flags = append(flags, strings.Join(buf, " ")) - } - if len(node.rules) > 0 { - flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules))) - } - if len(node.mappers) > 0 { - flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers))) - } - if node.values != nil { - if l := node.values.Len(); l > 0 { - flags = append(flags, fmt.Sprintf("%d value(s)", l)) - } - } - return strings.Join(flags, " ") -} - -func (node *trieNode) append(prio [2]int, rule *Rule) { - node.rules = append(node.rules, &ruleNode{prio, rule}) - - if node.values != nil && rule.Head.Value != nil { - node.values.Add(rule.Head.Value) - return - } - - if node.values == nil && rule.Head.DocKind() == CompleteDoc { - node.values = NewSet(rule.Head.Value) - } -} - -type ruleNode struct { - prio [2]int - rule *Rule -} - -func newTrieNodeImpl() *trieNode { - return &trieNode{ - scalars: util.NewHashMap(valueEq, valueHash), - } -} - -func (node *trieNode) Do(walker trieWalker) { - next := walker.Do(node) - if next == nil { - return - } - if node.any != nil { - node.any.Do(next) - } - if node.undefined != nil { - node.undefined.Do(next) - } - - 
node.scalars.Iter(func(_, v util.T) bool { - child := v.(*trieNode) - child.Do(next) - return false - }) - - if node.array != nil { - node.array.Do(next) - } - if node.next != nil { - node.next.Do(next) - } -} - -func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode { - - if node.next == nil { - node.next = newTrieNodeImpl() - node.next.ref = ref - } - - if mapper != nil { - node.next.addMapper(mapper) - } - - return node.next.insertValue(value) -} - -func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - tr.Add(node) - - return node.next.traverse(resolver, tr) -} - -func (node *trieNode) addMapper(mapper *valueMapper) { - for i := range node.mappers { - if node.mappers[i].Key == mapper.Key { - return - } - } - node.mappers = append(node.mappers, mapper) -} - -func (node *trieNode) insertValue(value Value) *trieNode { - - switch value := value.(type) { - case nil: - if node.undefined == nil { - node.undefined = newTrieNodeImpl() - } - return node.undefined - case Var: - if node.any == nil { - node.any = newTrieNodeImpl() - } - return node.any - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(value) - if !ok { - child = newTrieNodeImpl() - node.scalars.Put(value, child) - } - return child.(*trieNode) - case *Array: - if node.array == nil { - node.array = newTrieNodeImpl() - } - return node.array.insertArray(value) - } - - panic("illegal value") -} - -func (node *trieNode) insertArray(arr *Array) *trieNode { - - if arr.Len() == 0 { - return node - } - - switch head := arr.Elem(0).Value.(type) { - case Var: - if node.any == nil { - node.any = newTrieNodeImpl() - } - return node.any.insertArray(arr.Slice(1, -1)) - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(head) - if !ok { - child = newTrieNodeImpl() - node.scalars.Put(head, child) - } - return child.(*trieNode).insertArray(arr.Slice(1, -1)) - } - - panic("illegal value") 
-} - -func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - v, err := resolver.Resolve(node.ref) - if err != nil { - if IsUnknownValueErr(err) { - return node.traverseUnknown(resolver, tr) - } - return err - } - - if node.undefined != nil { - err = node.undefined.Traverse(resolver, tr) - if err != nil { - return err - } - } - - if v == nil { - return nil - } - - if node.any != nil { - err = node.any.Traverse(resolver, tr) - if err != nil { - return err - } - } - - if err := node.traverseValue(resolver, tr, v); err != nil { - return err - } - - for i := range node.mappers { - if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil { - return err - } - } - - return nil -} - -func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error { - - switch value := value.(type) { - case *Array: - if node.array == nil { - return nil - } - return node.array.traverseArray(resolver, tr, value) - - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(value) - if !ok { - return nil - } - return child.(*trieNode).Traverse(resolver, tr) - } - - return nil -} - -func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error { - - if arr.Len() == 0 { - return node.Traverse(resolver, tr) - } - - if node.any != nil { - err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1)) - if err != nil { - return err - } - } - - head := arr.Elem(0).Value - - if !IsScalar(head) { - return nil - } - - child, ok := node.scalars.Get(head) - if !ok { - return nil - } - return child.(*trieNode).traverseArray(resolver, tr, arr.Slice(1, -1)) -} - -func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - if err := node.Traverse(resolver, tr); err != nil { - return err - } - - if err := node.undefined.traverseUnknown(resolver, 
tr); err != nil { - return err - } - - if err := node.any.traverseUnknown(resolver, tr); err != nil { - return err - } - - if err := node.array.traverseUnknown(resolver, tr); err != nil { - return err - } - - var iterErr error - node.scalars.Iter(func(_, v util.T) bool { - child := v.(*trieNode) - if iterErr = child.traverseUnknown(resolver, tr); iterErr != nil { - return true - } - return false - }) - - return iterErr -} - -// If term `a` is one of the function's operands, we store a Ref: `args[0]` -// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll -// bind `args[0]` and `args[1]` to this rule when called for (x=10) and -// (y=12) respectively. -func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) { - switch v := a.Value.(type) { - case Var: - for i, arg := range args { - if arg.Value.Compare(v) == 0 { - if bval, ok := indexValue(b); ok { - return &refindex{Ref: Ref{FunctionArgRootDocument, IntNumberTerm(i)}, Value: bval}, true - } - } - } - case Ref: - if !RootDocumentNames.Contains(v[0]) { - return nil, false - } - if isVirtual(v) { - return nil, false - } - if v.IsNested() || !v.IsGround() { - return nil, false - } - if bval, ok := indexValue(b); ok { - return &refindex{Ref: v, Value: bval}, true - } - } - return nil, false -} - -func indexValue(b *Term) (Value, bool) { - switch b := b.Value.(type) { - case Null, Boolean, Number, String, Var: - return b, true - case *Array: - stop := false - first := true - vis := NewGenericVisitor(func(x interface{}) bool { - if first { - first = false - return false - } - switch x.(type) { - // No nested structures or values that require evaluation (other than var). 
- case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref: - stop = true - } - return stop - }) - vis.Walk(b) - if !stop { - return b, true - } - } - - return nil, false -} - -func globDelimiterToString(delim *Term) (string, bool) { - - arr, ok := delim.Value.(*Array) - if !ok { - return "", false - } - - var result string - - if arr.Len() == 0 { - result = "." - } else { - for i := 0; i < arr.Len(); i++ { - term := arr.Elem(i) - s, ok := term.Value.(String) - if !ok { - return "", false - } - result += string(s) - } - } - - return result, true -} - -func globPatternToArray(pattern *Term, delim string) *Term { - - s, ok := pattern.Value.(String) - if !ok { - return nil - } - - parts := splitStringEscaped(string(s), delim) - arr := make([]*Term, len(parts)) - - for i := range parts { - if parts[i] == "*" { - arr[i] = VarTerm("$globwildcard") - } else { - var escaped bool - for _, c := range parts[i] { - if c == '\\' { - escaped = !escaped - continue - } - if !escaped { - switch c { - case '[', '?', '{', '*': - // TODO(tsandall): super glob and character pattern - // matching not supported yet. - return nil - } - } - escaped = false - } - arr[i] = StringTerm(parts[i]) - } - } - - return NewTerm(NewArray(arr...)) -} - -// splits s on characters in delim except if delim characters have been escaped -// with reverse solidus. -func splitStringEscaped(s string, delim string) []string { - - var last, curr int - var escaped bool - var result []string - - for ; curr < len(s); curr++ { - if s[curr] == '\\' || escaped { - escaped = !escaped - continue - } - if strings.ContainsRune(delim, rune(s[curr])) { - result = append(result, s[last:curr]) - last = curr + 1 - } - } - - result = append(result, s[last:]) - - return result -} - -func stringSliceToArray(s []string) *Array { - arr := make([]*Term, len(s)) - for i, v := range s { - arr[i] = StringTerm(v) - } - return NewArray(arr...) 
+ return v1.NewIndexResult(kind) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go b/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go deleted file mode 100644 index a0200ac18d..0000000000 --- a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package scanner - -import ( - "fmt" - "io" - "unicode" - "unicode/utf8" - - "github.com/open-policy-agent/opa/ast/internal/tokens" -) - -const bom = 0xFEFF - -// Scanner is used to tokenize an input stream of -// Rego source code. -type Scanner struct { - offset int - row int - col int - bs []byte - curr rune - width int - errors []Error - keywords map[string]tokens.Token - tabs []int - regoV1Compatible bool -} - -// Error represents a scanner error. -type Error struct { - Pos Position - Message string -} - -// Position represents a point in the scanned source code. -type Position struct { - Offset int // start offset in bytes - End int // end offset in bytes - Row int // line number computed in bytes - Col int // column number computed in bytes - Tabs []int // positions of any tabs preceding Col -} - -// New returns an initialized scanner that will scan -// through the source code provided by the io.Reader. -func New(r io.Reader) (*Scanner, error) { - - bs, err := io.ReadAll(r) - if err != nil { - return nil, err - } - - s := &Scanner{ - offset: 0, - row: 1, - col: 0, - bs: bs, - curr: -1, - width: 0, - keywords: tokens.Keywords(), - tabs: []int{}, - } - - s.next() - - if s.curr == bom { - s.next() - } - - return s, nil -} - -// Bytes returns the raw bytes for the full source -// which the scanner has read in. -func (s *Scanner) Bytes() []byte { - return s.bs -} - -// String returns a human readable string of the current scanner state. 
-func (s *Scanner) String() string { - return fmt.Sprintf("", s.curr, s.offset, len(s.bs)) -} - -// Keyword will return a token for the passed in -// literal value. If the value is a Rego keyword -// then the appropriate token is returned. Everything -// else is an Ident. -func (s *Scanner) Keyword(lit string) tokens.Token { - if tok, ok := s.keywords[lit]; ok { - return tok - } - return tokens.Ident -} - -// AddKeyword adds a string -> token mapping to this Scanner instance. -func (s *Scanner) AddKeyword(kw string, tok tokens.Token) { - s.keywords[kw] = tok - - switch tok { - case tokens.Every: // importing 'every' means also importing 'in' - s.keywords["in"] = tokens.In - } -} - -func (s *Scanner) HasKeyword(keywords map[string]tokens.Token) bool { - for kw := range s.keywords { - if _, ok := keywords[kw]; ok { - return true - } - } - return false -} - -func (s *Scanner) SetRegoV1Compatible() { - s.regoV1Compatible = true -} - -func (s *Scanner) RegoV1Compatible() bool { - return s.regoV1Compatible -} - -// WithKeywords returns a new copy of the Scanner struct `s`, with the set -// of known keywords being that of `s` with `kws` added. -func (s *Scanner) WithKeywords(kws map[string]tokens.Token) *Scanner { - cpy := *s - cpy.keywords = make(map[string]tokens.Token, len(s.keywords)+len(kws)) - for kw, tok := range s.keywords { - cpy.AddKeyword(kw, tok) - } - for k, t := range kws { - cpy.AddKeyword(k, t) - } - return &cpy -} - -// WithoutKeywords returns a new copy of the Scanner struct `s`, with the -// set of known keywords being that of `s` with `kws` removed. -// The previously known keywords are returned for a convenient reset. 
-func (s *Scanner) WithoutKeywords(kws map[string]tokens.Token) (*Scanner, map[string]tokens.Token) { - cpy := *s - kw := s.keywords - cpy.keywords = make(map[string]tokens.Token, len(s.keywords)-len(kws)) - for kw, tok := range s.keywords { - if _, ok := kws[kw]; !ok { - cpy.AddKeyword(kw, tok) - } - } - return &cpy, kw -} - -// Scan will increment the scanners position in the source -// code until the next token is found. The token, starting position -// of the token, string literal, and any errors encountered are -// returned. A token will always be returned, the caller must check -// for any errors before using the other values. -func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) { - - pos := Position{Offset: s.offset - s.width, Row: s.row, Col: s.col, Tabs: s.tabs} - var tok tokens.Token - var lit string - - if s.isWhitespace() { - lit = string(s.curr) - s.next() - tok = tokens.Whitespace - } else if isLetter(s.curr) { - lit = s.scanIdentifier() - tok = s.Keyword(lit) - } else if isDecimal(s.curr) { - lit = s.scanNumber() - tok = tokens.Number - } else { - ch := s.curr - s.next() - switch ch { - case -1: - tok = tokens.EOF - case '#': - lit = s.scanComment() - tok = tokens.Comment - case '"': - lit = s.scanString() - tok = tokens.String - case '`': - lit = s.scanRawString() - tok = tokens.String - case '[': - tok = tokens.LBrack - case ']': - tok = tokens.RBrack - case '{': - tok = tokens.LBrace - case '}': - tok = tokens.RBrace - case '(': - tok = tokens.LParen - case ')': - tok = tokens.RParen - case ',': - tok = tokens.Comma - case ':': - if s.curr == '=' { - s.next() - tok = tokens.Assign - } else { - tok = tokens.Colon - } - case '+': - tok = tokens.Add - case '-': - tok = tokens.Sub - case '*': - tok = tokens.Mul - case '/': - tok = tokens.Quo - case '%': - tok = tokens.Rem - case '&': - tok = tokens.And - case '|': - tok = tokens.Or - case '=': - if s.curr == '=' { - s.next() - tok = tokens.Equal - } else { - tok = tokens.Unify - } - 
case '>': - if s.curr == '=' { - s.next() - tok = tokens.Gte - } else { - tok = tokens.Gt - } - case '<': - if s.curr == '=' { - s.next() - tok = tokens.Lte - } else { - tok = tokens.Lt - } - case '!': - if s.curr == '=' { - s.next() - tok = tokens.Neq - } else { - s.error("illegal ! character") - } - case ';': - tok = tokens.Semicolon - case '.': - tok = tokens.Dot - } - } - - pos.End = s.offset - s.width - errs := s.errors - s.errors = nil - - return tok, pos, lit, errs -} - -func (s *Scanner) scanIdentifier() string { - start := s.offset - 1 - for isLetter(s.curr) || isDigit(s.curr) { - s.next() - } - return string(s.bs[start : s.offset-1]) -} - -func (s *Scanner) scanNumber() string { - - start := s.offset - 1 - - if s.curr != '.' { - for isDecimal(s.curr) { - s.next() - } - } - - if s.curr == '.' { - s.next() - var found bool - for isDecimal(s.curr) { - s.next() - found = true - } - if !found { - s.error("expected fraction") - } - } - - if lower(s.curr) == 'e' { - s.next() - if s.curr == '+' || s.curr == '-' { - s.next() - } - var found bool - for isDecimal(s.curr) { - s.next() - found = true - } - if !found { - s.error("expected exponent") - } - } - - // Scan any digits following the decimals to get the - // entire invalid number/identifier. - // Example: 0a2b should be a single invalid number "0a2b" - // rather than a number "0", followed by identifier "a2b". 
- if isLetter(s.curr) { - s.error("illegal number format") - for isLetter(s.curr) || isDigit(s.curr) { - s.next() - } - } - - return string(s.bs[start : s.offset-1]) -} - -func (s *Scanner) scanString() string { - start := s.literalStart() - for { - ch := s.curr - - if ch == '\n' || ch < 0 { - s.error("non-terminated string") - break - } - - s.next() - - if ch == '"' { - break - } - - if ch == '\\' { - switch s.curr { - case '\\', '"', '/', 'b', 'f', 'n', 'r', 't': - s.next() - case 'u': - s.next() - s.next() - s.next() - s.next() - default: - s.error("illegal escape sequence") - } - } - } - - return string(s.bs[start : s.offset-1]) -} - -func (s *Scanner) scanRawString() string { - start := s.literalStart() - for { - ch := s.curr - s.next() - if ch == '`' { - break - } else if ch < 0 { - s.error("non-terminated string") - break - } - } - return string(s.bs[start : s.offset-1]) -} - -func (s *Scanner) scanComment() string { - start := s.literalStart() - for s.curr != '\n' && s.curr != -1 { - s.next() - } - end := s.offset - 1 - // Trim carriage returns that precede the newline - if s.offset > 1 && s.bs[s.offset-2] == '\r' { - end = end - 1 - } - return string(s.bs[start:end]) -} - -func (s *Scanner) next() { - - if s.offset >= len(s.bs) { - s.curr = -1 - s.offset = len(s.bs) + 1 - return - } - - s.curr = rune(s.bs[s.offset]) - s.width = 1 - - if s.curr == 0 { - s.error("illegal null character") - } else if s.curr >= utf8.RuneSelf { - s.curr, s.width = utf8.DecodeRune(s.bs[s.offset:]) - if s.curr == utf8.RuneError && s.width == 1 { - s.error("illegal utf-8 character") - } else if s.curr == bom && s.offset > 0 { - s.error("illegal byte-order mark") - } - } - - s.offset += s.width - - if s.curr == '\n' { - s.row++ - s.col = 0 - s.tabs = []int{} - } else { - s.col++ - if s.curr == '\t' { - s.tabs = append(s.tabs, s.col) - } - } -} - -func (s *Scanner) literalStart() int { - // The current offset is at the first character past the literal delimiter (#, ", `, etc.) 
- // Need to subtract width of first character (plus one for the delimiter). - return s.offset - (s.width + 1) -} - -// From the Go scanner (src/go/scanner/scanner.go) - -func isLetter(ch rune) bool { - return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' -} - -func isDigit(ch rune) bool { - return isDecimal(ch) || ch >= utf8.RuneSelf && unicode.IsDigit(ch) -} - -func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } - -func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter - -func (s *Scanner) isWhitespace() bool { - return s.curr == ' ' || s.curr == '\t' || s.curr == '\n' || s.curr == '\r' -} - -func (s *Scanner) error(reason string) { - s.errors = append(s.errors, Error{Pos: Position{ - Offset: s.offset, - Row: s.row, - Col: s.col, - }, Message: reason}) -} diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go b/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go deleted file mode 100644 index 623ed7ed21..0000000000 --- a/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package tokens - -// Token represents a single Rego source code token -// for use by the Parser. 
-type Token int - -func (t Token) String() string { - if t < 0 || int(t) >= len(strings) { - return "unknown" - } - return strings[t] -} - -// All tokens must be defined here -const ( - Illegal Token = iota - EOF - Whitespace - Ident - Comment - - Package - Import - As - Default - Else - Not - Some - With - Null - True - False - - Number - String - - LBrack - RBrack - LBrace - RBrace - LParen - RParen - Comma - Colon - - Add - Sub - Mul - Quo - Rem - And - Or - Unify - Equal - Assign - In - Neq - Gt - Lt - Gte - Lte - Dot - Semicolon - - Every - Contains - If -) - -var strings = [...]string{ - Illegal: "illegal", - EOF: "eof", - Whitespace: "whitespace", - Comment: "comment", - Ident: "identifier", - Package: "package", - Import: "import", - As: "as", - Default: "default", - Else: "else", - Not: "not", - Some: "some", - With: "with", - Null: "null", - True: "true", - False: "false", - Number: "number", - String: "string", - LBrack: "[", - RBrack: "]", - LBrace: "{", - RBrace: "}", - LParen: "(", - RParen: ")", - Comma: ",", - Colon: ":", - Add: "plus", - Sub: "minus", - Mul: "mul", - Quo: "div", - Rem: "rem", - And: "and", - Or: "or", - Unify: "eq", - Equal: "equal", - Assign: "assign", - In: "in", - Neq: "neq", - Gt: "gt", - Lt: "lt", - Gte: "gte", - Lte: "lte", - Dot: ".", - Semicolon: ";", - Every: "every", - Contains: "contains", - If: "if", -} - -var keywords = map[string]Token{ - "package": Package, - "import": Import, - "as": As, - "default": Default, - "else": Else, - "not": Not, - "some": Some, - "with": With, - "null": Null, - "true": True, - "false": False, -} - -// Keywords returns a copy of the default string -> Token keyword map. 
-func Keywords() map[string]Token { - cpy := make(map[string]Token, len(keywords)) - for k, v := range keywords { - cpy[k] = v - } - return cpy -} - -// IsKeyword returns if a token is a keyword -func IsKeyword(tok Token) bool { - _, ok := keywords[strings[tok]] - return ok -} diff --git a/vendor/github.com/open-policy-agent/opa/ast/interning.go b/vendor/github.com/open-policy-agent/opa/ast/interning.go new file mode 100644 index 0000000000..29231006aa --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/ast/interning.go @@ -0,0 +1,24 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + v1 "github.com/open-policy-agent/opa/v1/ast" +) + +func InternedBooleanTerm(b bool) *Term { + return v1.InternedTerm(b) +} + +// InternedIntNumberTerm returns a term with the given integer value. The term is +// cached between -1 to 512, and for values outside of that range, this function +// is equivalent to ast.IntNumberTerm. +func InternedIntNumberTerm(i int) *Term { + return v1.InternedTerm(i) +} + +func HasInternedIntNumberTerm(i int) bool { + return v1.HasInternedIntNumberTerm(i) +} diff --git a/vendor/github.com/open-policy-agent/opa/ast/json/doc.go b/vendor/github.com/open-policy-agent/opa/ast/json/doc.go new file mode 100644 index 0000000000..26aee9b994 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/ast/json/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. 
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package json diff --git a/vendor/github.com/open-policy-agent/opa/ast/json/json.go b/vendor/github.com/open-policy-agent/opa/ast/json/json.go index 565017d58e..8a3a36bb9b 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/json/json.go +++ b/vendor/github.com/open-policy-agent/opa/ast/json/json.go @@ -1,36 +1,15 @@ package json +import v1 "github.com/open-policy-agent/opa/v1/ast/json" + // Options defines the options for JSON operations, // currently only marshaling can be configured -type Options struct { - MarshalOptions MarshalOptions -} +type Options = v1.Options // MarshalOptions defines the options for JSON marshaling, // currently only toggling the marshaling of location information is supported -type MarshalOptions struct { - // IncludeLocation toggles the marshaling of location information - IncludeLocation NodeToggle - // IncludeLocationText additionally/optionally includes the text of the location - IncludeLocationText bool - // ExcludeLocationFile additionally/optionally excludes the file of the location - // Note that this is inverted (i.e. 
not "include" as the default needs to remain false) - ExcludeLocationFile bool -} +type MarshalOptions = v1.MarshalOptions // NodeToggle is a generic struct to allow the toggling of // settings for different ast node types -type NodeToggle struct { - Term bool - Package bool - Comment bool - Import bool - Rule bool - Head bool - Expr bool - SomeDecl bool - Every bool - With bool - Annotations bool - AnnotationsRef bool -} +type NodeToggle = v1.NodeToggle diff --git a/vendor/github.com/open-policy-agent/opa/ast/location/location.go b/vendor/github.com/open-policy-agent/opa/ast/location/location.go deleted file mode 100644 index 92226df3f0..0000000000 --- a/vendor/github.com/open-policy-agent/opa/ast/location/location.go +++ /dev/null @@ -1,134 +0,0 @@ -// Package location defines locations in Rego source code. -package location - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - - astJSON "github.com/open-policy-agent/opa/ast/json" -) - -// Location records a position in source code -type Location struct { - Text []byte `json:"-"` // The original text fragment from the source. - File string `json:"file"` // The name of the source file (which may be empty). - Row int `json:"row"` // The line in the source. - Col int `json:"col"` // The column in the row. - Offset int `json:"-"` // The byte offset for the location in the source. - - // JSONOptions specifies options for marshaling and unmarshalling of locations - JSONOptions astJSON.Options - - Tabs []int `json:"-"` // The column offsets of tabs in the source. -} - -// NewLocation returns a new Location object. -func NewLocation(text []byte, file string, row int, col int) *Location { - return &Location{Text: text, File: file, Row: row, Col: col} -} - -// Equal checks if two locations are equal to each other. 
-func (loc *Location) Equal(other *Location) bool { - return bytes.Equal(loc.Text, other.Text) && - loc.File == other.File && - loc.Row == other.Row && - loc.Col == other.Col -} - -// Errorf returns a new error value with a message formatted to include the location -// info (e.g., line, column, filename, etc.) -func (loc *Location) Errorf(f string, a ...interface{}) error { - return errors.New(loc.Format(f, a...)) -} - -// Wrapf returns a new error value that wraps an existing error with a message formatted -// to include the location info (e.g., line, column, filename, etc.) -func (loc *Location) Wrapf(err error, f string, a ...interface{}) error { - return fmt.Errorf(loc.Format(f, a...)+": %w", err) -} - -// Format returns a formatted string prefixed with the location information. -func (loc *Location) Format(f string, a ...interface{}) string { - if len(loc.File) > 0 { - f = fmt.Sprintf("%v:%v: %v", loc.File, loc.Row, f) - } else { - f = fmt.Sprintf("%v:%v: %v", loc.Row, loc.Col, f) - } - return fmt.Sprintf(f, a...) -} - -func (loc *Location) String() string { - if len(loc.File) > 0 { - return fmt.Sprintf("%v:%v", loc.File, loc.Row) - } - if len(loc.Text) > 0 { - return string(loc.Text) - } - return fmt.Sprintf("%v:%v", loc.Row, loc.Col) -} - -// Compare returns -1, 0, or 1 to indicate if this loc is less than, equal to, -// or greater than the other. Comparison is performed on the file, row, and -// column of the Location (but not on the text.) Nil locations are greater than -// non-nil locations. 
-func (loc *Location) Compare(other *Location) int { - if loc == nil && other == nil { - return 0 - } else if loc == nil { - return 1 - } else if other == nil { - return -1 - } else if loc.File < other.File { - return -1 - } else if loc.File > other.File { - return 1 - } else if loc.Row < other.Row { - return -1 - } else if loc.Row > other.Row { - return 1 - } else if loc.Col < other.Col { - return -1 - } else if loc.Col > other.Col { - return 1 - } - return 0 -} - -func (loc *Location) MarshalJSON() ([]byte, error) { - // structs are used here to preserve the field ordering of the original Location struct - if loc.JSONOptions.MarshalOptions.ExcludeLocationFile { - data := struct { - Row int `json:"row"` - Col int `json:"col"` - Text []byte `json:"text,omitempty"` - }{ - Row: loc.Row, - Col: loc.Col, - } - - if loc.JSONOptions.MarshalOptions.IncludeLocationText { - data.Text = loc.Text - } - - return json.Marshal(data) - } - - data := struct { - File string `json:"file"` - Row int `json:"row"` - Col int `json:"col"` - Text []byte `json:"text,omitempty"` - }{ - Row: loc.Row, - Col: loc.Col, - File: loc.File, - } - - if loc.JSONOptions.MarshalOptions.IncludeLocationText { - data.Text = loc.Text - } - - return json.Marshal(data) -} diff --git a/vendor/github.com/open-policy-agent/opa/ast/map.go b/vendor/github.com/open-policy-agent/opa/ast/map.go index b0cc9eb60f..070ad3e5de 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/map.go +++ b/vendor/github.com/open-policy-agent/opa/ast/map.go @@ -5,129 +5,14 @@ package ast import ( - "encoding/json" - - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // ValueMap represents a key/value map between AST term values. Any type of term // can be used as a key in the map. -type ValueMap struct { - hashMap *util.HashMap -} +type ValueMap = v1.ValueMap // NewValueMap returns a new ValueMap. 
func NewValueMap() *ValueMap { - vs := &ValueMap{ - hashMap: util.NewHashMap(valueEq, valueHash), - } - return vs -} - -// MarshalJSON provides a custom marshaller for the ValueMap which -// will include the key, value, and value type. -func (vs *ValueMap) MarshalJSON() ([]byte, error) { - var tmp []map[string]interface{} - vs.Iter(func(k Value, v Value) bool { - tmp = append(tmp, map[string]interface{}{ - "name": k.String(), - "type": TypeName(v), - "value": v, - }) - return false - }) - return json.Marshal(tmp) -} - -// Copy returns a shallow copy of the ValueMap. -func (vs *ValueMap) Copy() *ValueMap { - if vs == nil { - return nil - } - cpy := NewValueMap() - cpy.hashMap = vs.hashMap.Copy() - return cpy -} - -// Equal returns true if this ValueMap equals the other. -func (vs *ValueMap) Equal(other *ValueMap) bool { - if vs == nil { - return other == nil || other.Len() == 0 - } - if other == nil { - return vs == nil || vs.Len() == 0 - } - return vs.hashMap.Equal(other.hashMap) -} - -// Len returns the number of elements in the map. -func (vs *ValueMap) Len() int { - if vs == nil { - return 0 - } - return vs.hashMap.Len() -} - -// Get returns the value in the map for k. -func (vs *ValueMap) Get(k Value) Value { - if vs != nil { - if v, ok := vs.hashMap.Get(k); ok { - return v.(Value) - } - } - return nil -} - -// Hash returns a hash code for this ValueMap. -func (vs *ValueMap) Hash() int { - if vs == nil { - return 0 - } - return vs.hashMap.Hash() -} - -// Iter calls the iter function for each key/value pair in the map. If the iter -// function returns true, iteration stops. -func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool { - if vs == nil { - return false - } - return vs.hashMap.Iter(func(kt, vt util.T) bool { - k := kt.(Value) - v := vt.(Value) - return iter(k, v) - }) -} - -// Put inserts a key k into the map with value v. 
-func (vs *ValueMap) Put(k, v Value) { - if vs == nil { - panic("put on nil value map") - } - vs.hashMap.Put(k, v) -} - -// Delete removes a key k from the map. -func (vs *ValueMap) Delete(k Value) { - if vs == nil { - return - } - vs.hashMap.Delete(k) -} - -func (vs *ValueMap) String() string { - if vs == nil { - return "{}" - } - return vs.hashMap.String() -} - -func valueHash(v util.T) int { - return v.(Value).Hash() -} - -func valueEq(a, b util.T) bool { - av := a.(Value) - bv := b.(Value) - return av.Compare(bv) == 0 + return v1.NewValueMap() } diff --git a/vendor/github.com/open-policy-agent/opa/ast/marshal.go b/vendor/github.com/open-policy-agent/opa/ast/marshal.go deleted file mode 100644 index 53fb112044..0000000000 --- a/vendor/github.com/open-policy-agent/opa/ast/marshal.go +++ /dev/null @@ -1,11 +0,0 @@ -package ast - -import ( - astJSON "github.com/open-policy-agent/opa/ast/json" -) - -// customJSON is an interface that can be implemented by AST nodes that -// allows the parser to set options for JSON operations on that node. -type customJSON interface { - setJSONOptions(astJSON.Options) -} diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser.go b/vendor/github.com/open-policy-agent/opa/ast/parser.go index 09ede2baec..45cd4da06e 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/parser.go +++ b/vendor/github.com/open-policy-agent/opa/ast/parser.go @@ -1,2733 +1,49 @@ -// Copyright 2020 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. 
package ast import ( - "bytes" - "encoding/json" - "fmt" - "io" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "unicode/utf8" - - "gopkg.in/yaml.v3" - - "github.com/open-policy-agent/opa/ast/internal/scanner" - "github.com/open-policy-agent/opa/ast/internal/tokens" - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/ast/location" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -var RegoV1CompatibleRef = Ref{VarTerm("rego"), StringTerm("v1")} +var RegoV1CompatibleRef = v1.RegoV1CompatibleRef // RegoVersion defines the Rego syntax requirements for a module. -type RegoVersion int +type RegoVersion = v1.RegoVersion -const DefaultRegoVersion = RegoVersion(0) +const DefaultRegoVersion = RegoV0 const ( + RegoUndefined = v1.RegoUndefined // RegoV0 is the default, original Rego syntax. - RegoV0 RegoVersion = iota + RegoV0 = v1.RegoV0 // RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module). // Shortly, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported. - RegoV0CompatV1 + RegoV0CompatV1 = v1.RegoV0CompatV1 // RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.: // future.keywords part of default keyword set, and don't require imports; // 'if' and 'contains' required in rule heads; // (some) strict checks on by default. - RegoV1 + RegoV1 = v1.RegoV1 ) -func (v RegoVersion) Int() int { - if v == RegoV1 { - return 1 - } - return 0 -} - -func (v RegoVersion) String() string { - switch v { - case RegoV0: - return "v0" - case RegoV1: - return "v1" - case RegoV0CompatV1: - return "v0v1" - default: - return "unknown" - } -} - func RegoVersionFromInt(i int) RegoVersion { - if i == 1 { - return RegoV1 - } - return RegoV0 -} - -// Note: This state is kept isolated from the parser so that we -// can do efficient shallow copies of these values when doing a -// save() and restore(). 
-type state struct { - s *scanner.Scanner - lastEnd int - skippedNL bool - tok tokens.Token - tokEnd int - lit string - loc Location - errors Errors - hints []string - comments []*Comment - wildcard int -} - -func (s *state) String() string { - return fmt.Sprintf("", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments)) -} - -func (s *state) Loc() *location.Location { - cpy := s.loc - return &cpy -} - -func (s *state) Text(offset, end int) []byte { - bs := s.s.Bytes() - if offset >= 0 && offset < len(bs) { - if end >= offset && end <= len(bs) { - return bs[offset:end] - } - } - return nil + return v1.RegoVersionFromInt(i) } // Parser is used to parse Rego statements. -type Parser struct { - r io.Reader - s *state - po ParserOptions - cache parsedTermCache -} - -type parsedTermCacheItem struct { - t *Term - post *state // post is the post-state that's restored on a cache-hit - offset int - next *parsedTermCacheItem -} - -type parsedTermCache struct { - m *parsedTermCacheItem -} - -func (c parsedTermCache) String() string { - s := strings.Builder{} - s.WriteRune('{') - var e *parsedTermCacheItem - for e = c.m; e != nil; e = e.next { - s.WriteString(fmt.Sprintf("%v", e)) - } - s.WriteRune('}') - return s.String() -} - -func (e *parsedTermCacheItem) String() string { - return fmt.Sprintf("<%d:%v>", e.offset, e.t) -} +type Parser = v1.Parser // ParserOptions defines the options for parsing Rego statements. -type ParserOptions struct { - Capabilities *Capabilities - ProcessAnnotation bool - AllFutureKeywords bool - FutureKeywords []string - SkipRules bool - JSONOptions *astJSON.Options - // RegoVersion is the version of Rego to parse for. - RegoVersion RegoVersion - unreleasedKeywords bool // TODO(sr): cleanup -} - -// EffectiveRegoVersion returns the effective RegoVersion to use for parsing. -// Deprecated: Use RegoVersion instead. 
-func (po *ParserOptions) EffectiveRegoVersion() RegoVersion { - return po.RegoVersion -} +type ParserOptions = v1.ParserOptions // NewParser creates and initializes a Parser. func NewParser() *Parser { - p := &Parser{ - s: &state{}, - po: ParserOptions{}, - } - return p -} - -// WithFilename provides the filename for Location details -// on parsed statements. -func (p *Parser) WithFilename(filename string) *Parser { - p.s.loc.File = filename - return p -} - -// WithReader provides the io.Reader that the parser will -// use as its source. -func (p *Parser) WithReader(r io.Reader) *Parser { - p.r = r - return p -} - -// WithProcessAnnotation enables or disables the processing of -// annotations by the Parser -func (p *Parser) WithProcessAnnotation(processAnnotation bool) *Parser { - p.po.ProcessAnnotation = processAnnotation - return p -} - -// WithFutureKeywords enables "future" keywords, i.e., keywords that can -// be imported via -// -// import future.keywords.kw -// import future.keywords.other -// -// but in a more direct way. The equivalent of this import would be -// -// WithFutureKeywords("kw", "other") -func (p *Parser) WithFutureKeywords(kws ...string) *Parser { - p.po.FutureKeywords = kws - return p -} - -// WithAllFutureKeywords enables all "future" keywords, i.e., the -// ParserOption equivalent of -// -// import future.keywords -func (p *Parser) WithAllFutureKeywords(yes bool) *Parser { - p.po.AllFutureKeywords = yes - return p -} - -// withUnreleasedKeywords allows using keywords that haven't surfaced -// as future keywords (see above) yet, but have tests that require -// them to be parsed -func (p *Parser) withUnreleasedKeywords(yes bool) *Parser { - p.po.unreleasedKeywords = yes - return p -} - -// WithCapabilities sets the capabilities structure on the parser. -func (p *Parser) WithCapabilities(c *Capabilities) *Parser { - p.po.Capabilities = c - return p -} - -// WithSkipRules instructs the parser not to attempt to parse Rule statements. 
-func (p *Parser) WithSkipRules(skip bool) *Parser { - p.po.SkipRules = skip - return p -} - -// WithJSONOptions sets the Options which will be set on nodes to configure -// their JSON marshaling behavior. -func (p *Parser) WithJSONOptions(jsonOptions *astJSON.Options) *Parser { - p.po.JSONOptions = jsonOptions - return p -} - -func (p *Parser) WithRegoVersion(version RegoVersion) *Parser { - p.po.RegoVersion = version - return p -} - -func (p *Parser) parsedTermCacheLookup() (*Term, *state) { - l := p.s.loc.Offset - // stop comparing once the cached offsets are lower than l - for h := p.cache.m; h != nil && h.offset >= l; h = h.next { - if h.offset == l { - return h.t, h.post - } - } - return nil, nil -} - -func (p *Parser) parsedTermCachePush(t *Term, s0 *state) { - s1 := p.save() - o0 := s0.loc.Offset - entry := parsedTermCacheItem{t: t, post: s1, offset: o0} - - // find the first one whose offset is smaller than ours - var e *parsedTermCacheItem - for e = p.cache.m; e != nil; e = e.next { - if e.offset < o0 { - break - } - } - entry.next = e - p.cache.m = &entry -} - -// futureParser returns a shallow copy of `p` with an empty -// cache, and a scanner that knows all future keywords. -// It's used to present hints in errors, when statements would -// only parse successfully if some future keyword is enabled. -func (p *Parser) futureParser() *Parser { - q := *p - q.s = p.save() - q.s.s = p.s.s.WithKeywords(futureKeywords) - q.cache = parsedTermCache{} - return &q -} - -// presentParser returns a shallow copy of `p` with an empty -// cache, and a scanner that knows none of the future keywords. -// It is used to successfully parse keyword imports, like -// -// import future.keywords.in -// -// even when the parser has already been informed about the -// future keyword "in". This parser won't error out because -// "in" is an identifier. 
-func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) { - var cpy map[string]tokens.Token - q := *p - q.s = p.save() - q.s.s, cpy = p.s.s.WithoutKeywords(futureKeywords) - q.cache = parsedTermCache{} - return &q, cpy -} - -// Parse will read the Rego source and parse statements and -// comments as they are found. Any errors encountered while -// parsing will be accumulated and returned as a list of Errors. -func (p *Parser) Parse() ([]Statement, []*Comment, Errors) { - - if p.po.Capabilities == nil { - p.po.Capabilities = CapabilitiesForThisVersion() - } - - allowedFutureKeywords := map[string]tokens.Token{} - - if p.po.RegoVersion == RegoV1 { - // RegoV1 includes all future keywords in the default language definition - for k, v := range futureKeywords { - allowedFutureKeywords[k] = v - } - - // For sake of error reporting, we still need to check that keywords in capabilities are known, - for _, kw := range p.po.Capabilities.FutureKeywords { - if _, ok := futureKeywords[kw]; !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), - Location: nil, - }, - } - } - } - // and that explicitly requested future keywords are known. 
- for _, kw := range p.po.FutureKeywords { - if _, ok := allowedFutureKeywords[kw]; !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("unknown future keyword: %v", kw), - Location: nil, - }, - } - } - } - } else { - for _, kw := range p.po.Capabilities.FutureKeywords { - var ok bool - allowedFutureKeywords[kw], ok = futureKeywords[kw] - if !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), - Location: nil, - }, - } - } - } - } - - var err error - p.s.s, err = scanner.New(p.r) - if err != nil { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: err.Error(), - Location: nil, - }, - } - } - - selected := map[string]tokens.Token{} - if p.po.AllFutureKeywords || p.po.RegoVersion == RegoV1 { - for kw, tok := range allowedFutureKeywords { - selected[kw] = tok - } - } else { - for _, kw := range p.po.FutureKeywords { - tok, ok := allowedFutureKeywords[kw] - if !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("unknown future keyword: %v", kw), - Location: nil, - }, - } - } - selected[kw] = tok - } - } - p.s.s = p.s.s.WithKeywords(selected) - - if p.po.RegoVersion == RegoV1 { - for kw, tok := range allowedFutureKeywords { - p.s.s.AddKeyword(kw, tok) - } - } - - // read the first token to initialize the parser - p.scan() - - var stmts []Statement - - // Read from the scanner until the last token is reached or no statements - // can be parsed. Attempt to parse package statements, import statements, - // rule statements, and then body/query statements (in that order). If a - // statement cannot be parsed, restore the parser state before trying the - // next type of statement. If a statement can be parsed, continue from that - // point trying to parse packages, imports, etc. in the same order. 
- for p.s.tok != tokens.EOF { - - s := p.save() - - if pkg := p.parsePackage(); pkg != nil { - stmts = append(stmts, pkg) - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - s = p.save() - - if imp := p.parseImport(); imp != nil { - if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) { - p.regoV1Import(imp) - } - - if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) { - p.futureImport(imp, allowedFutureKeywords) - } - - stmts = append(stmts, imp) - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - - if !p.po.SkipRules { - s = p.save() - - if rules := p.parseRules(); rules != nil { - for i := range rules { - stmts = append(stmts, rules[i]) - } - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - } - - if body := p.parseQuery(true, tokens.EOF); body != nil { - stmts = append(stmts, body) - continue - } - - break - } - - if p.po.ProcessAnnotation { - stmts = p.parseAnnotations(stmts) - } - - if p.po.JSONOptions != nil { - for i := range stmts { - vis := NewGenericVisitor(func(x interface{}) bool { - if x, ok := x.(customJSON); ok { - x.setJSONOptions(*p.po.JSONOptions) - } - return false - }) - - vis.Walk(stmts[i]) - } - } - - return stmts, p.s.comments, p.s.errors -} - -func (p *Parser) parseAnnotations(stmts []Statement) []Statement { - - annotStmts, errs := parseAnnotations(p.s.comments) - for _, err := range errs { - p.error(err.Location, err.Message) - } - - for _, annotStmt := range annotStmts { - stmts = append(stmts, annotStmt) - } - - return stmts -} - -func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) { - - var hint = []byte("METADATA") - var curr *metadataParser - var blocks []*metadataParser - - for i := 0; i < len(comments); i++ { - if curr != nil { - if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 { - curr.Append(comments[i]) - continue - } - curr = nil - } - if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) { - 
curr = newMetadataParser(comments[i].Location) - blocks = append(blocks, curr) - } - } - - var stmts []*Annotations - var errs Errors - for _, b := range blocks { - a, err := b.Parse() - if err != nil { - errs = append(errs, &Error{ - Code: ParseErr, - Message: err.Error(), - Location: b.loc, - }) - } else { - stmts = append(stmts, a) - } - } - - return stmts, errs -} - -func (p *Parser) parsePackage() *Package { - - var pkg Package - pkg.SetLoc(p.s.Loc()) - - if p.s.tok != tokens.Package { - return nil - } - - p.scan() - if p.s.tok != tokens.Ident { - p.illegalToken() - return nil - } - - term := p.parseTerm() - - if term != nil { - switch v := term.Value.(type) { - case Var: - pkg.Path = Ref{ - DefaultRootDocument.Copy().SetLocation(term.Location), - StringTerm(string(v)).SetLocation(term.Location), - } - case Ref: - pkg.Path = make(Ref, len(v)+1) - pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location) - first, ok := v[0].Value.(Var) - if !ok { - p.errorf(v[0].Location, "unexpected %v token: expecting var", TypeName(v[0].Value)) - return nil - } - pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location) - for i := 2; i < len(pkg.Path); i++ { - switch v[i-1].Value.(type) { - case String: - pkg.Path[i] = v[i-1] - default: - p.errorf(v[i-1].Location, "unexpected %v token: expecting string", TypeName(v[i-1].Value)) - return nil - } - } - default: - p.illegalToken() - return nil - } - } - - if pkg.Path == nil { - if len(p.s.errors) == 0 { - p.error(p.s.Loc(), "expected path") - } - return nil - } - - return &pkg -} - -func (p *Parser) parseImport() *Import { - - var imp Import - imp.SetLoc(p.s.Loc()) - - if p.s.tok != tokens.Import { - return nil - } - - p.scan() - if p.s.tok != tokens.Ident { - p.error(p.s.Loc(), "expected ident") - return nil - } - q, prev := p.presentParser() - term := q.parseTerm() - if term != nil { - switch v := term.Value.(type) { - case Var: - imp.Path = RefTerm(term).SetLocation(term.Location) - case Ref: - for i := 1; 
i < len(v); i++ { - if _, ok := v[i].Value.(String); !ok { - p.errorf(v[i].Location, "unexpected %v token: expecting string", TypeName(v[i].Value)) - return nil - } - } - imp.Path = term - } - } - // keep advanced parser state, reset known keywords - p.s = q.s - p.s.s = q.s.s.WithKeywords(prev) - - if imp.Path == nil { - p.error(p.s.Loc(), "expected path") - return nil - } - - path := imp.Path.Value.(Ref) - - switch { - case RootDocumentNames.Contains(path[0]): - case FutureRootDocument.Equal(path[0]): - case RegoRootDocument.Equal(path[0]): - default: - p.hint("if this is unexpected, try updating OPA") - p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v", - RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)), - path[0]) - return nil - } - - if p.s.tok == tokens.As { - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected var") - return nil - } - - if alias := p.parseTerm(); alias != nil { - v, ok := alias.Value.(Var) - if ok { - imp.Alias = v - return &imp - } - } - p.illegal("expected var") - return nil - } - - return &imp -} - -func (p *Parser) parseRules() []*Rule { - - var rule Rule - rule.SetLoc(p.s.Loc()) - - if p.s.tok == tokens.Default { - p.scan() - rule.Default = true - } - - if p.s.tok != tokens.Ident { - return nil - } - - usesContains := false - if rule.Head, usesContains = p.parseHead(rule.Default); rule.Head == nil { - return nil - } - - if usesContains { - rule.Head.keywords = append(rule.Head.keywords, tokens.Contains) - } - - if rule.Default { - if !p.validateDefaultRuleValue(&rule) { - return nil - } - - if len(rule.Head.Args) > 0 { - if !p.validateDefaultRuleArgs(&rule) { - return nil - } - } - - rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) - return []*Rule{&rule} - } - - // back-compat with `p[x] { ... }`` - hasIf := p.s.tok == tokens.If - - // p[x] if ... 
becomes a single-value rule p[x] - if hasIf && !usesContains && len(rule.Head.Ref()) == 2 { - if !rule.Head.Ref()[1].IsGround() && len(rule.Head.Args) == 0 { - rule.Head.Key = rule.Head.Ref()[1] - } - - if rule.Head.Value == nil { - rule.Head.generatedValue = true - rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location) - } else { - // p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat - v, ok := rule.Head.Ref()[0].Value.(Var) - if !ok { - return nil - } - rule.Head.Name = v - } - } - - // p[x] becomes a multi-value rule p - if !hasIf && !usesContains && - len(rule.Head.Args) == 0 && // not a function - len(rule.Head.Ref()) == 2 { // ref like 'p[x]' - v, ok := rule.Head.Ref()[0].Value.(Var) - if !ok { - return nil - } - rule.Head.Name = v - rule.Head.Key = rule.Head.Ref()[1] - if rule.Head.Value == nil { - rule.Head.SetRef(rule.Head.Ref()[:len(rule.Head.Ref())-1]) - } - } - - switch { - case hasIf: - rule.Head.keywords = append(rule.Head.keywords, tokens.If) - p.scan() - s := p.save() - if expr := p.parseLiteral(); expr != nil { - // NOTE(sr): set literals are never false or undefined, so parsing this as - // p if { true } - // ^^^^^^^^ set of one element, `true` - // isn't valid. 
- isSetLiteral := false - if t, ok := expr.Terms.(*Term); ok { - _, isSetLiteral = t.Value.(Set) - } - // expr.Term is []*Term or Every - if !isSetLiteral { - rule.Body.Append(expr) - break - } - } - - // parsing as literal didn't work out, expect '{ BODY }' - p.restore(s) - fallthrough - - case p.s.tok == tokens.LBrace: - p.scan() - if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { - return nil - } - p.scan() - - case usesContains: - rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) - rule.generatedBody = true - rule.Location = rule.Head.Location - - return []*Rule{&rule} - - default: - return nil - } - - if p.s.tok == tokens.Else { - if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() { - p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head") - return nil - } - if rule.Head.Key != nil { - p.error(p.s.Loc(), "else keyword cannot be used on multi-value rules") - return nil - } - - if rule.Else = p.parseElse(rule.Head); rule.Else == nil { - return nil - } - } - - rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) - - rules := []*Rule{&rule} - - for p.s.tok == tokens.LBrace { - - if rule.Else != nil { - p.error(p.s.Loc(), "expected else keyword") - return nil - } - - loc := p.s.Loc() - - p.scan() - var next Rule - - if next.Body = p.parseBody(tokens.RBrace); next.Body == nil { - return nil - } - p.scan() - - loc.Text = p.s.Text(loc.Offset, p.s.lastEnd) - next.SetLoc(loc) - - // Chained rule head's keep the original - // rule's head AST but have their location - // set to the rule body. 
- next.Head = rule.Head.Copy() - next.Head.keywords = rule.Head.keywords - for i := range next.Head.Args { - if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() { - next.Head.Args[i].Value = Var(p.genwildcard()) - } - } - setLocRecursive(next.Head, loc) - - rules = append(rules, &next) - } - - return rules -} - -func (p *Parser) parseElse(head *Head) *Rule { - - var rule Rule - rule.SetLoc(p.s.Loc()) - - rule.Head = head.Copy() - rule.Head.generatedValue = false - for i := range rule.Head.Args { - if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() { - rule.Head.Args[i].Value = Var(p.genwildcard()) - } - } - rule.Head.SetLoc(p.s.Loc()) - - defer func() { - rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) - }() - - p.scan() - - switch p.s.tok { - case tokens.LBrace, tokens.If: // no value, but a body follows directly - rule.Head.generatedValue = true - rule.Head.Value = BooleanTerm(true) - case tokens.Assign, tokens.Unify: - rule.Head.Assign = tokens.Assign == p.s.tok - p.scan() - rule.Head.Value = p.parseTermInfixCall() - if rule.Head.Value == nil { - return nil - } - rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd) - default: - p.illegal("expected else value term or rule body") - return nil - } - - hasIf := p.s.tok == tokens.If - hasLBrace := p.s.tok == tokens.LBrace - - if !hasIf && !hasLBrace { - rule.Body = NewBody(NewExpr(BooleanTerm(true))) - rule.generatedBody = true - setLocRecursive(rule.Body, rule.Location) - return &rule - } - - if hasIf { - rule.Head.keywords = append(rule.Head.keywords, tokens.If) - p.scan() - } - - if p.s.tok == tokens.LBrace { - p.scan() - if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { - return nil - } - p.scan() - } else if p.s.tok != tokens.EOF { - expr := p.parseLiteral() - if expr == nil { - return nil - } - rule.Body.Append(expr) - setLocRecursive(rule.Body, rule.Location) - } else { - p.illegal("rule body expected") - return nil - } - - if p.s.tok 
== tokens.Else { - if rule.Else = p.parseElse(head); rule.Else == nil { - return nil - } - } - return &rule -} - -func (p *Parser) parseHead(defaultRule bool) (*Head, bool) { - head := &Head{} - loc := p.s.Loc() - defer func() { - if head != nil { - head.SetLoc(loc) - head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd) - } - }() - - term := p.parseVar() - if term == nil { - return nil, false - } - - ref := p.parseTermFinish(term, true) - if ref == nil { - p.illegal("expected rule head name") - return nil, false - } - - switch x := ref.Value.(type) { - case Var: - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. - head = VarHead(x, ref.Location, p.po.JSONOptions) - case Ref: - head = RefHead(x) - case Call: - op, args := x[0], x[1:] - var ref Ref - switch y := op.Value.(type) { - case Var: - ref = Ref{op} - case Ref: - if _, ok := y[0].Value.(Var); !ok { - p.illegal("rule head ref %v invalid", y) - return nil, false - } - ref = y - } - head = RefHead(ref) - head.Args = append([]*Term{}, args...) - - default: - return nil, false - } - - name := head.Ref().String() - - switch p.s.tok { - case tokens.Contains: // NOTE: no Value for `contains` heads, we return here - // Catch error case of using 'contains' with a function definition rule head. - if head.Args != nil { - p.illegal("the contains keyword can only be used with multi-value rule definitions (e.g., %s contains { ... })", name) - } - p.scan() - head.Key = p.parseTermInfixCall() - if head.Key == nil { - p.illegal("expected rule key term (e.g., %s contains { ... })", name) - } - return head, true - - case tokens.Unify: - p.scan() - head.Value = p.parseTermInfixCall() - if head.Value == nil { - // FIX HEAD.String() - p.illegal("expected rule value term (e.g., %s[%s] = { ... 
})", name, head.Key) - } - case tokens.Assign: - p.scan() - head.Assign = true - head.Value = p.parseTermInfixCall() - if head.Value == nil { - switch { - case len(head.Args) > 0: - p.illegal("expected function value term (e.g., %s(...) := { ... })", name) - case head.Key != nil: - p.illegal("expected partial rule value term (e.g., %s[...] := { ... })", name) - case defaultRule: - p.illegal("expected default rule value term (e.g., default %s := )", name) - default: - p.illegal("expected rule value term (e.g., %s := { ... })", name) - } - } - } - - if head.Value == nil && head.Key == nil { - if len(head.Ref()) != 2 || len(head.Args) > 0 { - head.generatedValue = true - head.Value = BooleanTerm(true).SetLocation(head.Location) - } - } - return head, false -} - -func (p *Parser) parseBody(end tokens.Token) Body { - return p.parseQuery(false, end) -} - -func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body { - body := Body{} - - if p.s.tok == end { - p.error(p.s.Loc(), "found empty body") - return nil - } - - for { - expr := p.parseLiteral() - if expr == nil { - return nil - } - - body.Append(expr) - - if p.s.tok == tokens.Semicolon { - p.scan() - continue - } - - if p.s.tok == end || requireSemi { - return body - } - - if !p.s.skippedNL { - // If there was already an error then don't pile this one on - if len(p.s.errors) == 0 { - p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end) - } - return nil - } - } -} - -func (p *Parser) parseLiteral() (expr *Expr) { - - offset := p.s.loc.Offset - loc := p.s.Loc() - - defer func() { - if expr != nil { - loc.Text = p.s.Text(offset, p.s.lastEnd) - expr.SetLoc(loc) - } - }() - - var negated bool - if p.s.tok == tokens.Not { - p.scan() - negated = true - } - - switch p.s.tok { - case tokens.Some: - if negated { - p.illegal("illegal negation of 'some'") - return nil - } - return p.parseSome() - case tokens.Every: - if negated { - p.illegal("illegal negation of 'every'") - return nil - } - return p.parseEvery() 
- default: - s := p.save() - expr := p.parseExpr() - if expr != nil { - expr.Negated = negated - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - // If we find a plain `every` identifier, attempt to parse an every expression, - // add hint if it succeeds. - if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { - var hint bool - t := p.save() - p.restore(s) - if expr := p.futureParser().parseEvery(); expr != nil { - _, hint = expr.Terms.(*Every) - } - p.restore(t) - if hint { - p.hint("`import future.keywords.every` for `every x in xs { ... }` expressions") - } - } - return expr - } - return nil - } -} - -func (p *Parser) parseWith() []*With { - - withs := []*With{} - - for { - - with := With{ - Location: p.s.Loc(), - } - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected ident") - return nil - } - - with.Target = p.parseTerm() - if with.Target == nil { - return nil - } - - switch with.Target.Value.(type) { - case Ref, Var: - break - default: - p.illegal("expected with target path") - } - - if p.s.tok != tokens.As { - p.illegal("expected as keyword") - return nil - } - - p.scan() - - if with.Value = p.parseTermInfixCall(); with.Value == nil { - return nil - } - - with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd) - - withs = append(withs, &with) - - if p.s.tok != tokens.With { - break - } - } - - return withs -} - -func (p *Parser) parseSome() *Expr { - - decl := &SomeDecl{} - decl.SetLoc(p.s.Loc()) - - // Attempt to parse "some x in xs", which will end up in - // SomeDecl{Symbols: ["member(x, xs)"]} - s := p.save() - p.scan() - if term := p.parseTermInfixCall(); term != nil { - if call, ok := term.Value.(Call); ok { - switch call[0].String() { - case Member.Name: - if len(call) != 3 { - p.illegal("illegal domain") - return nil - } - case MemberWithKey.Name: - if len(call) != 4 { - p.illegal("illegal domain") - return nil - } - default: - p.illegal("expected `x in xs` 
or `x, y in xs` expression") - return nil - } - - decl.Symbols = []*Term{term} - expr := NewExpr(decl).SetLocation(decl.Location) - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - return expr - } - } - - p.restore(s) - s = p.save() // new copy for later - var hint bool - p.scan() - if term := p.futureParser().parseTermInfixCall(); term != nil { - if call, ok := term.Value.(Call); ok { - switch call[0].String() { - case Member.Name, MemberWithKey.Name: - hint = true - } - } - } - - // go on as before, it's `some x[...]` or illegal - p.restore(s) - if hint { - p.hint("`import future.keywords.in` for `some x in xs` expressions") - } - - for { // collecting var args - - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected var") - return nil - } - - decl.Symbols = append(decl.Symbols, p.parseVar()) - - p.scan() - - if p.s.tok != tokens.Comma { - break - } - } - - return NewExpr(decl).SetLocation(decl.Location) -} - -func (p *Parser) parseEvery() *Expr { - qb := &Every{} - qb.SetLoc(p.s.Loc()) - - // TODO(sr): We'd get more accurate error messages if we didn't rely on - // parseTermInfixCall here, but parsed "var [, var] in term" manually. - p.scan() - term := p.parseTermInfixCall() - if term == nil { - return nil - } - call, ok := term.Value.(Call) - if !ok { - p.illegal("expected `x[, y] in xs { ... }` expression") - return nil - } - switch call[0].String() { - case Member.Name: // x in xs - if len(call) != 3 { - p.illegal("illegal domain") - return nil - } - qb.Value = call[1] - qb.Domain = call[2] - case MemberWithKey.Name: // k, v in xs - if len(call) != 4 { - p.illegal("illegal domain") - return nil - } - qb.Key = call[1] - qb.Value = call[2] - qb.Domain = call[3] - if _, ok := qb.Key.Value.(Var); !ok { - p.illegal("expected key to be a variable") - return nil - } - default: - p.illegal("expected `x[, y] in xs { ... 
}` expression") - return nil - } - if _, ok := qb.Value.Value.(Var); !ok { - p.illegal("expected value to be a variable") - return nil - } - if p.s.tok == tokens.LBrace { // every x in xs { ... } - p.scan() - body := p.parseBody(tokens.RBrace) - if body == nil { - return nil - } - p.scan() - qb.Body = body - expr := NewExpr(qb).SetLocation(qb.Location) - - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - return expr - } - - p.illegal("missing body") - return nil -} - -func (p *Parser) parseExpr() *Expr { - - lhs := p.parseTermInfixCall() - if lhs == nil { - return nil - } - - if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil { - if rhs := p.parseTermInfixCall(); rhs != nil { - return NewExpr([]*Term{op, lhs, rhs}) - } - return nil - } - - // NOTE(tsandall): the top-level call term is converted to an expr because - // the evaluator does not support the call term type (nested calls are - // rewritten by the compiler.) - if call, ok := lhs.Value.(Call); ok { - return NewExpr([]*Term(call)) - } - - return NewExpr(lhs) -} - -// parseTermInfixCall consumes the next term from the input and returns it. If a -// term cannot be parsed the return value is nil and error will be recorded. The -// scanner will be advanced to the next token before returning. -// By starting out with infix relations (==, !=, <, etc) and further calling the -// other binary operators (|, &, arithmetics), it constitutes the binding -// precedence. -func (p *Parser) parseTermInfixCall() *Term { - return p.parseTermIn(nil, true, p.s.loc.Offset) -} - -func (p *Parser) parseTermInfixCallInList() *Term { - return p.parseTermIn(nil, false, p.s.loc.Offset) -} - -func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term { - // NOTE(sr): `in` is a bit special: besides `lhs in rhs`, it also - // supports `key, val in rhs`, so it can have an optional second lhs. 
- // `keyVal` triggers if we attempt to parse a second lhs argument (`mhs`). - if lhs == nil { - lhs = p.parseTermRelation(nil, offset) - } - if lhs != nil { - if keyVal && p.s.tok == tokens.Comma { // second "lhs", or "middle hand side" - s := p.save() - p.scan() - if mhs := p.parseTermRelation(nil, offset); mhs != nil { - if op := p.parseTermOpName(MemberWithKey.Ref(), tokens.In); op != nil { - if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, mhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.In: - return p.parseTermIn(call, keyVal, offset) - default: - return call - } - } - } - } - p.restore(s) - } - if op := p.parseTermOpName(Member.Ref(), tokens.In); op != nil { - if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.In: - return p.parseTermIn(call, keyVal, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermRelation(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermOr(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil { - if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte: - return p.parseTermRelation(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermOr(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermAnd(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Or); op != nil { - if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Or: - 
return p.parseTermOr(call, offset) - default: - return call - } - } - } - return lhs - } - return nil -} - -func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermArith(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.And); op != nil { - if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.And: - return p.parseTermAnd(call, offset) - default: - return call - } - } - } - return lhs - } - return nil -} - -func (p *Parser) parseTermArith(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermFactor(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil { - if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Add, tokens.Sub: - return p.parseTermArith(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTerm() - } - if lhs != nil { - if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil { - if rhs := p.parseTerm(); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Mul, tokens.Quo, tokens.Rem: - return p.parseTermFactor(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTerm() *Term { - if term, s := p.parsedTermCacheLookup(); s != nil { - p.restore(s) - return term - } - s0 := p.save() - - var term *Term - switch p.s.tok { - case tokens.Null: - term = NullTerm().SetLocation(p.s.Loc()) - case tokens.True: - term = BooleanTerm(true).SetLocation(p.s.Loc()) - case tokens.False: - term = BooleanTerm(false).SetLocation(p.s.Loc()) - case tokens.Sub, tokens.Dot, tokens.Number: - 
term = p.parseNumber() - case tokens.String: - term = p.parseString() - case tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment - term = p.parseVar() - case tokens.LBrack: - term = p.parseArray() - case tokens.LBrace: - term = p.parseSetOrObject() - case tokens.LParen: - offset := p.s.loc.Offset - p.scan() - if r := p.parseTermInfixCall(); r != nil { - if p.s.tok == tokens.RParen { - r.Location.Text = p.s.Text(offset, p.s.tokEnd) - term = r - } else { - p.error(p.s.Loc(), "non-terminated expression") - } - } - default: - p.illegalToken() - } - - term = p.parseTermFinish(term, false) - p.parsedTermCachePush(term, s0) - return term -} - -func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term { - if head == nil { - return nil - } - offset := p.s.loc.Offset - p.doScan(skipws) - - switch p.s.tok { - case tokens.LParen, tokens.Dot, tokens.LBrack: - return p.parseRef(head, offset) - case tokens.Whitespace: - p.scan() - fallthrough - default: - if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) { - return RefTerm(head).SetLocation(head.Location) - } - return head - } -} - -func (p *Parser) parseNumber() *Term { - var prefix string - loc := p.s.Loc() - if p.s.tok == tokens.Sub { - prefix = "-" - p.scan() - switch p.s.tok { - case tokens.Number, tokens.Dot: - break - default: - p.illegal("expected number") - return nil - } - } - if p.s.tok == tokens.Dot { - prefix += "." 
- p.scan() - if p.s.tok != tokens.Number { - p.illegal("expected number") - return nil - } - } - - // Check for multiple leading 0's, parsed by math/big.Float.Parse as decimal 0: - // https://golang.org/pkg/math/big/#Float.Parse - if ((len(prefix) != 0 && prefix[0] == '-') || len(prefix) == 0) && - len(p.s.lit) > 1 && p.s.lit[0] == '0' && p.s.lit[1] == '0' { - p.illegal("expected number") - return nil - } - - // Ensure that the number is valid - s := prefix + p.s.lit - f, ok := new(big.Float).SetString(s) - if !ok { - p.illegal("invalid float") - return nil - } - - // Put limit on size of exponent to prevent non-linear cost of String() - // function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068 - // - // n == sign * mantissa * 2^exp - // 0.5 <= mantissa < 1.0 - // - // The limit is arbitrary. - exp := f.MantExp(nil) - if exp > 1e5 || exp < -1e5 || f.IsInf() { // +/- inf, exp is 0 - p.error(p.s.Loc(), "number too big") - return nil - } - - // Note: Use the original string, do *not* round trip from - // the big.Float as it can cause precision loss. - r := NumberTerm(json.Number(s)).SetLocation(loc) - return r -} - -func (p *Parser) parseString() *Term { - if p.s.lit[0] == '"' { - var s string - err := json.Unmarshal([]byte(p.s.lit), &s) - if err != nil { - p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit) - return nil - } - term := StringTerm(s).SetLocation(p.s.Loc()) - return term - } - return p.parseRawString() -} - -func (p *Parser) parseRawString() *Term { - if len(p.s.lit) < 2 { - return nil - } - term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc()) - return term -} - -// this is the name to use for instantiating an empty set, e.g., `set()`. 
-var setConstructor = RefTerm(VarTerm("set")) - -func (p *Parser) parseCall(operator *Term, offset int) (term *Term) { - - loc := operator.Location - var end int - - defer func() { - p.setLoc(term, loc, offset, end) - }() - - p.scan() // steps over '(' - - if p.s.tok == tokens.RParen { // no args, i.e. set() or any.func() - end = p.s.tokEnd - p.scanWS() - if operator.Equal(setConstructor) { - return SetTerm() - } - return CallTerm(operator) - } - - if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil { - end = p.s.tokEnd - p.scanWS() - return CallTerm(r...) - } - - return nil -} - -func (p *Parser) parseRef(head *Term, offset int) (term *Term) { - - loc := head.Location - var end int - - defer func() { - p.setLoc(term, loc, offset, end) - }() - - switch h := head.Value.(type) { - case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: - // ok - default: - p.errorf(loc, "illegal ref (head cannot be %v)", TypeName(h)) - } - - ref := []*Term{head} - - for { - switch p.s.tok { - case tokens.Dot: - p.scanWS() - if p.s.tok != tokens.Ident { - p.illegal("expected %v", tokens.Ident) - return nil - } - ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc())) - p.scanWS() - case tokens.LParen: - term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset) - if term != nil { - switch p.s.tok { - case tokens.Whitespace: - p.scan() - end = p.s.lastEnd - return term - case tokens.Dot, tokens.LBrack: - term = p.parseRef(term, offset) - } - } - end = p.s.tokEnd - return term - case tokens.LBrack: - p.scan() - if term := p.parseTermInfixCall(); term != nil { - if p.s.tok != tokens.RBrack { - p.illegal("expected %v", tokens.LBrack) - return nil - } - ref = append(ref, term) - p.scanWS() - } else { - return nil - } - case tokens.Whitespace: - end = p.s.lastEnd - p.scan() - return RefTerm(ref...) - default: - end = p.s.lastEnd - return RefTerm(ref...) 
- } - } -} - -func (p *Parser) parseArray() (term *Term) { - - loc := p.s.Loc() - offset := p.s.loc.Offset - - defer func() { - p.setLoc(term, loc, offset, p.s.tokEnd) - }() - - p.scan() - - if p.s.tok == tokens.RBrack { - return ArrayTerm() - } - - potentialComprehension := true - - // Skip leading commas, eg [, x, y] - // Supported for backwards compatibility. In the future - // we should make this a parse error. - if p.s.tok == tokens.Comma { - potentialComprehension = false - p.scan() - } - - s := p.save() - - // NOTE(tsandall): The parser cannot attempt a relational term here because - // of ambiguity around comprehensions. For example, given: - // - // {1 | 1} - // - // Does this represent a set comprehension or a set containing binary OR - // call? We resolve the ambiguity by prioritizing comprehensions. - head := p.parseTerm() - - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrack: - return ArrayTerm(head) - case tokens.Comma: - p.scan() - if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil { - return NewTerm(NewArray(terms...)) - } - return nil - case tokens.Or: - if potentialComprehension { - // Try to parse as if it is an array comprehension - p.scan() - if body := p.parseBody(tokens.RBrack); body != nil { - return ArrayComprehensionTerm(head, body) - } - if p.s.tok != tokens.Comma { - return nil - } - } - // fall back to parsing as a normal array definition - } - - p.restore(s) - - if terms := p.parseTermList(tokens.RBrack, nil); terms != nil { - return NewTerm(NewArray(terms...)) - } - return nil -} - -func (p *Parser) parseSetOrObject() (term *Term) { - loc := p.s.Loc() - offset := p.s.loc.Offset - - defer func() { - p.setLoc(term, loc, offset, p.s.tokEnd) - }() - - p.scan() - - if p.s.tok == tokens.RBrace { - return ObjectTerm() - } - - potentialComprehension := true - - // Skip leading commas, eg {, x, y} - // Supported for backwards compatibility. In the future - // we should make this a parse error. 
- if p.s.tok == tokens.Comma { - potentialComprehension = false - p.scan() - } - - s := p.save() - - // Try parsing just a single term first to give comprehensions higher - // priority to "or" calls in ambiguous situations. Eg: { a | b } - // will be a set comprehension. - // - // Note: We don't know yet if it is a set or object being defined. - head := p.parseTerm() - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.Or: - if potentialComprehension { - return p.parseSet(s, head, potentialComprehension) - } - case tokens.RBrace, tokens.Comma: - return p.parseSet(s, head, potentialComprehension) - case tokens.Colon: - return p.parseObject(head, potentialComprehension) - } - - p.restore(s) - - head = p.parseTermInfixCallInList() - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - return p.parseSet(s, head, false) - case tokens.Colon: - // It still might be an object comprehension, eg { a+1: b | ... } - return p.parseObject(head, potentialComprehension) - } - - p.illegal("non-terminated set") - return nil -} - -func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term { - switch p.s.tok { - case tokens.RBrace: - return SetTerm(head) - case tokens.Comma: - p.scan() - if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil { - return SetTerm(terms...) - } - case tokens.Or: - if potentialComprehension { - // Try to parse as if it is a set comprehension - p.scan() - if body := p.parseBody(tokens.RBrace); body != nil { - return SetComprehensionTerm(head, body) - } - if p.s.tok != tokens.Comma { - return nil - } - } - // Fall back to parsing as normal set definition - p.restore(s) - if terms := p.parseTermList(tokens.RBrace, nil); terms != nil { - return SetTerm(terms...) 
- } - } - return nil -} - -func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term { - // NOTE(tsandall): Assumption: this function is called after parsing the key - // of the head element and then receiving a colon token from the scanner. - // Advance beyond the colon and attempt to parse an object. - if p.s.tok != tokens.Colon { - panic("expected colon") - } - p.scan() - - s := p.save() - - // NOTE(sr): We first try to parse the value as a term (`v`), and see - // if we can parse `{ x: v | ...}` as a comprehension. - // However, if we encounter either a Comma or an RBace, it cannot be - // parsed as a comprehension -- so we save double work further down - // where `parseObjectFinish(k, v, false)` would only exercise the - // same code paths once more. - v := p.parseTerm() - if v == nil { - return nil - } - - potentialRelation := true - if potentialComprehension { - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - potentialRelation = false - fallthrough - case tokens.Or: - if term := p.parseObjectFinish(k, v, true); term != nil { - return term - } - } - } - - p.restore(s) - - if potentialRelation { - v := p.parseTermInfixCallInList() - if v == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - return p.parseObjectFinish(k, v, false) - } - } - - p.illegal("non-terminated object") - return nil -} - -func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term { - switch p.s.tok { - case tokens.RBrace: - return ObjectTerm([2]*Term{key, val}) - case tokens.Or: - if potentialComprehension { - p.scan() - if body := p.parseBody(tokens.RBrace); body != nil { - return ObjectComprehensionTerm(key, val, body) - } - } else { - p.illegal("non-terminated object") - } - case tokens.Comma: - p.scan() - if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil { - return ObjectTerm(r...) 
- } - } - return nil -} - -func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term { - if p.s.tok == end { - return r - } - for { - term := p.parseTermInfixCallInList() - if term != nil { - r = append(r, term) - switch p.s.tok { - case end: - return r - case tokens.Comma: - p.scan() - if p.s.tok == end { - return r - } - continue - default: - p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) - return nil - } - } - return nil - } -} - -func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term { - if p.s.tok == end { - return r - } - for { - key := p.parseTermInfixCallInList() - if key != nil { - switch p.s.tok { - case tokens.Colon: - p.scan() - if val := p.parseTermInfixCallInList(); val != nil { - r = append(r, [2]*Term{key, val}) - switch p.s.tok { - case end: - return r - case tokens.Comma: - p.scan() - if p.s.tok == end { - return r - } - continue - default: - p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) - return nil - } - } - default: - p.illegal(fmt.Sprintf("expected %q", tokens.Colon)) - return nil - } - } - return nil - } -} - -func (p *Parser) parseTermOp(values ...tokens.Token) *Term { - for i := range values { - if p.s.tok == values[i] { - r := RefTerm(VarTerm(fmt.Sprint(p.s.tok)).SetLocation(p.s.Loc())).SetLocation(p.s.Loc()) - p.scan() - return r - } - } - return nil -} - -func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term { - for i := range values { - if p.s.tok == values[i] { - for _, r := range ref { - r.SetLocation(p.s.Loc()) - } - t := RefTerm(ref...) 
- t.SetLocation(p.s.Loc()) - p.scan() - return t - } - } - return nil -} - -func (p *Parser) parseVar() *Term { - - s := p.s.lit - - term := VarTerm(s).SetLocation(p.s.Loc()) - - // Update wildcard values with unique identifiers - if term.Equal(Wildcard) { - term.Value = Var(p.genwildcard()) - } - - return term -} - -func (p *Parser) genwildcard() string { - c := p.s.wildcard - p.s.wildcard++ - return fmt.Sprintf("%v%d", WildcardPrefix, c) -} - -func (p *Parser) error(loc *location.Location, reason string) { - p.errorf(loc, reason) -} - -func (p *Parser) errorf(loc *location.Location, f string, a ...interface{}) { - msg := strings.Builder{} - msg.WriteString(fmt.Sprintf(f, a...)) - - switch len(p.s.hints) { - case 0: // nothing to do - case 1: - msg.WriteString(" (hint: ") - msg.WriteString(p.s.hints[0]) - msg.WriteRune(')') - default: - msg.WriteString(" (hints: ") - for i, h := range p.s.hints { - if i > 0 { - msg.WriteString(", ") - } - msg.WriteString(h) - } - msg.WriteRune(')') - } - - p.s.errors = append(p.s.errors, &Error{ - Code: ParseErr, - Message: msg.String(), - Location: loc, - Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset), - }) - p.s.hints = nil -} - -func (p *Parser) hint(f string, a ...interface{}) { - p.s.hints = append(p.s.hints, fmt.Sprintf(f, a...)) -} - -func (p *Parser) illegal(note string, a ...interface{}) { - tok := p.s.tok.String() - - if p.s.tok == tokens.Illegal { - p.errorf(p.s.Loc(), "illegal token") - return - } - - tokType := "token" - if tokens.IsKeyword(p.s.tok) { - tokType = "keyword" - } - if _, ok := futureKeywords[p.s.tok.String()]; ok { - tokType = "keyword" - } - - note = fmt.Sprintf(note, a...) 
- if len(note) > 0 { - p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, note) - } else { - p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType) - } -} - -func (p *Parser) illegalToken() { - p.illegal("") -} - -func (p *Parser) scan() { - p.doScan(true) -} - -func (p *Parser) scanWS() { - p.doScan(false) -} - -func (p *Parser) doScan(skipws bool) { - - // NOTE(tsandall): the last position is used to compute the "text" field for - // complex AST nodes. Whitespace never affects the last position of an AST - // node so do not update it when scanning. - if p.s.tok != tokens.Whitespace { - p.s.lastEnd = p.s.tokEnd - p.s.skippedNL = false - } - - var errs []scanner.Error - for { - var pos scanner.Position - p.s.tok, pos, p.s.lit, errs = p.s.s.Scan() - - p.s.tokEnd = pos.End - p.s.loc.Row = pos.Row - p.s.loc.Col = pos.Col - p.s.loc.Offset = pos.Offset - p.s.loc.Text = p.s.Text(pos.Offset, pos.End) - p.s.loc.Tabs = pos.Tabs - - for _, err := range errs { - p.error(p.s.Loc(), err.Message) - } - - if len(errs) > 0 { - p.s.tok = tokens.Illegal - } - - if p.s.tok == tokens.Whitespace { - if p.s.lit == "\n" { - p.s.skippedNL = true - } - if skipws { - continue - } - } - - if p.s.tok != tokens.Comment { - break - } - - // For backwards compatibility leave a nil - // Text value if there is no text rather than - // an empty string. 
- var commentText []byte - if len(p.s.lit) > 1 { - commentText = []byte(p.s.lit[1:]) - } - comment := NewComment(commentText) - comment.SetLoc(p.s.Loc()) - p.s.comments = append(p.s.comments, comment) - } -} - -func (p *Parser) save() *state { - cpy := *p.s - s := *cpy.s - cpy.s = &s - return &cpy -} - -func (p *Parser) restore(s *state) { - p.s = s -} - -func setLocRecursive(x interface{}, loc *location.Location) { - NewGenericVisitor(func(x interface{}) bool { - if node, ok := x.(Node); ok { - node.SetLoc(loc) - } - return false - }).Walk(x) -} - -func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term { - if term != nil { - cpy := *loc - term.Location = &cpy - term.Location.Text = p.s.Text(offset, end) - } - return term -} - -func (p *Parser) validateDefaultRuleValue(rule *Rule) bool { - if rule.Head.Value == nil { - p.error(rule.Loc(), "illegal default rule (must have a value)") - return false - } - - valid := true - vis := NewGenericVisitor(func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures - return true - case Ref, Var, Call: - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (value cannot contain %v)", TypeName(x))) - valid = false - return true - } - return false - }) - - vis.Walk(rule.Head.Value.Value) - return valid -} - -func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool { - - valid := true - vars := NewVarSet() - - vis := NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case Var: - if vars.Contains(x) { - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", x)) - valid = false - return true - } - vars.Add(x) - - case *Term: - switch v := x.Value.(type) { - case Var: // do nothing - default: - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", TypeName(v))) - valid = false - return true - } - } - - return false - }) - - 
vis.Walk(rule.Head.Args) - return valid -} - -// We explicitly use yaml unmarshalling, to accommodate for the '_' in 'related_resources', -// which isn't handled properly by json for some reason. -type rawAnnotation struct { - Scope string `yaml:"scope"` - Title string `yaml:"title"` - Entrypoint bool `yaml:"entrypoint"` - Description string `yaml:"description"` - Organizations []string `yaml:"organizations"` - RelatedResources []interface{} `yaml:"related_resources"` - Authors []interface{} `yaml:"authors"` - Schemas []map[string]any `yaml:"schemas"` - Custom map[string]interface{} `yaml:"custom"` -} - -type metadataParser struct { - buf *bytes.Buffer - comments []*Comment - loc *location.Location -} - -func newMetadataParser(loc *Location) *metadataParser { - return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)} -} - -func (b *metadataParser) Append(c *Comment) { - b.buf.Write(bytes.TrimPrefix(c.Text, []byte(" "))) - b.buf.WriteByte('\n') - b.comments = append(b.comments, c) -} - -var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? 
line ([[:digit:]]+):`) - -func (b *metadataParser) Parse() (*Annotations, error) { - - var raw rawAnnotation - - if len(bytes.TrimSpace(b.buf.Bytes())) == 0 { - return nil, fmt.Errorf("expected METADATA block, found whitespace") - } - - if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil { - var comment *Comment - match := yamlLineErrRegex.FindStringSubmatch(err.Error()) - if len(match) == 2 { - index, err2 := strconv.Atoi(match[1]) - if err2 == nil { - if index >= len(b.comments) { - comment = b.comments[len(b.comments)-1] - } else { - comment = b.comments[index] - } - b.loc = comment.Location - } - } - - if match == nil && len(b.comments) > 0 { - b.loc = b.comments[0].Location - } - - return nil, augmentYamlError(err, b.comments) - } - - var result Annotations - result.comments = b.comments - result.Scope = raw.Scope - result.Entrypoint = raw.Entrypoint - result.Title = raw.Title - result.Description = raw.Description - result.Organizations = raw.Organizations - - for _, v := range raw.RelatedResources { - rr, err := parseRelatedResource(v) - if err != nil { - return nil, fmt.Errorf("invalid related-resource definition %s: %w", v, err) - } - result.RelatedResources = append(result.RelatedResources, rr) - } - - for _, pair := range raw.Schemas { - k, v := unwrapPair(pair) - - var a SchemaAnnotation - var err error - - a.Path, err = ParseRef(k) - if err != nil { - return nil, fmt.Errorf("invalid document reference") - } - - switch v := v.(type) { - case string: - a.Schema, err = parseSchemaRef(v) - if err != nil { - return nil, err - } - case map[string]any: - w, err := convertYAMLMapKeyTypes(v, nil) - if err != nil { - return nil, fmt.Errorf("invalid schema definition: %w", err) - } - a.Definition = &w - default: - return nil, fmt.Errorf("invalid schema declaration for path %q", k) - } - - result.Schemas = append(result.Schemas, &a) - } - - for _, v := range raw.Authors { - author, err := parseAuthor(v) - if err != nil { - return nil, fmt.Errorf("invalid 
author definition %s: %w", v, err) - } - result.Authors = append(result.Authors, author) - } - - result.Custom = make(map[string]interface{}) - for k, v := range raw.Custom { - val, err := convertYAMLMapKeyTypes(v, nil) - if err != nil { - return nil, err - } - result.Custom[k] = val - } - - result.Location = b.loc - - // recreate original text of entire metadata block for location text attribute - sb := strings.Builder{} - sb.WriteString("# METADATA\n") - - lines := bytes.Split(b.buf.Bytes(), []byte{'\n'}) - - for _, line := range lines[:len(lines)-1] { - sb.WriteString("# ") - sb.Write(line) - sb.WriteByte('\n') - } - - result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n")) - - return &result, nil -} - -// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise -// cryptic error. These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed -// to be correct. -func augmentYamlError(err error, comments []*Comment) error { - // Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol - for _, comment := range comments { - txt := string(comment.Text) - parts := strings.Split(txt, ":") - if len(parts) > 1 { - parts = parts[1:] - var invalidSpaces []string - for partIndex, part := range parts { - if len(part) == 0 && partIndex == len(parts)-1 { - invalidSpaces = []string{} - break - } - - r, _ := utf8.DecodeRuneInString(part) - if r == ' ' || r == '\t' { - invalidSpaces = []string{} - break - } - - invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r)) - } - if len(invalidSpaces) > 0 { - err = fmt.Errorf( - "%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character", - err.Error(), comment.Location.Row, invalidSpaces) - } - } - } - return err -} - -func unwrapPair(pair map[string]interface{}) (string, interface{}) { - for k, v := range pair { - return k, v - } 
- return "", nil -} - -var errInvalidSchemaRef = fmt.Errorf("invalid schema reference") - -// NOTE(tsandall): 'schema' is not registered as a root because it's not -// supported by the compiler or evaluator today. Once we fix that, we can remove -// this function. -func parseSchemaRef(s string) (Ref, error) { - - term, err := ParseTerm(s) - if err == nil { - switch v := term.Value.(type) { - case Var: - if term.Equal(SchemaRootDocument) { - return SchemaRootRef.Copy(), nil - } - case Ref: - if v.HasPrefix(SchemaRootRef) { - return v, nil - } - } - } - - return nil, errInvalidSchemaRef -} - -func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) { - rr, err := convertYAMLMapKeyTypes(rr, nil) - if err != nil { - return nil, err - } - - switch rr := rr.(type) { - case string: - if len(rr) > 0 { - u, err := url.Parse(rr) - if err != nil { - return nil, err - } - return &RelatedResourceAnnotation{Ref: *u}, nil - } - return nil, fmt.Errorf("ref URL may not be empty string") - case map[string]interface{}: - description := strings.TrimSpace(getSafeString(rr, "description")) - ref := strings.TrimSpace(getSafeString(rr, "ref")) - if len(ref) > 0 { - u, err := url.Parse(ref) - if err != nil { - return nil, err - } - return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil - } - return nil, fmt.Errorf("'ref' value required in object") - } - - return nil, fmt.Errorf("invalid value type, must be string or map") -} - -func parseAuthor(a interface{}) (*AuthorAnnotation, error) { - a, err := convertYAMLMapKeyTypes(a, nil) - if err != nil { - return nil, err - } - - switch a := a.(type) { - case string: - return parseAuthorString(a) - case map[string]interface{}: - name := strings.TrimSpace(getSafeString(a, "name")) - email := strings.TrimSpace(getSafeString(a, "email")) - if len(name) > 0 || len(email) > 0 { - return &AuthorAnnotation{name, email}, nil - } - return nil, fmt.Errorf("'name' and/or 'email' values required in object") - } - - 
return nil, fmt.Errorf("invalid value type, must be string or map") -} - -func getSafeString(m map[string]interface{}, k string) string { - if v, found := m[k]; found { - if s, ok := v.(string); ok { - return s - } - } - return "" -} - -const emailPrefix = "<" -const emailSuffix = ">" - -// parseAuthor parses a string into an AuthorAnnotation. If the last word of the input string is enclosed within <>, -// it is extracted as the author's email. The email may not contain whitelines, as it then will be interpreted as -// multiple words. -func parseAuthorString(s string) (*AuthorAnnotation, error) { - parts := strings.Fields(s) - - if len(parts) == 0 { - return nil, fmt.Errorf("author is an empty string") - } - - namePartCount := len(parts) - trailing := parts[namePartCount-1] - var email string - if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) && - strings.HasSuffix(trailing, emailSuffix) { - email = trailing[len(emailPrefix):] - email = email[0 : len(email)-len(emailSuffix)] - namePartCount = namePartCount - 1 - } - - name := strings.Join(parts[0:namePartCount], " ") - - return &AuthorAnnotation{Name: name, Email: email}, nil -} - -func convertYAMLMapKeyTypes(x any, path []string) (any, error) { - var err error - switch x := x.(type) { - case map[any]any: - result := make(map[string]any, len(x)) - for k, v := range x { - str, ok := k.(string) - if !ok { - return nil, fmt.Errorf("invalid map key type(s): %v", strings.Join(path, "/")) - } - result[str], err = convertYAMLMapKeyTypes(v, append(path, str)) - if err != nil { - return nil, err - } - } - return result, nil - case []any: - for i := range x { - x[i], err = convertYAMLMapKeyTypes(x[i], append(path, fmt.Sprintf("%d", i))) - if err != nil { - return nil, err - } - } - return x, nil - default: - return x, nil - } -} - -// futureKeywords is the source of truth for future keywords that will -// eventually become standard keywords inside of Rego. 
-var futureKeywords = map[string]tokens.Token{ - "in": tokens.In, - "every": tokens.Every, - "contains": tokens.Contains, - "if": tokens.If, + return v1.NewParser().WithRegoVersion(DefaultRegoVersion) } func IsFutureKeyword(s string) bool { - _, ok := futureKeywords[s] - return ok -} - -func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { - path := imp.Path.Value.(Ref) - - if len(path) == 1 || !path[1].Equal(StringTerm("keywords")) { - p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`") - return - } - - if imp.Alias != "" { - p.errorf(imp.Path.Location, "`future` imports cannot be aliased") - return - } - - if p.s.s.RegoV1Compatible() { - p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef) - return - } - - kwds := make([]string, 0, len(allowedFutureKeywords)) - for k := range allowedFutureKeywords { - kwds = append(kwds, k) - } - - switch len(path) { - case 2: // all keywords imported, nothing to do - case 3: // one keyword imported - kw, ok := path[2].Value.(String) - if !ok { - p.errorf(imp.Path.Location, "invalid import, must be `future.keywords.x`, e.g. 
`import future.keywords.in`") - return - } - keyword := string(kw) - _, ok = allowedFutureKeywords[keyword] - if !ok { - sort.Strings(kwds) // so the error message is stable - p.errorf(imp.Path.Location, "unexpected keyword, must be one of %v", kwds) - return - } - - kwds = []string{keyword} // overwrite - } - for _, kw := range kwds { - p.s.s.AddKeyword(kw, allowedFutureKeywords[kw]) - } -} - -func (p *Parser) regoV1Import(imp *Import) { - if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) { - p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef) - return - } - - path := imp.Path.Value.(Ref) - - // v1 is only valid option - if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 { - p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef) - return - } - - if p.po.RegoVersion == RegoV1 { - // We're parsing for Rego v1, where the 'rego.v1' import is a no-op. - return - } - - if imp.Alias != "" { - p.errorf(imp.Path.Location, "`rego` imports cannot be aliased") - return - } - - // import all future keywords with the rego.v1 import - kwds := make([]string, 0, len(futureKeywords)) - for k := range futureKeywords { - kwds = append(kwds, k) - } - - if p.s.s.HasKeyword(futureKeywords) && !p.s.s.RegoV1Compatible() { - // We have imported future keywords, but they didn't come from another `rego.v1` import. 
- p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef) - return - } - - p.s.s.SetRegoV1Compatible() - for _, kw := range kwds { - p.s.s.AddKeyword(kw, futureKeywords[kw]) - } + return v1.IsFutureKeywordForRegoVersion(s, RegoV0) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go index 83c87e47b1..2d59616932 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go +++ b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go @@ -1,24 +1,14 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// This file contains extra functions for parsing Rego. -// Most of the parsing is handled by the code in parser.go, -// however, there are additional utilities that are -// helpful for dealing with Rego source inputs (e.g., REPL -// statements, source files, etc.) - package ast import ( - "bytes" "errors" "fmt" - "strings" - "unicode" - "github.com/open-policy-agent/opa/ast/internal/tokens" - astJSON "github.com/open-policy-agent/opa/ast/json" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // MustParseBody returns a parsed body. @@ -30,11 +20,7 @@ func MustParseBody(input string) Body { // MustParseBodyWithOpts returns a parsed body. // If an error occurs during parsing, panic. func MustParseBodyWithOpts(input string, opts ParserOptions) Body { - parsed, err := ParseBodyWithOpts(input, opts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseBodyWithOpts(input, setDefaultRegoVersion(opts)) } // MustParseExpr returns a parsed expression. @@ -66,11 +52,7 @@ func MustParseModule(input string) *Module { // MustParseModuleWithOpts returns a parsed module. // If an error occurs during parsing, panic. 
func MustParseModuleWithOpts(input string, opts ParserOptions) *Module { - parsed, err := ParseModuleWithOpts("", input, opts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseModuleWithOpts(input, setDefaultRegoVersion(opts)) } // MustParsePackage returns a Package. @@ -104,11 +86,7 @@ func MustParseStatement(input string) Statement { } func MustParseStatementWithOpts(input string, popts ParserOptions) Statement { - parsed, err := ParseStatementWithOpts(input, popts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseStatementWithOpts(input, setDefaultRegoVersion(popts)) } // MustParseRef returns a parsed reference. @@ -134,11 +112,7 @@ func MustParseRule(input string) *Rule { // MustParseRuleWithOpts returns a parsed rule. // If an error occurs during parsing, panic. func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule { - parsed, err := ParseRuleWithOpts(input, opts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseRuleWithOpts(input, setDefaultRegoVersion(opts)) } // MustParseTerm returns a parsed term. @@ -154,331 +128,59 @@ func MustParseTerm(input string) *Term { // ParseRuleFromBody returns a rule if the body can be interpreted as a rule // definition. Otherwise, an error is returned. func ParseRuleFromBody(module *Module, body Body) (*Rule, error) { - - if len(body) != 1 { - return nil, fmt.Errorf("multiple expressions cannot be used for rule head") - } - - return ParseRuleFromExpr(module, body[0]) + return v1.ParseRuleFromBody(module, body) } // ParseRuleFromExpr returns a rule if the expression can be interpreted as a // rule definition. 
func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) { - - if len(expr.With) > 0 { - return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head") - } - - if expr.Negated { - return nil, fmt.Errorf("negated expressions cannot be used for rule head") - } - - if _, ok := expr.Terms.(*SomeDecl); ok { - return nil, errors.New("'some' declarations cannot be used for rule head") - } - - if term, ok := expr.Terms.(*Term); ok { - switch v := term.Value.(type) { - case Ref: - if len(v) > 2 { // 2+ dots - return ParseCompleteDocRuleWithDotsFromTerm(module, term) - } - return ParsePartialSetDocRuleFromTerm(module, term) - default: - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(v)) - } - } - - if _, ok := expr.Terms.([]*Term); !ok { - // This is a defensive check in case other kinds of expression terms are - // introduced in the future. - return nil, errors.New("expression cannot be used for rule head") - } - - if expr.IsEquality() { - return parseCompleteRuleFromEq(module, expr) - } else if expr.IsAssignment() { - rule, err := parseCompleteRuleFromEq(module, expr) - if err != nil { - return nil, err - } - rule.Head.Assign = true - return rule, nil - } - - if _, ok := BuiltinMap[expr.Operator().String()]; ok { - return nil, fmt.Errorf("rule name conflicts with built-in function") - } - - return ParseRuleFromCallExpr(module, expr.Terms.([]*Term)) -} - -func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) { - - // ensure the rule location is set to the expr location - // the helper functions called below try to set the location based - // on the terms they've been provided but that is not as accurate. 
- defer func() { - if rule != nil { - rule.Location = expr.Location - rule.Head.Location = expr.Location - } - }() - - lhs, rhs := expr.Operand(0), expr.Operand(1) - if lhs == nil || rhs == nil { - return nil, errors.New("assignment requires two operands") - } - - rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs) - if err == nil { - return rule, nil - } - - rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) - if err == nil { - return rule, nil - } - - return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) + return v1.ParseRuleFromExpr(module, expr) } // ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can // be interpreted as a complete document definition declared with the assignment // operator. func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - - rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) - if err != nil { - return nil, err - } - - rule.Head.Assign = true - - return rule, nil + return v1.ParseCompleteDocRuleFromAssignmentExpr(module, lhs, rhs) } // ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be // interpreted as a complete document definition. func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - var head *Head - - if v, ok := lhs.Value.(Var); ok { - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. - head = VarHead(v, lhs.Location, &lhs.jsonOptions) - } else if r, ok := lhs.Value.(Ref); ok { // groundness ? 
- if _, ok := r[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", r) - } - head = RefHead(r) - if len(r) > 1 && !r[len(r)-1].IsGround() { - return nil, fmt.Errorf("ref not ground") - } - } else { - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(lhs.Value)) - } - head.Value = rhs - head.Location = lhs.Location - head.setJSONOptions(lhs.jsonOptions) - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - return &Rule{ - Location: lhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: lhs.jsonOptions, - generatedBody: true, - }, nil + return v1.ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) } func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) { - ref, ok := term.Value.(Ref) - if !ok { - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(term.Value)) - } - - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location)) - head.generatedValue = true - head.Location = term.Location - head.jsonOptions = term.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) - setJSONOptions(body, &term.jsonOptions) - - return &Rule{ - Location: term.Location, - Head: head, - Body: body, - Module: module, - - jsonOptions: term.jsonOptions, - }, nil + return v1.ParseCompleteDocRuleWithDotsFromTerm(module, term) } // ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be // interpreted as a partial object document definition. 
func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - ref, ok := lhs.Value.(Ref) - if !ok { - return nil, fmt.Errorf("%v cannot be used as rule name", TypeName(lhs.Value)) - } - - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref, rhs) - if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements - head.Name = ref[0].Value.(Var) - head.Key = ref[1] - } - head.Location = rhs.Location - head.jsonOptions = rhs.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - rule := &Rule{ - Location: rhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: rhs.jsonOptions, - } - - return rule, nil + return v1.ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) } // ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted // as a partial set document definition. func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) { - - ref, ok := term.Value.(Ref) - if !ok || len(ref) == 1 { - return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value)) - } - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref) - if len(ref) == 2 { - v, ok := ref[0].Value.(Var) - if !ok { - return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value)) - } - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. 
- head = VarHead(v, ref[0].Location, &ref[0].jsonOptions) - head.Key = ref[1] - } - head.Location = term.Location - head.jsonOptions = term.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) - setJSONOptions(body, &term.jsonOptions) - - rule := &Rule{ - Location: term.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: term.jsonOptions, - } - - return rule, nil + return v1.ParsePartialSetDocRuleFromTerm(module, term) } // ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a // function definition (e.g., f(x) = y => f(x) = y { true }). func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - - call, ok := lhs.Value.(Call) - if !ok { - return nil, fmt.Errorf("must be call") - } - - ref, ok := call[0].Value.(Ref) - if !ok { - return nil, fmt.Errorf("%vs cannot be used in function signature", TypeName(call[0].Value)) - } - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref, rhs) - head.Location = lhs.Location - head.Args = Args(call[1:]) - head.jsonOptions = lhs.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - rule := &Rule{ - Location: lhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: lhs.jsonOptions, - } - - return rule, nil + return v1.ParseRuleFromCallEqExpr(module, lhs, rhs) } // ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a // function returning true or some value (e.g., f(x) => f(x) = true { true }). 
func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) { - - if len(terms) <= 1 { - return nil, fmt.Errorf("rule argument list must take at least one argument") - } - - loc := terms[0].Location - ref := terms[0].Value.(Ref) - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - head := RefHead(ref, BooleanTerm(true).SetLocation(loc)) - head.Location = loc - head.Args = terms[1:] - head.jsonOptions = terms[0].jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)) - setJSONOptions(body, &terms[0].jsonOptions) - - rule := &Rule{ - Location: loc, - Head: head, - Module: module, - Body: body, - jsonOptions: terms[0].jsonOptions, - } - return rule, nil + return v1.ParseRuleFromCallExpr(module, terms) } // ParseImports returns a slice of Import objects. func ParseImports(input string) ([]*Import, error) { - stmts, _, err := ParseStatements("", input) - if err != nil { - return nil, err - } - result := []*Import{} - for _, stmt := range stmts { - if imp, ok := stmt.(*Import); ok { - result = append(result, imp) - } else { - return nil, fmt.Errorf("expected import but got %T", stmt) - } - } - return result, nil + return v1.ParseImports(input) } // ParseModule returns a parsed Module object. @@ -492,11 +194,7 @@ func ParseModule(filename, input string) (*Module, error) { // For details on Module objects and their fields, see policy.go. // Empty input will return nil, nil. func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) { - stmts, comments, err := ParseStatementsWithOpts(filename, input, popts) - if err != nil { - return nil, err - } - return parseModule(filename, stmts, comments, popts.RegoVersion) + return v1.ParseModuleWithOpts(filename, input, setDefaultRegoVersion(popts)) } // ParseBody returns exactly one body. @@ -508,28 +206,7 @@ func ParseBody(input string) (Body, error) { // ParseBodyWithOpts returns exactly one body. 
It does _not_ set SkipRules: true on its own, // but respects whatever ParserOptions it's been given. func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) { - - stmts, _, err := ParseStatementsWithOpts("", input, popts) - if err != nil { - return nil, err - } - - result := Body{} - - for _, stmt := range stmts { - switch stmt := stmt.(type) { - case Body: - for i := range stmt { - result.Append(stmt[i]) - } - case *Comment: - // skip - default: - return nil, fmt.Errorf("expected body but got %T", stmt) - } - } - - return result, nil + return v1.ParseBodyWithOpts(input, setDefaultRegoVersion(popts)) } // ParseExpr returns exactly one expression. @@ -548,15 +225,7 @@ func ParseExpr(input string) (*Expr, error) { // ParsePackage returns exactly one Package. // If multiple statements are parsed, an error is returned. func ParsePackage(input string) (*Package, error) { - stmt, err := ParseStatement(input) - if err != nil { - return nil, err - } - pkg, ok := stmt.(*Package) - if !ok { - return nil, fmt.Errorf("expected package but got %T", stmt) - } - return pkg, nil + return v1.ParsePackage(input) } // ParseTerm returns exactly one term. @@ -592,18 +261,7 @@ func ParseRef(input string) (Ref, error) { // ParseRuleWithOpts returns exactly one rule. // If multiple rules are parsed, an error is returned. func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) { - stmts, _, err := ParseStatementsWithOpts("", input, opts) - if err != nil { - return nil, err - } - if len(stmts) != 1 { - return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1]) - } - rule, ok := stmts[0].(*Rule) - if !ok { - return nil, fmt.Errorf("expected rule but got %T", stmts[0]) - } - return rule, nil + return v1.ParseRuleWithOpts(input, setDefaultRegoVersion(opts)) } // ParseRule returns exactly one rule. 
@@ -622,20 +280,13 @@ func ParseStatement(input string) (Statement, error) { return nil, err } if len(stmts) != 1 { - return nil, fmt.Errorf("expected exactly one statement") + return nil, errors.New("expected exactly one statement") } return stmts[0], nil } func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) { - stmts, _, err := ParseStatementsWithOpts("", input, popts) - if err != nil { - return nil, err - } - if len(stmts) != 1 { - return nil, fmt.Errorf("expected exactly one statement") - } - return stmts[0], nil + return v1.ParseStatementWithOpts(input, setDefaultRegoVersion(popts)) } // ParseStatements is deprecated. Use ParseStatementWithOpts instead. @@ -646,204 +297,15 @@ func ParseStatements(filename, input string) ([]Statement, []*Comment, error) { // ParseStatementsWithOpts returns a slice of parsed statements. This is the // default return value from the parser. func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) { - - parser := NewParser(). - WithFilename(filename). - WithReader(bytes.NewBufferString(input)). - WithProcessAnnotation(popts.ProcessAnnotation). - WithFutureKeywords(popts.FutureKeywords...). - WithAllFutureKeywords(popts.AllFutureKeywords). - WithCapabilities(popts.Capabilities). - WithSkipRules(popts.SkipRules). - WithJSONOptions(popts.JSONOptions). - WithRegoVersion(popts.RegoVersion). 
- withUnreleasedKeywords(popts.unreleasedKeywords) - - stmts, comments, errs := parser.Parse() - - if len(errs) > 0 { - return nil, nil, errs - } - - return stmts, comments, nil -} - -func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) { - - if len(stmts) == 0 { - return nil, NewError(ParseErr, &Location{File: filename}, "empty module") - } - - var errs Errors - - pkg, ok := stmts[0].(*Package) - if !ok { - loc := stmts[0].Loc() - errs = append(errs, NewError(ParseErr, loc, "package expected")) - } - - mod := &Module{ - Package: pkg, - stmts: stmts, - } - - // The comments slice only holds comments that were not their own statements. - mod.Comments = append(mod.Comments, comments...) - mod.regoVersion = regoCompatibilityMode - - for i, stmt := range stmts[1:] { - switch stmt := stmt.(type) { - case *Import: - mod.Imports = append(mod.Imports, stmt) - if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 { - mod.regoVersion = RegoV0CompatV1 - } - case *Rule: - setRuleModule(stmt, mod) - mod.Rules = append(mod.Rules, stmt) - case Body: - rule, err := ParseRuleFromBody(mod, stmt) - if err != nil { - errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error())) - continue - } - rule.generatedBody = true - mod.Rules = append(mod.Rules, rule) - - // NOTE(tsandall): the statement should now be interpreted as a - // rule so update the statement list. This is important for the - // logic below that associates annotations with statements. - stmts[i+1] = rule - case *Package: - errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package")) - case *Annotations: - mod.Annotations = append(mod.Annotations, stmt) - case *Comment: - // Ignore comments, they're handled above. - default: - panic("illegal value") // Indicates grammar is out-of-sync with code. 
- } - } - - if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 { - for _, rule := range mod.Rules { - for r := rule; r != nil; r = r.Else { - errs = append(errs, CheckRegoV1(r)...) - } - } - } - - if len(errs) > 0 { - return nil, errs - } - - errs = append(errs, attachAnnotationsNodes(mod)...) - - if len(errs) > 0 { - return nil, errs - } - - attachRuleAnnotations(mod) - - return mod, nil -} - -func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool { - for _, kw := range rule.Head.keywords { - if kw == keyword { - return true - } - } - return false -} - -func newScopeAttachmentErr(a *Annotations, want string) *Error { - var have string - if a.node != nil { - have = fmt.Sprintf(" (have %v)", TypeName(a.node)) - } - return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have) -} - -func setRuleModule(rule *Rule, module *Module) { - rule.Module = module - if rule.Else != nil { - setRuleModule(rule.Else, module) - } -} - -func setJSONOptions(x interface{}, jsonOptions *astJSON.Options) { - vis := NewGenericVisitor(func(x interface{}) bool { - if x, ok := x.(customJSON); ok { - x.setJSONOptions(*jsonOptions) - } - return false - }) - vis.Walk(x) + return v1.ParseStatementsWithOpts(filename, input, setDefaultRegoVersion(popts)) } // ParserErrorDetail holds additional details for parser errors. -type ParserErrorDetail struct { - Line string `json:"line"` - Idx int `json:"idx"` -} - -func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail { - - // Find first non-space character at or before offset position. - if offset >= len(bs) { - offset = len(bs) - 1 - } else if offset < 0 { - offset = 0 - } - - for offset > 0 && unicode.IsSpace(rune(bs[offset])) { - offset-- - } - - // Find beginning of line containing offset. 
- begin := offset - - for begin > 0 && !isNewLineChar(bs[begin]) { - begin-- - } +type ParserErrorDetail = v1.ParserErrorDetail - if isNewLineChar(bs[begin]) { - begin++ +func setDefaultRegoVersion(opts ParserOptions) ParserOptions { + if opts.RegoVersion == RegoUndefined { + opts.RegoVersion = DefaultRegoVersion } - - // Find end of line containing offset. - end := offset - - for end < len(bs) && !isNewLineChar(bs[end]) { - end++ - } - - if begin > end { - begin = end - } - - // Extract line and compute index of offset byte in line. - line := bs[begin:end] - index := offset - begin - - return &ParserErrorDetail{ - Line: string(line), - Idx: index, - } -} - -// Lines returns the pretty formatted line output for the error details. -func (d ParserErrorDetail) Lines() []string { - line := strings.TrimLeft(d.Line, "\t") // remove leading tabs - tabCount := len(d.Line) - len(line) - indent := d.Idx - tabCount - if indent < 0 { - indent = 0 - } - return []string{line, strings.Repeat(" ", indent) + "^"} -} - -func isNewLineChar(b byte) bool { - return b == '\r' || b == '\n' + return opts } diff --git a/vendor/github.com/open-policy-agent/opa/ast/policy.go b/vendor/github.com/open-policy-agent/opa/ast/policy.go index 43e9bba4a3..5055e8f23f 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/policy.go +++ b/vendor/github.com/open-policy-agent/opa/ast/policy.go @@ -1,196 +1,113 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package ast import ( - "bytes" - "encoding/json" - "fmt" - "math/rand" - "strings" - "time" - - "github.com/open-policy-agent/opa/ast/internal/tokens" astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -// Initialize seed for term hashing. 
This is intentionally placed before the -// root document sets are constructed to ensure they use the same hash seed as -// subsequent lookups. If the hash seeds are out of sync, lookups will fail. -var hashSeed = rand.New(rand.NewSource(time.Now().UnixNano())) -var hashSeed0 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32()) - // DefaultRootDocument is the default root document. // // All package directives inside source files are implicitly prefixed with the // DefaultRootDocument value. -var DefaultRootDocument = VarTerm("data") +var DefaultRootDocument = v1.DefaultRootDocument // InputRootDocument names the document containing query arguments. -var InputRootDocument = VarTerm("input") +var InputRootDocument = v1.InputRootDocument // SchemaRootDocument names the document containing external data schemas. -var SchemaRootDocument = VarTerm("schema") +var SchemaRootDocument = v1.SchemaRootDocument // FunctionArgRootDocument names the document containing function arguments. // It's only for internal usage, for referencing function arguments between // the index and topdown. -var FunctionArgRootDocument = VarTerm("args") +var FunctionArgRootDocument = v1.FunctionArgRootDocument // FutureRootDocument names the document containing new, to-become-default, // features. -var FutureRootDocument = VarTerm("future") +var FutureRootDocument = v1.FutureRootDocument // RegoRootDocument names the document containing new, to-become-default, // features in a future versioned release. -var RegoRootDocument = VarTerm("rego") +var RegoRootDocument = v1.RegoRootDocument // RootDocumentNames contains the names of top-level documents that can be // referred to in modules and queries. // // Note, the schema document is not currently implemented in the evaluator so it // is not registered as a root document name (yet). 
-var RootDocumentNames = NewSet( - DefaultRootDocument, - InputRootDocument, -) +var RootDocumentNames = v1.RootDocumentNames // DefaultRootRef is a reference to the root of the default document. // // All refs to data in the policy engine's storage layer are prefixed with this ref. -var DefaultRootRef = Ref{DefaultRootDocument} +var DefaultRootRef = v1.DefaultRootRef // InputRootRef is a reference to the root of the input document. // // All refs to query arguments are prefixed with this ref. -var InputRootRef = Ref{InputRootDocument} +var InputRootRef = v1.InputRootRef // SchemaRootRef is a reference to the root of the schema document. // // All refs to schema documents are prefixed with this ref. Note, the schema // document is not currently implemented in the evaluator so it is not // registered as a root document ref (yet). -var SchemaRootRef = Ref{SchemaRootDocument} +var SchemaRootRef = v1.SchemaRootRef // RootDocumentRefs contains the prefixes of top-level documents that all // non-local references start with. -var RootDocumentRefs = NewSet( - NewTerm(DefaultRootRef), - NewTerm(InputRootRef), -) +var RootDocumentRefs = v1.RootDocumentRefs // SystemDocumentKey is the name of the top-level key that identifies the system // document. -var SystemDocumentKey = String("system") +const SystemDocumentKey = v1.SystemDocumentKey // ReservedVars is the set of names that refer to implicitly ground vars. -var ReservedVars = NewVarSet( - DefaultRootDocument.Value.(Var), - InputRootDocument.Value.(Var), -) +var ReservedVars = v1.ReservedVars // Wildcard represents the wildcard variable as defined in the language. -var Wildcard = &Term{Value: Var("_")} +var Wildcard = v1.Wildcard // WildcardPrefix is the special character that all wildcard variables are // prefixed with when the statement they are contained in is parsed. -var WildcardPrefix = "$" +const WildcardPrefix = v1.WildcardPrefix // Keywords contains strings that map to language keywords. 
-var Keywords = KeywordsForRegoVersion(DefaultRegoVersion) +var Keywords = v1.Keywords -var KeywordsV0 = [...]string{ - "not", - "package", - "import", - "as", - "default", - "else", - "with", - "null", - "true", - "false", - "some", -} +var KeywordsV0 = v1.KeywordsV0 -var KeywordsV1 = [...]string{ - "not", - "package", - "import", - "as", - "default", - "else", - "with", - "null", - "true", - "false", - "some", - "if", - "contains", - "in", - "every", -} +var KeywordsV1 = v1.KeywordsV1 func KeywordsForRegoVersion(v RegoVersion) []string { - switch v { - case RegoV0: - return KeywordsV0[:] - case RegoV1, RegoV0CompatV1: - return KeywordsV1[:] - } - return nil + return v1.KeywordsForRegoVersion(v) } // IsKeyword returns true if s is a language keyword. func IsKeyword(s string) bool { - return IsInKeywords(s, Keywords) + return v1.IsKeyword(s) } func IsInKeywords(s string, keywords []string) bool { - for _, x := range keywords { - if x == s { - return true - } - } - return false + return v1.IsInKeywords(s, keywords) } // IsKeywordInRegoVersion returns true if s is a language keyword. func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool { - switch regoVersion { - case RegoV0: - for _, x := range KeywordsV0 { - if x == s { - return true - } - } - case RegoV1, RegoV0CompatV1: - for _, x := range KeywordsV1 { - if x == s { - return true - } - } - } - - return false + return v1.IsKeywordInRegoVersion(s, regoVersion) } type ( // Node represents a node in an AST. Nodes may be statements in a policy module // or elements of an ad-hoc query, expression, etc. - Node interface { - fmt.Stringer - Loc() *Location - SetLoc(*Location) - } + Node = v1.Node // Statement represents a single statement in a policy module. 
- Statement interface { - Node - } + Statement = v1.Statement ) type ( @@ -198,1894 +115,121 @@ type ( // Module represents a collection of policies (defined by rules) // within a namespace (defined by the package) and optional // dependencies on external documents (defined by imports). - Module struct { - Package *Package `json:"package"` - Imports []*Import `json:"imports,omitempty"` - Annotations []*Annotations `json:"annotations,omitempty"` - Rules []*Rule `json:"rules,omitempty"` - Comments []*Comment `json:"comments,omitempty"` - stmts []Statement - regoVersion RegoVersion - } + Module = v1.Module // Comment contains the raw text from the comment in the definition. - Comment struct { - // TODO: these fields have inconsistent JSON keys with other structs in this package. - Text []byte - Location *Location - - jsonOptions astJSON.Options - } + Comment = v1.Comment // Package represents the namespace of the documents produced // by rules inside the module. - Package struct { - Path Ref `json:"path"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Package = v1.Package // Import represents a dependency on a document outside of the policy // namespace. Imports are optional. - Import struct { - Path *Term `json:"path"` - Alias Var `json:"alias,omitempty"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Import = v1.Import // Rule represents a rule as defined in the language. Rules define the // content of documents that represent policy decisions. - Rule struct { - Default bool `json:"default,omitempty"` - Head *Head `json:"head"` - Body Body `json:"body"` - Else *Rule `json:"else,omitempty"` - Location *Location `json:"location,omitempty"` - Annotations []*Annotations `json:"annotations,omitempty"` - - // Module is a pointer to the module containing this rule. If the rule - // was NOT created while parsing/constructing a module, this should be - // left unset. 
The pointer is not included in any standard operations - // on the rule (e.g., printing, comparison, visiting, etc.) - Module *Module `json:"-"` - - generatedBody bool - jsonOptions astJSON.Options - } + Rule = v1.Rule // Head represents the head of a rule. - Head struct { - Name Var `json:"name,omitempty"` - Reference Ref `json:"ref,omitempty"` - Args Args `json:"args,omitempty"` - Key *Term `json:"key,omitempty"` - Value *Term `json:"value,omitempty"` - Assign bool `json:"assign,omitempty"` - Location *Location `json:"location,omitempty"` - - keywords []tokens.Token - generatedValue bool - jsonOptions astJSON.Options - } + Head = v1.Head // Args represents zero or more arguments to a rule. - Args []*Term + Args = v1.Args // Body represents one or more expressions contained inside a rule or user // function. - Body []*Expr + Body = v1.Body // Expr represents a single expression contained inside the body of a rule. - Expr struct { - With []*With `json:"with,omitempty"` - Terms interface{} `json:"terms"` - Index int `json:"index"` - Generated bool `json:"generated,omitempty"` - Negated bool `json:"negated,omitempty"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - generatedFrom *Expr - generates []*Expr - } + Expr = v1.Expr // SomeDecl represents a variable declaration statement. The symbols are variables. - SomeDecl struct { - Symbols []*Term `json:"symbols"` - Location *Location `json:"location,omitempty"` + SomeDecl = v1.SomeDecl - jsonOptions astJSON.Options - } - - Every struct { - Key *Term `json:"key"` - Value *Term `json:"value"` - Domain *Term `json:"domain"` - Body Body `json:"body"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Every = v1.Every // With represents a modifier on an expression. 
- With struct { - Target *Term `json:"target"` - Value *Term `json:"value"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + With = v1.With ) -// Compare returns an integer indicating whether mod is less than, equal to, -// or greater than other. -func (mod *Module) Compare(other *Module) int { - if mod == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := mod.Package.Compare(other.Package); cmp != 0 { - return cmp - } - if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 { - return cmp - } - if cmp := annotationsCompare(mod.Annotations, other.Annotations); cmp != 0 { - return cmp - } - return rulesCompare(mod.Rules, other.Rules) -} - -// Copy returns a deep copy of mod. -func (mod *Module) Copy() *Module { - cpy := *mod - cpy.Rules = make([]*Rule, len(mod.Rules)) - - nodes := make(map[Node]Node, len(mod.Rules)+len(mod.Imports)+1 /* package */) - - for i := range mod.Rules { - cpy.Rules[i] = mod.Rules[i].Copy() - cpy.Rules[i].Module = &cpy - nodes[mod.Rules[i]] = cpy.Rules[i] - } - - cpy.Imports = make([]*Import, len(mod.Imports)) - for i := range mod.Imports { - cpy.Imports[i] = mod.Imports[i].Copy() - nodes[mod.Imports[i]] = cpy.Imports[i] - } - - cpy.Package = mod.Package.Copy() - nodes[mod.Package] = cpy.Package - - cpy.Annotations = make([]*Annotations, len(mod.Annotations)) - for i, a := range mod.Annotations { - cpy.Annotations[i] = a.Copy(nodes[a.node]) - } - - cpy.Comments = make([]*Comment, len(mod.Comments)) - for i := range mod.Comments { - cpy.Comments[i] = mod.Comments[i].Copy() - } - - cpy.stmts = make([]Statement, len(mod.stmts)) - for i := range mod.stmts { - cpy.stmts[i] = nodes[mod.stmts[i]] - } - - return &cpy -} - -// Equal returns true if mod equals other. 
-func (mod *Module) Equal(other *Module) bool { - return mod.Compare(other) == 0 -} - -func (mod *Module) String() string { - byNode := map[Node][]*Annotations{} - for _, a := range mod.Annotations { - byNode[a.node] = append(byNode[a.node], a) - } - - appendAnnotationStrings := func(buf []string, node Node) []string { - if as, ok := byNode[node]; ok { - for i := range as { - buf = append(buf, "# METADATA") - buf = append(buf, "# "+as[i].String()) - } - } - return buf - } - - buf := []string{} - buf = appendAnnotationStrings(buf, mod.Package) - buf = append(buf, mod.Package.String()) - - if len(mod.Imports) > 0 { - buf = append(buf, "") - for _, imp := range mod.Imports { - buf = appendAnnotationStrings(buf, imp) - buf = append(buf, imp.String()) - } - } - if len(mod.Rules) > 0 { - buf = append(buf, "") - for _, rule := range mod.Rules { - buf = appendAnnotationStrings(buf, rule) - buf = append(buf, rule.stringWithOpts(toStringOpts{regoVersion: mod.regoVersion})) - } - } - return strings.Join(buf, "\n") -} - -// RuleSet returns a RuleSet containing named rules in the mod. -func (mod *Module) RuleSet(name Var) RuleSet { - rs := NewRuleSet() - for _, rule := range mod.Rules { - if rule.Head.Name.Equal(name) { - rs.Add(rule) - } - } - return rs -} - -// UnmarshalJSON parses bs and stores the result in mod. The rules in the module -// will have their module pointer set to mod. -func (mod *Module) UnmarshalJSON(bs []byte) error { - - // Declare a new type and use a type conversion to avoid recursively calling - // Module#UnmarshalJSON. 
- type module Module - - if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil { - return err - } - - WalkRules(mod, func(rule *Rule) bool { - rule.Module = mod - return false - }) - - return nil -} - -func (mod *Module) regoV1Compatible() bool { - return mod.regoVersion == RegoV1 || mod.regoVersion == RegoV0CompatV1 -} - -func (mod *Module) RegoVersion() RegoVersion { - return mod.regoVersion -} - -// SetRegoVersion sets the RegoVersion for the module. -// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences. -func (mod *Module) SetRegoVersion(v RegoVersion) { - mod.regoVersion = v -} - // NewComment returns a new Comment object. func NewComment(text []byte) *Comment { - return &Comment{ - Text: text, - } -} - -// Loc returns the location of the comment in the definition. -func (c *Comment) Loc() *Location { - if c == nil { - return nil - } - return c.Location -} - -// SetLoc sets the location on c. -func (c *Comment) SetLoc(loc *Location) { - c.Location = loc -} - -func (c *Comment) String() string { - return "#" + string(c.Text) -} - -// Copy returns a deep copy of c. -func (c *Comment) Copy() *Comment { - cpy := *c - cpy.Text = make([]byte, len(c.Text)) - copy(cpy.Text, c.Text) - return &cpy -} - -// Equal returns true if this comment equals the other comment. -// Unlike other equality checks on AST nodes, comment equality -// depends on location. -func (c *Comment) Equal(other *Comment) bool { - return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text) -} - -func (c *Comment) setJSONOptions(opts astJSON.Options) { - // Note: this is not used for location since Comments use default JSON marshaling - // behavior with struct field names in JSON. - c.jsonOptions = opts - if c.Location != nil { - c.Location.JSONOptions = opts - } -} - -// Compare returns an integer indicating whether pkg is less than, equal to, -// or greater than other. 
-func (pkg *Package) Compare(other *Package) int { - return Compare(pkg.Path, other.Path) -} - -// Copy returns a deep copy of pkg. -func (pkg *Package) Copy() *Package { - cpy := *pkg - cpy.Path = pkg.Path.Copy() - return &cpy -} - -// Equal returns true if pkg is equal to other. -func (pkg *Package) Equal(other *Package) bool { - return pkg.Compare(other) == 0 -} - -// Loc returns the location of the Package in the definition. -func (pkg *Package) Loc() *Location { - if pkg == nil { - return nil - } - return pkg.Location -} - -// SetLoc sets the location on pkg. -func (pkg *Package) SetLoc(loc *Location) { - pkg.Location = loc -} - -func (pkg *Package) String() string { - if pkg == nil { - return "" - } else if len(pkg.Path) <= 1 { - return fmt.Sprintf("package ", pkg.Path) - } - // Omit head as all packages have the DefaultRootDocument prepended at parse time. - path := make(Ref, len(pkg.Path)-1) - path[0] = VarTerm(string(pkg.Path[1].Value.(String))) - copy(path[1:], pkg.Path[2:]) - return fmt.Sprintf("package %v", path) -} - -func (pkg *Package) setJSONOptions(opts astJSON.Options) { - pkg.jsonOptions = opts - if pkg.Location != nil { - pkg.Location.JSONOptions = opts - } -} - -func (pkg *Package) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "path": pkg.Path, - } - - if pkg.jsonOptions.MarshalOptions.IncludeLocation.Package { - if pkg.Location != nil { - data["location"] = pkg.Location - } - } - - return json.Marshal(data) + return v1.NewComment(text) } // IsValidImportPath returns an error indicating if the import path is invalid. // If the import path is valid, err is nil. 
func IsValidImportPath(v Value) (err error) { - switch v := v.(type) { - case Var: - if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) { - return fmt.Errorf("invalid path %v: path must begin with input or data", v) - } - case Ref: - if err := IsValidImportPath(v[0].Value); err != nil { - return fmt.Errorf("invalid path %v: path must begin with input or data", v) - } - for _, e := range v[1:] { - if _, ok := e.Value.(String); !ok { - return fmt.Errorf("invalid path %v: path elements must be strings", v) - } - } - default: - return fmt.Errorf("invalid path %v: path must be ref or var", v) - } - return nil -} - -// Compare returns an integer indicating whether imp is less than, equal to, -// or greater than other. -func (imp *Import) Compare(other *Import) int { - if imp == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := Compare(imp.Path, other.Path); cmp != 0 { - return cmp - } - return Compare(imp.Alias, other.Alias) -} - -// Copy returns a deep copy of imp. -func (imp *Import) Copy() *Import { - cpy := *imp - cpy.Path = imp.Path.Copy() - return &cpy -} - -// Equal returns true if imp is equal to other. -func (imp *Import) Equal(other *Import) bool { - return imp.Compare(other) == 0 -} - -// Loc returns the location of the Import in the definition. -func (imp *Import) Loc() *Location { - if imp == nil { - return nil - } - return imp.Location -} - -// SetLoc sets the location on imp. -func (imp *Import) SetLoc(loc *Location) { - imp.Location = loc -} - -// Name returns the variable that is used to refer to the imported virtual -// document. This is the alias if defined otherwise the last element in the -// path. 
-func (imp *Import) Name() Var { - if len(imp.Alias) != 0 { - return imp.Alias - } - switch v := imp.Path.Value.(type) { - case Var: - return v - case Ref: - if len(v) == 1 { - return v[0].Value.(Var) - } - return Var(v[len(v)-1].Value.(String)) - } - panic("illegal import") -} - -func (imp *Import) String() string { - buf := []string{"import", imp.Path.String()} - if len(imp.Alias) > 0 { - buf = append(buf, "as "+imp.Alias.String()) - } - return strings.Join(buf, " ") -} - -func (imp *Import) setJSONOptions(opts astJSON.Options) { - imp.jsonOptions = opts - if imp.Location != nil { - imp.Location.JSONOptions = opts - } -} - -func (imp *Import) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "path": imp.Path, - } - - if len(imp.Alias) != 0 { - data["alias"] = imp.Alias - } - - if imp.jsonOptions.MarshalOptions.IncludeLocation.Import { - if imp.Location != nil { - data["location"] = imp.Location - } - } - - return json.Marshal(data) -} - -// Compare returns an integer indicating whether rule is less than, equal to, -// or greater than other. -func (rule *Rule) Compare(other *Rule) int { - if rule == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := rule.Head.Compare(other.Head); cmp != 0 { - return cmp - } - if cmp := util.Compare(rule.Default, other.Default); cmp != 0 { - return cmp - } - if cmp := rule.Body.Compare(other.Body); cmp != 0 { - return cmp - } - - if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 { - return cmp - } - - return rule.Else.Compare(other.Else) -} - -// Copy returns a deep copy of rule. 
-func (rule *Rule) Copy() *Rule { - cpy := *rule - cpy.Head = rule.Head.Copy() - cpy.Body = rule.Body.Copy() - - cpy.Annotations = make([]*Annotations, len(rule.Annotations)) - for i, a := range rule.Annotations { - cpy.Annotations[i] = a.Copy(&cpy) - } - - if cpy.Else != nil { - cpy.Else = rule.Else.Copy() - } - return &cpy -} - -// Equal returns true if rule is equal to other. -func (rule *Rule) Equal(other *Rule) bool { - return rule.Compare(other) == 0 -} - -// Loc returns the location of the Rule in the definition. -func (rule *Rule) Loc() *Location { - if rule == nil { - return nil - } - return rule.Location -} - -// SetLoc sets the location on rule. -func (rule *Rule) SetLoc(loc *Location) { - rule.Location = loc -} - -// Path returns a ref referring to the document produced by this rule. If rule -// is not contained in a module, this function panics. -// Deprecated: Poor handling of ref rules. Use `(*Rule).Ref()` instead. -func (rule *Rule) Path() Ref { - if rule.Module == nil { - panic("assertion failed") - } - return rule.Module.Package.Path.Extend(rule.Head.Ref().GroundPrefix()) -} - -// Ref returns a ref referring to the document produced by this rule. If rule -// is not contained in a module, this function panics. The returned ref may -// contain variables in the last position. 
-func (rule *Rule) Ref() Ref { - if rule.Module == nil { - panic("assertion failed") - } - return rule.Module.Package.Path.Extend(rule.Head.Ref()) -} - -func (rule *Rule) String() string { - return rule.stringWithOpts(toStringOpts{}) -} - -type toStringOpts struct { - regoVersion RegoVersion -} - -func (rule *Rule) stringWithOpts(opts toStringOpts) string { - buf := []string{} - if rule.Default { - buf = append(buf, "default") - } - buf = append(buf, rule.Head.stringWithOpts(opts)) - if !rule.Default { - switch opts.regoVersion { - case RegoV1, RegoV0CompatV1: - buf = append(buf, "if") - } - buf = append(buf, "{") - buf = append(buf, rule.Body.String()) - buf = append(buf, "}") - } - if rule.Else != nil { - buf = append(buf, rule.Else.elseString(opts)) - } - return strings.Join(buf, " ") -} - -func (rule *Rule) isFunction() bool { - return len(rule.Head.Args) > 0 -} - -func (rule *Rule) setJSONOptions(opts astJSON.Options) { - rule.jsonOptions = opts - if rule.Location != nil { - rule.Location.JSONOptions = opts - } -} - -func (rule *Rule) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "head": rule.Head, - "body": rule.Body, - } - - if rule.Default { - data["default"] = true - } - - if rule.Else != nil { - data["else"] = rule.Else - } - - if rule.jsonOptions.MarshalOptions.IncludeLocation.Rule { - if rule.Location != nil { - data["location"] = rule.Location - } - } - - if len(rule.Annotations) != 0 { - data["annotations"] = rule.Annotations - } - - return json.Marshal(data) -} - -func (rule *Rule) elseString(opts toStringOpts) string { - var buf []string - - buf = append(buf, "else") - - value := rule.Head.Value - if value != nil { - buf = append(buf, "=") - buf = append(buf, value.String()) - } - - switch opts.regoVersion { - case RegoV1, RegoV0CompatV1: - buf = append(buf, "if") - } - - buf = append(buf, "{") - buf = append(buf, rule.Body.String()) - buf = append(buf, "}") - - if rule.Else != nil { - buf = append(buf, 
rule.Else.elseString(opts)) - } - - return strings.Join(buf, " ") + return v1.IsValidImportPath(v) } // NewHead returns a new Head object. If args are provided, the first will be // used for the key and the second will be used for the value. func NewHead(name Var, args ...*Term) *Head { - head := &Head{ - Name: name, // backcompat - Reference: []*Term{NewTerm(name)}, - } - if len(args) == 0 { - return head - } - head.Key = args[0] - if len(args) == 1 { - return head - } - head.Value = args[1] - if head.Key != nil && head.Value != nil { - head.Reference = head.Reference.Append(args[0]) - } - return head + return v1.NewHead(name, args...) } // VarHead creates a head object, initializes its Name, Location, and Options, // and returns the new head. func VarHead(name Var, location *Location, jsonOpts *astJSON.Options) *Head { - h := NewHead(name) - h.Reference[0].Location = location - if jsonOpts != nil { - h.Reference[0].setJSONOptions(*jsonOpts) - } - return h + return v1.VarHead(name, location, jsonOpts) } // RefHead returns a new Head object with the passed Ref. If args are provided, // the first will be used for the value. func RefHead(ref Ref, args ...*Term) *Head { - head := &Head{} - head.SetRef(ref) - if len(ref) < 2 { - head.Name = ref[0].Value.(Var) - } - if len(args) >= 1 { - head.Value = args[0] - } - return head + return v1.RefHead(ref, args...) } // DocKind represents the collection of document types that can be produced by rules. -type DocKind int +type DocKind = v1.DocKind const ( // CompleteDoc represents a document that is completely defined by the rule. - CompleteDoc = iota + CompleteDoc = v1.CompleteDoc // PartialSetDoc represents a set document that is partially defined by the rule. - PartialSetDoc + PartialSetDoc = v1.PartialSetDoc // PartialObjectDoc represents an object document that is partially defined by the rule. - PartialObjectDoc -) // TODO(sr): Deprecate? - -// DocKind returns the type of document produced by this rule. 
-func (head *Head) DocKind() DocKind { - if head.Key != nil { - if head.Value != nil { - return PartialObjectDoc - } - return PartialSetDoc - } - return CompleteDoc -} + PartialObjectDoc = v1.PartialObjectDoc +) -type RuleKind int +type RuleKind = v1.RuleKind const ( - SingleValue = iota - MultiValue + SingleValue = v1.SingleValue + MultiValue = v1.MultiValue ) -// RuleKind returns the type of rule this is -func (head *Head) RuleKind() RuleKind { - // NOTE(sr): This is bit verbose, since the key is irrelevant for single vs - // multi value, but as good a spot as to assert the invariant. - switch { - case head.Value != nil: - return SingleValue - case head.Key != nil: - return MultiValue - default: - panic("unreachable") - } -} - -// Ref returns the Ref of the rule. If it doesn't have one, it's filled in -// via the Head's Name. -func (head *Head) Ref() Ref { - if len(head.Reference) > 0 { - return head.Reference - } - return Ref{&Term{Value: head.Name}} -} - -// SetRef can be used to set a rule head's Reference -func (head *Head) SetRef(r Ref) { - head.Reference = r -} - -// Compare returns an integer indicating whether head is less than, equal to, -// or greater than other. -func (head *Head) Compare(other *Head) int { - if head == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if head.Assign && !other.Assign { - return -1 - } else if !head.Assign && other.Assign { - return 1 - } - if cmp := Compare(head.Args, other.Args); cmp != 0 { - return cmp - } - if cmp := Compare(head.Reference, other.Reference); cmp != 0 { - return cmp - } - if cmp := Compare(head.Name, other.Name); cmp != 0 { - return cmp - } - if cmp := Compare(head.Key, other.Key); cmp != 0 { - return cmp - } - return Compare(head.Value, other.Value) -} - -// Copy returns a deep copy of head. 
-func (head *Head) Copy() *Head { - cpy := *head - cpy.Reference = head.Reference.Copy() - cpy.Args = head.Args.Copy() - cpy.Key = head.Key.Copy() - cpy.Value = head.Value.Copy() - cpy.keywords = nil - return &cpy -} - -// Equal returns true if this head equals other. -func (head *Head) Equal(other *Head) bool { - return head.Compare(other) == 0 -} - -func (head *Head) String() string { - return head.stringWithOpts(toStringOpts{}) -} - -func (head *Head) stringWithOpts(opts toStringOpts) string { - buf := strings.Builder{} - buf.WriteString(head.Ref().String()) - containsAdded := false - - switch { - case len(head.Args) != 0: - buf.WriteString(head.Args.String()) - case len(head.Reference) == 1 && head.Key != nil: - switch opts.regoVersion { - case RegoV0: - buf.WriteRune('[') - buf.WriteString(head.Key.String()) - buf.WriteRune(']') - default: - containsAdded = true - buf.WriteString(" contains ") - buf.WriteString(head.Key.String()) - } - } - if head.Value != nil { - if head.Assign { - buf.WriteString(" := ") - } else { - buf.WriteString(" = ") - } - buf.WriteString(head.Value.String()) - } else if !containsAdded && head.Name == "" && head.Key != nil { - buf.WriteString(" contains ") - buf.WriteString(head.Key.String()) - } - return buf.String() -} - -func (head *Head) setJSONOptions(opts astJSON.Options) { - head.jsonOptions = opts - if head.Location != nil { - head.Location.JSONOptions = opts - } -} - -func (head *Head) MarshalJSON() ([]byte, error) { - var loc *Location - includeLoc := head.jsonOptions.MarshalOptions.IncludeLocation - if includeLoc.Head { - if head.Location != nil { - loc = head.Location - } - - for _, term := range head.Reference { - if term.Location != nil { - term.jsonOptions.MarshalOptions.IncludeLocation.Term = includeLoc.Term - } - } - } - - // NOTE(sr): we do this to override the rendering of `head.Reference`. - // It's still what'll be used via the default means of encoding/json - // for unmarshaling a json object into a Head struct! 
- type h Head - return json.Marshal(struct { - h - Ref Ref `json:"ref"` - Location *Location `json:"location,omitempty"` - }{ - h: h(*head), - Ref: head.Ref(), - Location: loc, - }) -} - -// Vars returns a set of vars found in the head. -func (head *Head) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - // TODO: improve test coverage for this. - if head.Args != nil { - vis.Walk(head.Args) - } - if head.Key != nil { - vis.Walk(head.Key) - } - if head.Value != nil { - vis.Walk(head.Value) - } - if len(head.Reference) > 0 { - vis.Walk(head.Reference[1:]) - } - return vis.vars -} - -// Loc returns the Location of head. -func (head *Head) Loc() *Location { - if head == nil { - return nil - } - return head.Location -} - -// SetLoc sets the location on head. -func (head *Head) SetLoc(loc *Location) { - head.Location = loc -} - -func (head *Head) HasDynamicRef() bool { - pos := head.Reference.Dynamic() - // Ref is dynamic if it has one non-constant term that isn't the first or last term or if it's a partial set rule. - return pos > 0 && (pos < len(head.Reference)-1 || head.RuleKind() == MultiValue) -} - -// Copy returns a deep copy of a. -func (a Args) Copy() Args { - cpy := Args{} - for _, t := range a { - cpy = append(cpy, t.Copy()) - } - return cpy -} - -func (a Args) String() string { - buf := make([]string, 0, len(a)) - for _, t := range a { - buf = append(buf, t.String()) - } - return "(" + strings.Join(buf, ", ") + ")" -} - -// Loc returns the Location of a. -func (a Args) Loc() *Location { - if len(a) == 0 { - return nil - } - return a[0].Location -} - -// SetLoc sets the location on a. -func (a Args) SetLoc(loc *Location) { - if len(a) != 0 { - a[0].SetLocation(loc) - } -} - -// Vars returns a set of vars that appear in a. -func (a Args) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - vis.Walk(a) - return vis.vars -} - // NewBody returns a new Body containing the given expressions. The indices of // the immediate expressions will be reset. 
func NewBody(exprs ...*Expr) Body { - for i, expr := range exprs { - expr.Index = i - } - return Body(exprs) -} - -// MarshalJSON returns JSON encoded bytes representing body. -func (body Body) MarshalJSON() ([]byte, error) { - // Serialize empty Body to empty array. This handles both the empty case and the - // nil case (whereas by default the result would be null if body was nil.) - if len(body) == 0 { - return []byte(`[]`), nil - } - ret, err := json.Marshal([]*Expr(body)) - return ret, err -} - -// Append adds the expr to the body and updates the expr's index accordingly. -func (body *Body) Append(expr *Expr) { - n := len(*body) - expr.Index = n - *body = append(*body, expr) -} - -// Set sets the expr in the body at the specified position and updates the -// expr's index accordingly. -func (body Body) Set(expr *Expr, pos int) { - body[pos] = expr - expr.Index = pos -} - -// Compare returns an integer indicating whether body is less than, equal to, -// or greater than other. -// -// If body is a subset of other, it is considered less than (and vice versa). -func (body Body) Compare(other Body) int { - minLen := len(body) - if len(other) < minLen { - minLen = len(other) - } - for i := 0; i < minLen; i++ { - if cmp := body[i].Compare(other[i]); cmp != 0 { - return cmp - } - } - if len(body) < len(other) { - return -1 - } - if len(other) < len(body) { - return 1 - } - return 0 -} - -// Copy returns a deep copy of body. -func (body Body) Copy() Body { - cpy := make(Body, len(body)) - for i := range body { - cpy[i] = body[i].Copy() - } - return cpy -} - -// Contains returns true if this body contains the given expression. -func (body Body) Contains(x *Expr) bool { - for _, e := range body { - if e.Equal(x) { - return true - } - } - return false -} - -// Equal returns true if this Body is equal to the other Body. -func (body Body) Equal(other Body) bool { - return body.Compare(other) == 0 -} - -// Hash returns the hash code for the Body. 
-func (body Body) Hash() int { - s := 0 - for _, e := range body { - s += e.Hash() - } - return s -} - -// IsGround returns true if all of the expressions in the Body are ground. -func (body Body) IsGround() bool { - for _, e := range body { - if !e.IsGround() { - return false - } - } - return true -} - -// Loc returns the location of the Body in the definition. -func (body Body) Loc() *Location { - if len(body) == 0 { - return nil - } - return body[0].Location -} - -// SetLoc sets the location on body. -func (body Body) SetLoc(loc *Location) { - if len(body) != 0 { - body[0].SetLocation(loc) - } -} - -func (body Body) String() string { - buf := make([]string, 0, len(body)) - for _, v := range body { - buf = append(buf, v.String()) - } - return strings.Join(buf, "; ") -} - -// Vars returns a VarSet containing variables in body. The params can be set to -// control which vars are included. -func (body Body) Vars(params VarVisitorParams) VarSet { - vis := NewVarVisitor().WithParams(params) - vis.Walk(body) - return vis.Vars() + return v1.NewBody(exprs...) } // NewExpr returns a new Expr object. -func NewExpr(terms interface{}) *Expr { - switch terms.(type) { - case *SomeDecl, *Every, *Term, []*Term: // ok - default: - panic("unreachable") - } - return &Expr{ - Negated: false, - Terms: terms, - Index: 0, - With: nil, - } -} - -// Complement returns a copy of this expression with the negation flag flipped. -func (expr *Expr) Complement() *Expr { - cpy := *expr - cpy.Negated = !cpy.Negated - return &cpy -} - -// Equal returns true if this Expr equals the other Expr. -func (expr *Expr) Equal(other *Expr) bool { - return expr.Compare(other) == 0 -} - -// Compare returns an integer indicating whether expr is less than, equal to, -// or greater than other. -// -// Expressions are compared as follows: -// -// 1. Declarations are always less than other expressions. -// 2. Preceding expression (by Index) is always less than the other expression. -// 3. 
Non-negated expressions are always less than negated expressions. -// 4. Single term expressions are always less than built-in expressions. -// -// Otherwise, the expression terms are compared normally. If both expressions -// have the same terms, the modifiers are compared. -func (expr *Expr) Compare(other *Expr) int { - - if expr == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - - o1 := expr.sortOrder() - o2 := other.sortOrder() - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - - switch { - case expr.Index < other.Index: - return -1 - case expr.Index > other.Index: - return 1 - } - - switch { - case expr.Negated && !other.Negated: - return 1 - case !expr.Negated && other.Negated: - return -1 - } - - switch t := expr.Terms.(type) { - case *Term: - if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 { - return cmp - } - case []*Term: - if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 { - return cmp - } - case *SomeDecl: - if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 { - return cmp - } - case *Every: - if cmp := Compare(t, other.Terms.(*Every)); cmp != 0 { - return cmp - } - } - - return withSliceCompare(expr.With, other.With) -} - -func (expr *Expr) sortOrder() int { - switch expr.Terms.(type) { - case *SomeDecl: - return 0 - case *Term: - return 1 - case []*Term: - return 2 - case *Every: - return 3 - } - return -1 -} - -// CopyWithoutTerms returns a deep copy of expr without its Terms -func (expr *Expr) CopyWithoutTerms() *Expr { - cpy := *expr - - cpy.With = make([]*With, len(expr.With)) - for i := range expr.With { - cpy.With[i] = expr.With[i].Copy() - } - - return &cpy -} - -// Copy returns a deep copy of expr. 
-func (expr *Expr) Copy() *Expr { - - cpy := expr.CopyWithoutTerms() - - switch ts := expr.Terms.(type) { - case *SomeDecl: - cpy.Terms = ts.Copy() - case []*Term: - cpyTs := make([]*Term, len(ts)) - for i := range ts { - cpyTs[i] = ts[i].Copy() - } - cpy.Terms = cpyTs - case *Term: - cpy.Terms = ts.Copy() - case *Every: - cpy.Terms = ts.Copy() - } - - return cpy -} - -// Hash returns the hash code of the Expr. -func (expr *Expr) Hash() int { - s := expr.Index - switch ts := expr.Terms.(type) { - case *SomeDecl: - s += ts.Hash() - case []*Term: - for _, t := range ts { - s += t.Value.Hash() - } - case *Term: - s += ts.Value.Hash() - } - if expr.Negated { - s++ - } - for _, w := range expr.With { - s += w.Hash() - } - return s -} - -// IncludeWith returns a copy of expr with the with modifier appended. -func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr { - cpy := *expr - cpy.With = append(cpy.With, &With{Target: target, Value: value}) - return &cpy -} - -// NoWith returns a copy of expr where the with modifier has been removed. -func (expr *Expr) NoWith() *Expr { - cpy := *expr - cpy.With = nil - return &cpy -} - -// IsEquality returns true if this is an equality expression. -func (expr *Expr) IsEquality() bool { - return isGlobalBuiltin(expr, Var(Equality.Name)) -} - -// IsAssignment returns true if this an assignment expression. -func (expr *Expr) IsAssignment() bool { - return isGlobalBuiltin(expr, Var(Assign.Name)) -} - -// IsCall returns true if this expression calls a function. -func (expr *Expr) IsCall() bool { - _, ok := expr.Terms.([]*Term) - return ok -} - -// IsEvery returns true if this expression is an 'every' expression. -func (expr *Expr) IsEvery() bool { - _, ok := expr.Terms.(*Every) - return ok -} - -// IsSome returns true if this expression is a 'some' expression. 
-func (expr *Expr) IsSome() bool { - _, ok := expr.Terms.(*SomeDecl) - return ok -} - -// Operator returns the name of the function or built-in this expression refers -// to. If this expression is not a function call, returns nil. -func (expr *Expr) Operator() Ref { - op := expr.OperatorTerm() - if op == nil { - return nil - } - return op.Value.(Ref) -} - -// OperatorTerm returns the name of the function or built-in this expression -// refers to. If this expression is not a function call, returns nil. -func (expr *Expr) OperatorTerm() *Term { - terms, ok := expr.Terms.([]*Term) - if !ok || len(terms) == 0 { - return nil - } - return terms[0] -} - -// Operand returns the term at the zero-based pos. If the expr does not include -// at least pos+1 terms, this function returns nil. -func (expr *Expr) Operand(pos int) *Term { - terms, ok := expr.Terms.([]*Term) - if !ok { - return nil - } - idx := pos + 1 - if idx < len(terms) { - return terms[idx] - } - return nil -} - -// Operands returns the built-in function operands. -func (expr *Expr) Operands() []*Term { - terms, ok := expr.Terms.([]*Term) - if !ok { - return nil - } - return terms[1:] -} - -// IsGround returns true if all of the expression terms are ground. -func (expr *Expr) IsGround() bool { - switch ts := expr.Terms.(type) { - case []*Term: - for _, t := range ts[1:] { - if !t.IsGround() { - return false - } - } - case *Term: - return ts.IsGround() - } - return true -} - -// SetOperator sets the expr's operator and returns the expr itself. If expr is -// not a call expr, this function will panic. -func (expr *Expr) SetOperator(term *Term) *Expr { - expr.Terms.([]*Term)[0] = term - return expr -} - -// SetLocation sets the expr's location and returns the expr itself. -func (expr *Expr) SetLocation(loc *Location) *Expr { - expr.Location = loc - return expr -} - -// Loc returns the Location of expr. 
-func (expr *Expr) Loc() *Location { - if expr == nil { - return nil - } - return expr.Location -} - -// SetLoc sets the location on expr. -func (expr *Expr) SetLoc(loc *Location) { - expr.SetLocation(loc) -} - -func (expr *Expr) String() string { - buf := make([]string, 0, 2+len(expr.With)) - if expr.Negated { - buf = append(buf, "not") - } - switch t := expr.Terms.(type) { - case []*Term: - if expr.IsEquality() && validEqAssignArgCount(expr) { - buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2])) - } else { - buf = append(buf, Call(t).String()) - } - case fmt.Stringer: - buf = append(buf, t.String()) - } - - for i := range expr.With { - buf = append(buf, expr.With[i].String()) - } - - return strings.Join(buf, " ") -} - -func (expr *Expr) setJSONOptions(opts astJSON.Options) { - expr.jsonOptions = opts - if expr.Location != nil { - expr.Location.JSONOptions = opts - } -} - -func (expr *Expr) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "terms": expr.Terms, - "index": expr.Index, - } - - if len(expr.With) > 0 { - data["with"] = expr.With - } - - if expr.Generated { - data["generated"] = true - } - - if expr.Negated { - data["negated"] = true - } - - if expr.jsonOptions.MarshalOptions.IncludeLocation.Expr { - if expr.Location != nil { - data["location"] = expr.Location - } - } - - return json.Marshal(data) -} - -// UnmarshalJSON parses the byte array and stores the result in expr. -func (expr *Expr) UnmarshalJSON(bs []byte) error { - v := map[string]interface{}{} - if err := util.UnmarshalJSON(bs, &v); err != nil { - return err - } - return unmarshalExpr(expr, v) -} - -// Vars returns a VarSet containing variables in expr. The params can be set to -// control which vars are included. 
-func (expr *Expr) Vars(params VarVisitorParams) VarSet { - vis := NewVarVisitor().WithParams(params) - vis.Walk(expr) - return vis.Vars() +func NewExpr(terms any) *Expr { + return v1.NewExpr(terms) } // NewBuiltinExpr creates a new Expr object with the supplied terms. // The builtin operator must be the first term. func NewBuiltinExpr(terms ...*Term) *Expr { - return &Expr{Terms: terms} -} - -func (expr *Expr) CogeneratedExprs() []*Expr { - visited := map[*Expr]struct{}{} - visitCogeneratedExprs(expr, func(e *Expr) bool { - if expr.Equal(e) { - return true - } - if _, ok := visited[e]; ok { - return true - } - visited[e] = struct{}{} - return false - }) - - result := make([]*Expr, 0, len(visited)) - for e := range visited { - result = append(result, e) - } - return result -} - -func (expr *Expr) BaseCogeneratedExpr() *Expr { - if expr.generatedFrom == nil { - return expr - } - return expr.generatedFrom.BaseCogeneratedExpr() -} - -func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) { - if parent := expr.generatedFrom; parent != nil { - if stop := f(parent); !stop { - visitCogeneratedExprs(parent, f) - } - } - for _, child := range expr.generates { - if stop := f(child); !stop { - visitCogeneratedExprs(child, f) - } - } -} - -func (d *SomeDecl) String() string { - if call, ok := d.Symbols[0].Value.(Call); ok { - if len(call) == 4 { - return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String() - } - return "some " + call[1].String() + " in " + call[2].String() - } - buf := make([]string, len(d.Symbols)) - for i := range buf { - buf[i] = d.Symbols[i].String() - } - return "some " + strings.Join(buf, ", ") -} - -// SetLoc sets the Location on d. -func (d *SomeDecl) SetLoc(loc *Location) { - d.Location = loc -} - -// Loc returns the Location of d. -func (d *SomeDecl) Loc() *Location { - return d.Location -} - -// Copy returns a deep copy of d. 
-func (d *SomeDecl) Copy() *SomeDecl { - cpy := *d - cpy.Symbols = termSliceCopy(d.Symbols) - return &cpy -} - -// Compare returns an integer indicating whether d is less than, equal to, or -// greater than other. -func (d *SomeDecl) Compare(other *SomeDecl) int { - return termSliceCompare(d.Symbols, other.Symbols) -} - -// Hash returns a hash code of d. -func (d *SomeDecl) Hash() int { - return termSliceHash(d.Symbols) -} - -func (d *SomeDecl) setJSONOptions(opts astJSON.Options) { - d.jsonOptions = opts - if d.Location != nil { - d.Location.JSONOptions = opts - } -} - -func (d *SomeDecl) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "symbols": d.Symbols, - } - - if d.jsonOptions.MarshalOptions.IncludeLocation.SomeDecl { - if d.Location != nil { - data["location"] = d.Location - } - } - - return json.Marshal(data) -} - -func (q *Every) String() string { - if q.Key != nil { - return fmt.Sprintf("every %s, %s in %s { %s }", - q.Key, - q.Value, - q.Domain, - q.Body) - } - return fmt.Sprintf("every %s in %s { %s }", - q.Value, - q.Domain, - q.Body) -} - -func (q *Every) Loc() *Location { - return q.Location -} - -func (q *Every) SetLoc(l *Location) { - q.Location = l -} - -// Copy returns a deep copy of d. -func (q *Every) Copy() *Every { - cpy := *q - cpy.Key = q.Key.Copy() - cpy.Value = q.Value.Copy() - cpy.Domain = q.Domain.Copy() - cpy.Body = q.Body.Copy() - return &cpy -} - -func (q *Every) Compare(other *Every) int { - for _, terms := range [][2]*Term{ - {q.Key, other.Key}, - {q.Value, other.Value}, - {q.Domain, other.Domain}, - } { - if d := Compare(terms[0], terms[1]); d != 0 { - return d - } - } - return q.Body.Compare(other.Body) -} - -// KeyValueVars returns the key and val arguments of an `every` -// expression, if they are non-nil and not wildcards. 
-func (q *Every) KeyValueVars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - if q.Key != nil { - vis.Walk(q.Key) - } - vis.Walk(q.Value) - return vis.vars -} - -func (q *Every) setJSONOptions(opts astJSON.Options) { - q.jsonOptions = opts - if q.Location != nil { - q.Location.JSONOptions = opts - } -} - -func (q *Every) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "key": q.Key, - "value": q.Value, - "domain": q.Domain, - "body": q.Body, - } - - if q.jsonOptions.MarshalOptions.IncludeLocation.Every { - if q.Location != nil { - data["location"] = q.Location - } - } - - return json.Marshal(data) -} - -func (w *With) String() string { - return "with " + w.Target.String() + " as " + w.Value.String() -} - -// Equal returns true if this With is equals the other With. -func (w *With) Equal(other *With) bool { - return Compare(w, other) == 0 -} - -// Compare returns an integer indicating whether w is less than, equal to, or -// greater than other. -func (w *With) Compare(other *With) int { - if w == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := Compare(w.Target, other.Target); cmp != 0 { - return cmp - } - return Compare(w.Value, other.Value) -} - -// Copy returns a deep copy of w. -func (w *With) Copy() *With { - cpy := *w - cpy.Value = w.Value.Copy() - cpy.Target = w.Target.Copy() - return &cpy -} - -// Hash returns the hash code of the With. -func (w With) Hash() int { - return w.Target.Hash() + w.Value.Hash() -} - -// SetLocation sets the location on w. -func (w *With) SetLocation(loc *Location) *With { - w.Location = loc - return w -} - -// Loc returns the Location of w. -func (w *With) Loc() *Location { - if w == nil { - return nil - } - return w.Location -} - -// SetLoc sets the location on w. 
-func (w *With) SetLoc(loc *Location) { - w.Location = loc -} - -func (w *With) setJSONOptions(opts astJSON.Options) { - w.jsonOptions = opts - if w.Location != nil { - w.Location.JSONOptions = opts - } -} - -func (w *With) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "target": w.Target, - "value": w.Value, - } - - if w.jsonOptions.MarshalOptions.IncludeLocation.With { - if w.Location != nil { - data["location"] = w.Location - } - } - - return json.Marshal(data) + return v1.NewBuiltinExpr(terms...) } // Copy returns a deep copy of the AST node x. If x is not an AST node, x is returned unmodified. -func Copy(x interface{}) interface{} { - switch x := x.(type) { - case *Module: - return x.Copy() - case *Package: - return x.Copy() - case *Import: - return x.Copy() - case *Rule: - return x.Copy() - case *Head: - return x.Copy() - case Args: - return x.Copy() - case Body: - return x.Copy() - case *Expr: - return x.Copy() - case *With: - return x.Copy() - case *SomeDecl: - return x.Copy() - case *Every: - return x.Copy() - case *Term: - return x.Copy() - case *ArrayComprehension: - return x.Copy() - case *SetComprehension: - return x.Copy() - case *ObjectComprehension: - return x.Copy() - case Set: - return x.Copy() - case *object: - return x.Copy() - case *Array: - return x.Copy() - case Ref: - return x.Copy() - case Call: - return x.Copy() - case *Comment: - return x.Copy() - } - return x +func Copy(x any) any { + return v1.Copy(x) } // RuleSet represents a collection of rules that produce a virtual document. -type RuleSet []*Rule +type RuleSet = v1.RuleSet // NewRuleSet returns a new RuleSet containing the given rules. func NewRuleSet(rules ...*Rule) RuleSet { - rs := make(RuleSet, 0, len(rules)) - for _, rule := range rules { - rs.Add(rule) - } - return rs -} - -// Add inserts the rule into rs. 
-func (rs *RuleSet) Add(rule *Rule) { - for _, exist := range *rs { - if exist.Equal(rule) { - return - } - } - *rs = append(*rs, rule) -} - -// Contains returns true if rs contains rule. -func (rs RuleSet) Contains(rule *Rule) bool { - for i := range rs { - if rs[i].Equal(rule) { - return true - } - } - return false -} - -// Diff returns a new RuleSet containing rules in rs that are not in other. -func (rs RuleSet) Diff(other RuleSet) RuleSet { - result := NewRuleSet() - for i := range rs { - if !other.Contains(rs[i]) { - result.Add(rs[i]) - } - } - return result -} - -// Equal returns true if rs equals other. -func (rs RuleSet) Equal(other RuleSet) bool { - return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0 -} - -// Merge returns a ruleset containing the union of rules from rs an other. -func (rs RuleSet) Merge(other RuleSet) RuleSet { - result := NewRuleSet() - for i := range rs { - result.Add(rs[i]) - } - for i := range other { - result.Add(other[i]) - } - return result -} - -func (rs RuleSet) String() string { - buf := make([]string, 0, len(rs)) - for _, rule := range rs { - buf = append(buf, rule.String()) - } - return "{" + strings.Join(buf, ", ") + "}" -} - -// Returns true if the equality or assignment expression referred to by expr -// has a valid number of arguments. -func validEqAssignArgCount(expr *Expr) bool { - return len(expr.Operands()) == 2 -} - -// this function checks if the expr refers to a non-namespaced (global) built-in -// function like eq, gt, plus, etc. -func isGlobalBuiltin(expr *Expr, name Var) bool { - terms, ok := expr.Terms.([]*Term) - if !ok { - return false - } - - // NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid - // allocation here. - ref, ok := terms[0].Value.(Ref) - if !ok || len(ref) != 1 { - return false - } - if head, ok := ref[0].Value.(Var); ok { - return head.Equal(name) - } - return false + return v1.NewRuleSet(rules...) 
} diff --git a/vendor/github.com/open-policy-agent/opa/ast/pretty.go b/vendor/github.com/open-policy-agent/opa/ast/pretty.go index b4f05ad501..84e42f9aec 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/pretty.go +++ b/vendor/github.com/open-policy-agent/opa/ast/pretty.go @@ -5,78 +5,14 @@ package ast import ( - "fmt" "io" - "strings" + + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Pretty writes a pretty representation of the AST rooted at x to w. // // This is function is intended for debug purposes when inspecting ASTs. -func Pretty(w io.Writer, x interface{}) { - pp := &prettyPrinter{ - depth: -1, - w: w, - } - NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x) -} - -type prettyPrinter struct { - depth int - w io.Writer -} - -func (pp *prettyPrinter) Before(x interface{}) bool { - switch x.(type) { - case *Term: - default: - pp.depth++ - } - - switch x := x.(type) { - case *Term: - return false - case Args: - if len(x) == 0 { - return false - } - pp.writeType(x) - case *Expr: - extras := []string{} - if x.Negated { - extras = append(extras, "negated") - } - extras = append(extras, fmt.Sprintf("index=%d", x.Index)) - pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " ")) - case Null, Boolean, Number, String, Var: - pp.writeValue(x) - default: - pp.writeType(x) - } - return false -} - -func (pp *prettyPrinter) After(x interface{}) { - switch x.(type) { - case *Term: - default: - pp.depth-- - } -} - -func (pp *prettyPrinter) writeValue(x interface{}) { - pp.writeIndent(fmt.Sprint(x)) -} - -func (pp *prettyPrinter) writeType(x interface{}) { - pp.writeIndent(TypeName(x)) -} - -func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) { - pad := strings.Repeat(" ", pp.depth) - pp.write(pad+f, a...) -} - -func (pp *prettyPrinter) write(f string, a ...interface{}) { - fmt.Fprintf(pp.w, f+"\n", a...) 
+func Pretty(w io.Writer, x any) { + v1.Pretty(w, x) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/schema.go b/vendor/github.com/open-policy-agent/opa/ast/schema.go index 8c96ac624e..979958a3c0 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/schema.go +++ b/vendor/github.com/open-policy-agent/opa/ast/schema.go @@ -5,59 +5,13 @@ package ast import ( - "fmt" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // SchemaSet holds a map from a path to a schema. -type SchemaSet struct { - m *util.HashMap -} +type SchemaSet = v1.SchemaSet // NewSchemaSet returns an empty SchemaSet. func NewSchemaSet() *SchemaSet { - - eqFunc := func(a, b util.T) bool { - return a.(Ref).Equal(b.(Ref)) - } - - hashFunc := func(x util.T) int { return x.(Ref).Hash() } - - return &SchemaSet{ - m: util.NewHashMap(eqFunc, hashFunc), - } -} - -// Put inserts a raw schema into the set. -func (ss *SchemaSet) Put(path Ref, raw interface{}) { - ss.m.Put(path, raw) -} - -// Get returns the raw schema identified by the path. 
-func (ss *SchemaSet) Get(path Ref) interface{} { - if ss == nil { - return nil - } - x, ok := ss.m.Get(path) - if !ok { - return nil - } - return x -} - -func loadSchema(raw interface{}, allowNet []string) (types.Type, error) { - - jsonSchema, err := compileSchema(raw, allowNet) - if err != nil { - return nil, err - } - - tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema) - if err != nil { - return nil, fmt.Errorf("type checking: %w", err) - } - - return tpe, nil + return v1.NewSchemaSet() } diff --git a/vendor/github.com/open-policy-agent/opa/ast/strings.go b/vendor/github.com/open-policy-agent/opa/ast/strings.go index e489f6977c..c2c81de8b7 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/strings.go +++ b/vendor/github.com/open-policy-agent/opa/ast/strings.go @@ -5,14 +5,10 @@ package ast import ( - "reflect" - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // TypeName returns a human readable name for the AST element type. -func TypeName(x interface{}) string { - if _, ok := x.(*lazyObj); ok { - return "object" - } - return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name()) +func TypeName(x any) string { + return v1.TypeName(x) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/term.go b/vendor/github.com/open-policy-agent/opa/ast/term.go index ce8ee4853d..202355070f 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/term.go +++ b/vendor/github.com/open-policy-agent/opa/ast/term.go @@ -1,40 +1,22 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// nolint: deadcode // Public API. 
package ast import ( - "bytes" "encoding/json" - "errors" - "fmt" "io" - "math" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/OneOfOne/xxhash" - - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/ast/location" - "github.com/open-policy-agent/opa/util" -) -var errFindNotFound = fmt.Errorf("find: not found") + v1 "github.com/open-policy-agent/opa/v1/ast" +) // Location records a position in source code. -type Location = location.Location +type Location = v1.Location // NewLocation returns a new Location object. func NewLocation(text []byte, file string, row int, col int) *Location { - return location.NewLocation(text, file, row, col) + return v1.NewLocation(text, file, row, col) } // Value declares the common interface for all Term values. Every kind of Term value @@ -45,3230 +27,280 @@ func NewLocation(text []byte, file string, row int, col int) *Location { // - Variables, References // - Array, Set, and Object Comprehensions // - Calls -type Value interface { - Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively. - Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found. - Hash() int // Returns hash code of the value. - IsGround() bool // IsGround returns true if this value is not a variable or contains no variables. - String() string // String returns a human readable string representation of the value. -} +type Value = v1.Value // InterfaceToValue converts a native Go value x to a Value. 
-func InterfaceToValue(x interface{}) (Value, error) { - switch x := x.(type) { - case nil: - return Null{}, nil - case bool: - return Boolean(x), nil - case json.Number: - return Number(x), nil - case int64: - return int64Number(x), nil - case uint64: - return uint64Number(x), nil - case float64: - return floatNumber(x), nil - case int: - return intNumber(x), nil - case string: - return String(x), nil - case []interface{}: - r := make([]*Term, len(x)) - for i, e := range x { - e, err := InterfaceToValue(e) - if err != nil { - return nil, err - } - r[i] = &Term{Value: e} - } - return NewArray(r...), nil - case map[string]interface{}: - r := newobject(len(x)) - for k, v := range x { - k, err := InterfaceToValue(k) - if err != nil { - return nil, err - } - v, err := InterfaceToValue(v) - if err != nil { - return nil, err - } - r.Insert(NewTerm(k), NewTerm(v)) - } - return r, nil - case map[string]string: - r := newobject(len(x)) - for k, v := range x { - k, err := InterfaceToValue(k) - if err != nil { - return nil, err - } - v, err := InterfaceToValue(v) - if err != nil { - return nil, err - } - r.Insert(NewTerm(k), NewTerm(v)) - } - return r, nil - default: - ptr := util.Reference(x) - if err := util.RoundTrip(ptr); err != nil { - return nil, fmt.Errorf("ast: interface conversion: %w", err) - } - return InterfaceToValue(*ptr) - } +func InterfaceToValue(x any) (Value, error) { + return v1.InterfaceToValue(x) } // ValueFromReader returns an AST value from a JSON serialized value in the reader. func ValueFromReader(r io.Reader) (Value, error) { - var x interface{} - if err := util.NewJSONDecoder(r).Decode(&x); err != nil { - return nil, err - } - return InterfaceToValue(x) + return v1.ValueFromReader(r) } // As converts v into a Go native type referred to by x. 
-func As(v Value, x interface{}) error { - return util.NewJSONDecoder(bytes.NewBufferString(v.String())).Decode(x) +func As(v Value, x any) error { + return v1.As(v, x) } // Resolver defines the interface for resolving references to native Go values. -type Resolver interface { - Resolve(Ref) (interface{}, error) -} +type Resolver = v1.Resolver // ValueResolver defines the interface for resolving references to AST values. -type ValueResolver interface { - Resolve(Ref) (Value, error) -} +type ValueResolver = v1.ValueResolver // UnknownValueErr indicates a ValueResolver was unable to resolve a reference // because the reference refers to an unknown value. -type UnknownValueErr struct{} - -func (UnknownValueErr) Error() string { - return "unknown value" -} +type UnknownValueErr = v1.UnknownValueErr // IsUnknownValueErr returns true if the err is an UnknownValueErr. func IsUnknownValueErr(err error) bool { - _, ok := err.(UnknownValueErr) - return ok -} - -type illegalResolver struct{} - -func (illegalResolver) Resolve(ref Ref) (interface{}, error) { - return nil, fmt.Errorf("illegal value: %v", ref) + return v1.IsUnknownValueErr(err) } // ValueToInterface returns the Go representation of an AST value. The AST // value should not contain any values that require evaluation (e.g., vars, // comprehensions, etc.) 
-func ValueToInterface(v Value, resolver Resolver) (interface{}, error) { - return valueToInterface(v, resolver, JSONOpt{}) -} - -func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (interface{}, error) { - switch v := v.(type) { - case Null: - return nil, nil - case Boolean: - return bool(v), nil - case Number: - return json.Number(v), nil - case String: - return string(v), nil - case *Array: - buf := []interface{}{} - for i := 0; i < v.Len(); i++ { - x1, err := valueToInterface(v.Elem(i).Value, resolver, opt) - if err != nil { - return nil, err - } - buf = append(buf, x1) - } - return buf, nil - case *object: - buf := make(map[string]interface{}, v.Len()) - err := v.Iter(func(k, v *Term) error { - ki, err := valueToInterface(k.Value, resolver, opt) - if err != nil { - return err - } - var str string - var ok bool - if str, ok = ki.(string); !ok { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(ki); err != nil { - return err - } - str = strings.TrimSpace(buf.String()) - } - vi, err := valueToInterface(v.Value, resolver, opt) - if err != nil { - return err - } - buf[str] = vi - return nil - }) - if err != nil { - return nil, err - } - return buf, nil - case *lazyObj: - if opt.CopyMaps { - return valueToInterface(v.force(), resolver, opt) - } - return v.native, nil - case Set: - buf := []interface{}{} - iter := func(x *Term) error { - x1, err := valueToInterface(x.Value, resolver, opt) - if err != nil { - return err - } - buf = append(buf, x1) - return nil - } - var err error - if opt.SortSets { - err = v.Sorted().Iter(iter) - } else { - err = v.Iter(iter) - } - if err != nil { - return nil, err - } - return buf, nil - case Ref: - return resolver.Resolve(v) - default: - return nil, fmt.Errorf("%v requires evaluation", TypeName(v)) - } +func ValueToInterface(v Value, resolver Resolver) (any, error) { + return v1.ValueToInterface(v, resolver) } // JSON returns the JSON representation of v. 
The value must not contain any // refs or terms that require evaluation (e.g., vars, comprehensions, etc.) -func JSON(v Value) (interface{}, error) { - return JSONWithOpt(v, JSONOpt{}) +func JSON(v Value) (any, error) { + return v1.JSON(v) } // JSONOpt defines parameters for AST to JSON conversion. -type JSONOpt struct { - SortSets bool // sort sets before serializing (this makes conversion more expensive) - CopyMaps bool // enforces copying of map[string]interface{} read from the store -} +type JSONOpt = v1.JSONOpt // JSONWithOpt returns the JSON representation of v. The value must not contain any // refs or terms that require evaluation (e.g., vars, comprehensions, etc.) -func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) { - return valueToInterface(v, illegalResolver{}, opt) +func JSONWithOpt(v Value, opt JSONOpt) (any, error) { + return v1.JSONWithOpt(v, opt) } // MustJSON returns the JSON representation of v. The value must not contain any // refs or terms that require evaluation (e.g., vars, comprehensions, etc.) If // the conversion fails, this function will panic. This function is mostly for // test purposes. -func MustJSON(v Value) interface{} { - r, err := JSON(v) - if err != nil { - panic(err) - } - return r +func MustJSON(v Value) any { + return v1.MustJSON(v) } // MustInterfaceToValue converts a native Go value x to a Value. If the // conversion fails, this function will panic. This function is mostly for test // purposes. -func MustInterfaceToValue(x interface{}) Value { - v, err := InterfaceToValue(x) - if err != nil { - panic(err) - } - return v +func MustInterfaceToValue(x any) Value { + return v1.MustInterfaceToValue(x) } // Term is an argument to a function. -type Term struct { - Value Value `json:"value"` // the value of the Term as represented in Go - Location *Location `json:"location,omitempty"` // the location of the Term in the source - - jsonOptions astJSON.Options -} +type Term = v1.Term // NewTerm returns a new Term object. 
func NewTerm(v Value) *Term { - return &Term{ - Value: v, - } -} - -// SetLocation updates the term's Location and returns the term itself. -func (term *Term) SetLocation(loc *Location) *Term { - term.Location = loc - return term -} - -// Loc returns the Location of term. -func (term *Term) Loc() *Location { - if term == nil { - return nil - } - return term.Location -} - -// SetLoc sets the location on term. -func (term *Term) SetLoc(loc *Location) { - term.SetLocation(loc) -} - -// Copy returns a deep copy of term. -func (term *Term) Copy() *Term { - - if term == nil { - return nil - } - - cpy := *term - - switch v := term.Value.(type) { - case Null, Boolean, Number, String, Var: - cpy.Value = v - case Ref: - cpy.Value = v.Copy() - case *Array: - cpy.Value = v.Copy() - case Set: - cpy.Value = v.Copy() - case *object: - cpy.Value = v.Copy() - case *ArrayComprehension: - cpy.Value = v.Copy() - case *ObjectComprehension: - cpy.Value = v.Copy() - case *SetComprehension: - cpy.Value = v.Copy() - case Call: - cpy.Value = v.Copy() - } - - return &cpy -} - -// Equal returns true if this term equals the other term. Equality is -// defined for each kind of term. -func (term *Term) Equal(other *Term) bool { - if term == nil && other != nil { - return false - } - if term != nil && other == nil { - return false - } - if term == other { - return true - } - - // TODO(tsandall): This early-exit avoids allocations for types that have - // Equal() functions that just use == underneath. We should revisit the - // other types and implement Equal() functions that do not require - // allocations. - switch v := term.Value.(type) { - case Null: - return v.Equal(other.Value) - case Boolean: - return v.Equal(other.Value) - case Number: - return v.Equal(other.Value) - case String: - return v.Equal(other.Value) - case Var: - return v.Equal(other.Value) - } - - return term.Value.Compare(other.Value) == 0 -} - -// Get returns a value referred to by name from the term. 
-func (term *Term) Get(name *Term) *Term { - switch v := term.Value.(type) { - case *object: - return v.Get(name) - case *Array: - return v.Get(name) - case interface { - Get(*Term) *Term - }: - return v.Get(name) - case Set: - if v.Contains(name) { - return name - } - } - return nil -} - -// Hash returns the hash code of the Term's Value. Its Location -// is ignored. -func (term *Term) Hash() int { - return term.Value.Hash() -} - -// IsGround returns true if this term's Value is ground. -func (term *Term) IsGround() bool { - return term.Value.IsGround() -} - -func (term *Term) setJSONOptions(opts astJSON.Options) { - term.jsonOptions = opts - if term.Location != nil { - term.Location.JSONOptions = opts - } -} - -// MarshalJSON returns the JSON encoding of the term. -// -// Specialized marshalling logic is required to include a type hint for Value. -func (term *Term) MarshalJSON() ([]byte, error) { - d := map[string]interface{}{ - "type": TypeName(term.Value), - "value": term.Value, - } - if term.jsonOptions.MarshalOptions.IncludeLocation.Term { - if term.Location != nil { - d["location"] = term.Location - } - } - return json.Marshal(d) -} - -func (term *Term) String() string { - return term.Value.String() -} - -// UnmarshalJSON parses the byte array and stores the result in term. -// Specialized unmarshalling is required to handle Value and Location. -func (term *Term) UnmarshalJSON(bs []byte) error { - v := map[string]interface{}{} - if err := util.UnmarshalJSON(bs, &v); err != nil { - return err - } - val, err := unmarshalValue(v) - if err != nil { - return err - } - term.Value = val - - if loc, ok := v["location"].(map[string]interface{}); ok { - term.Location = &Location{} - err := unmarshalLocation(term.Location, loc) - if err != nil { - return err - } - } - return nil -} - -// Vars returns a VarSet with variables contained in this term. 
-func (term *Term) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - vis.Walk(term) - return vis.vars + return v1.NewTerm(v) } // IsConstant returns true if the AST value is constant. func IsConstant(v Value) bool { - found := false - vis := GenericVisitor{ - func(x interface{}) bool { - switch x.(type) { - case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: - found = true - return true - } - return false - }, - } - vis.Walk(v) - return !found + return v1.IsConstant(v) } // IsComprehension returns true if the supplied value is a comprehension. func IsComprehension(x Value) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - } - return false + return v1.IsComprehension(x) } // ContainsRefs returns true if the Value v contains refs. -func ContainsRefs(v interface{}) bool { - found := false - WalkRefs(v, func(Ref) bool { - found = true - return found - }) - return found +func ContainsRefs(v any) bool { + return v1.ContainsRefs(v) } // ContainsComprehensions returns true if the Value v contains comprehensions. -func ContainsComprehensions(v interface{}) bool { - found := false - WalkClosures(v, func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - found = true - return found - } - return found - }) - return found +func ContainsComprehensions(v any) bool { + return v1.ContainsComprehensions(v) } // ContainsClosures returns true if the Value v contains closures. -func ContainsClosures(v interface{}) bool { - found := false - WalkClosures(v, func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: - found = true - return found - } - return found - }) - return found +func ContainsClosures(v any) bool { + return v1.ContainsClosures(v) } // IsScalar returns true if the AST value is a scalar. 
func IsScalar(v Value) bool { - switch v.(type) { - case String: - return true - case Number: - return true - case Boolean: - return true - case Null: - return true - } - return false + return v1.IsScalar(v) } // Null represents the null value defined by JSON. -type Null struct{} +type Null = v1.Null // NullTerm creates a new Term with a Null value. func NullTerm() *Term { - return &Term{Value: Null{}} -} - -// Equal returns true if the other term Value is also Null. -func (null Null) Equal(other Value) bool { - switch other.(type) { - case Null: - return true - default: - return false - } -} - -// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (null Null) Compare(other Value) int { - return Compare(null, other) -} - -// Find returns the current value or a not found error. -func (null Null) Find(path Ref) (Value, error) { - if len(path) == 0 { - return null, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (null Null) Hash() int { - return 0 -} - -// IsGround always returns true. -func (Null) IsGround() bool { - return true -} - -func (null Null) String() string { - return "null" + return v1.NullTerm() } // Boolean represents a boolean value defined by JSON. -type Boolean bool +type Boolean = v1.Boolean // BooleanTerm creates a new Term with a Boolean value. func BooleanTerm(b bool) *Term { - return &Term{Value: Boolean(b)} -} - -// Equal returns true if the other Value is a Boolean and is equal. -func (bol Boolean) Equal(other Value) bool { - switch other := other.(type) { - case Boolean: - return bol == other - default: - return false - } -} - -// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (bol Boolean) Compare(other Value) int { - return Compare(bol, other) -} - -// Find returns the current value or a not found error. 
-func (bol Boolean) Find(path Ref) (Value, error) { - if len(path) == 0 { - return bol, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (bol Boolean) Hash() int { - if bol { - return 1 - } - return 0 -} - -// IsGround always returns true. -func (Boolean) IsGround() bool { - return true -} - -func (bol Boolean) String() string { - return strconv.FormatBool(bool(bol)) + return v1.BooleanTerm(b) } // Number represents a numeric value as defined by JSON. -type Number json.Number +type Number = v1.Number // NumberTerm creates a new Term with a Number value. func NumberTerm(n json.Number) *Term { - return &Term{Value: Number(n)} + return v1.NumberTerm(n) } // IntNumberTerm creates a new Term with an integer Number value. func IntNumberTerm(i int) *Term { - return &Term{Value: Number(strconv.Itoa(i))} + return v1.IntNumberTerm(i) } // UIntNumberTerm creates a new Term with an unsigned integer Number value. func UIntNumberTerm(u uint64) *Term { - return &Term{Value: uint64Number(u)} + return v1.UIntNumberTerm(u) } // FloatNumberTerm creates a new Term with a floating point Number value. func FloatNumberTerm(f float64) *Term { - s := strconv.FormatFloat(f, 'g', -1, 64) - return &Term{Value: Number(s)} -} - -// Equal returns true if the other Value is a Number and is equal. -func (num Number) Equal(other Value) bool { - switch other := other.(type) { - case Number: - return Compare(num, other) == 0 - default: - return false - } -} - -// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (num Number) Compare(other Value) int { - return Compare(num, other) -} - -// Find returns the current value or a not found error. -func (num Number) Find(path Ref) (Value, error) { - if len(path) == 0 { - return num, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. 
-func (num Number) Hash() int { - f, err := json.Number(num).Float64() - if err != nil { - bs := []byte(num) - h := xxhash.Checksum64(bs) - return int(h) - } - return int(f) -} - -// Int returns the int representation of num if possible. -func (num Number) Int() (int, bool) { - i64, ok := num.Int64() - return int(i64), ok -} - -// Int64 returns the int64 representation of num if possible. -func (num Number) Int64() (int64, bool) { - i, err := json.Number(num).Int64() - if err != nil { - return 0, false - } - return i, true -} - -// Float64 returns the float64 representation of num if possible. -func (num Number) Float64() (float64, bool) { - f, err := json.Number(num).Float64() - if err != nil { - return 0, false - } - return f, true -} - -// IsGround always returns true. -func (Number) IsGround() bool { - return true -} - -// MarshalJSON returns JSON encoded bytes representing num. -func (num Number) MarshalJSON() ([]byte, error) { - return json.Marshal(json.Number(num)) -} - -func (num Number) String() string { - return string(num) -} - -func intNumber(i int) Number { - return Number(strconv.Itoa(i)) -} - -func int64Number(i int64) Number { - return Number(strconv.FormatInt(i, 10)) -} - -func uint64Number(u uint64) Number { - return Number(strconv.FormatUint(u, 10)) -} - -func floatNumber(f float64) Number { - return Number(strconv.FormatFloat(f, 'g', -1, 64)) + return v1.FloatNumberTerm(f) } // String represents a string value as defined by JSON. -type String string +type String = v1.String // StringTerm creates a new Term with a String value. func StringTerm(s string) *Term { - return &Term{Value: String(s)} -} - -// Equal returns true if the other Value is a String and is equal. -func (str String) Equal(other Value) bool { - switch other := other.(type) { - case String: - return str == other - default: - return false - } -} - -// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. 
-func (str String) Compare(other Value) int { - return Compare(str, other) -} - -// Find returns the current value or a not found error. -func (str String) Find(path Ref) (Value, error) { - if len(path) == 0 { - return str, nil - } - return nil, errFindNotFound -} - -// IsGround always returns true. -func (String) IsGround() bool { - return true -} - -func (str String) String() string { - return strconv.Quote(string(str)) -} - -// Hash returns the hash code for the Value. -func (str String) Hash() int { - h := xxhash.ChecksumString64S(string(str), hashSeed0) - return int(h) + return v1.StringTerm(s) } // Var represents a variable as defined by the language. -type Var string +type Var = v1.Var // VarTerm creates a new Term with a Variable value. func VarTerm(v string) *Term { - return &Term{Value: Var(v)} -} - -// Equal returns true if the other Value is a Variable and has the same value -// (name). -func (v Var) Equal(other Value) bool { - switch other := other.(type) { - case Var: - return v == other - default: - return false - } -} - -// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (v Var) Compare(other Value) int { - return Compare(v, other) -} - -// Find returns the current value or a not found error. -func (v Var) Find(path Ref) (Value, error) { - if len(path) == 0 { - return v, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (v Var) Hash() int { - h := xxhash.ChecksumString64S(string(v), hashSeed0) - return int(h) -} - -// IsGround always returns false. -func (Var) IsGround() bool { - return false -} - -// IsWildcard returns true if this is a wildcard variable. -func (v Var) IsWildcard() bool { - return strings.HasPrefix(string(v), WildcardPrefix) -} - -// IsGenerated returns true if this variable was generated during compilation. 
-func (v Var) IsGenerated() bool { - return strings.HasPrefix(string(v), "__local") -} - -func (v Var) String() string { - // Special case for wildcard so that string representation is parseable. The - // parser mangles wildcard variables to make their names unique and uses an - // illegal variable name character (WildcardPrefix) to avoid conflicts. When - // we serialize the variable here, we need to make sure it's parseable. - if v.IsWildcard() { - return Wildcard.String() - } - return string(v) + return v1.VarTerm(v) } // Ref represents a reference as defined by the language. -type Ref []*Term +type Ref = v1.Ref // EmptyRef returns a new, empty reference. func EmptyRef() Ref { - return Ref([]*Term{}) + return v1.EmptyRef() } // PtrRef returns a new reference against the head for the pointer // s. Path components in the pointer are unescaped. func PtrRef(head *Term, s string) (Ref, error) { - s = strings.Trim(s, "/") - if s == "" { - return Ref{head}, nil - } - parts := strings.Split(s, "/") - if maxLen := math.MaxInt32; len(parts) >= maxLen { - return nil, fmt.Errorf("path too long: %s, %d > %d (max)", s, len(parts), maxLen) - } - ref := make(Ref, uint(len(parts))+1) - ref[0] = head - for i := 0; i < len(parts); i++ { - var err error - parts[i], err = url.PathUnescape(parts[i]) - if err != nil { - return nil, err - } - ref[i+1] = StringTerm(parts[i]) - } - return ref, nil + return v1.PtrRef(head, s) } // RefTerm creates a new Term with a Ref value. func RefTerm(r ...*Term) *Term { - return &Term{Value: Ref(r)} -} - -// Append returns a copy of ref with the term appended to the end. -func (ref Ref) Append(term *Term) Ref { - n := len(ref) - dst := make(Ref, n+1) - copy(dst, ref) - dst[n] = term - return dst -} - -// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref), -// existing elements are shifted to the right. If pos > len(ref)+1 this -// function panics. 
-func (ref Ref) Insert(x *Term, pos int) Ref { - switch { - case pos == len(ref): - return ref.Append(x) - case pos > len(ref)+1: - panic("illegal index") - } - cpy := make(Ref, len(ref)+1) - copy(cpy, ref[:pos]) - cpy[pos] = x - copy(cpy[pos+1:], ref[pos:]) - return cpy -} - -// Extend returns a copy of ref with the terms from other appended. The head of -// other will be converted to a string. -func (ref Ref) Extend(other Ref) Ref { - dst := make(Ref, len(ref)+len(other)) - copy(dst, ref) - - head := other[0].Copy() - head.Value = String(head.Value.(Var)) - offset := len(ref) - dst[offset] = head - - copy(dst[offset+1:], other[1:]) - return dst -} - -// Concat returns a ref with the terms appended. -func (ref Ref) Concat(terms []*Term) Ref { - if len(terms) == 0 { - return ref - } - cpy := make(Ref, len(ref)+len(terms)) - copy(cpy, ref) - copy(cpy[len(ref):], terms) - return cpy -} - -// Dynamic returns the offset of the first non-constant operand of ref. -func (ref Ref) Dynamic() int { - switch ref[0].Value.(type) { - case Call: - return 0 - } - for i := 1; i < len(ref); i++ { - if !IsConstant(ref[i].Value) { - return i - } - } - return -1 -} - -// Copy returns a deep copy of ref. -func (ref Ref) Copy() Ref { - return termSliceCopy(ref) -} - -// Equal returns true if ref is equal to other. -func (ref Ref) Equal(other Value) bool { - return Compare(ref, other) == 0 -} - -// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (ref Ref) Compare(other Value) int { - return Compare(ref, other) -} - -// Find returns the current value or a "not found" error. -func (ref Ref) Find(path Ref) (Value, error) { - if len(path) == 0 { - return ref, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (ref Ref) Hash() int { - return termSliceHash(ref) -} - -// HasPrefix returns true if the other ref is a prefix of this ref. 
-func (ref Ref) HasPrefix(other Ref) bool { - if len(other) > len(ref) { - return false - } - for i := range other { - if !ref[i].Equal(other[i]) { - return false - } - } - return true -} - -// ConstantPrefix returns the constant portion of the ref starting from the head. -func (ref Ref) ConstantPrefix() Ref { - ref = ref.Copy() - - i := ref.Dynamic() - if i < 0 { - return ref - } - return ref[:i] -} - -func (ref Ref) StringPrefix() Ref { - r := ref.Copy() - - for i := 1; i < len(ref); i++ { - switch r[i].Value.(type) { - case String: // pass - default: // cut off - return r[:i] - } - } - - return r -} - -// GroundPrefix returns the ground portion of the ref starting from the head. By -// definition, the head of the reference is always ground. -func (ref Ref) GroundPrefix() Ref { - prefix := make(Ref, 0, len(ref)) - - for i, x := range ref { - if i > 0 && !x.IsGround() { - break - } - prefix = append(prefix, x) - } - - return prefix -} - -func (ref Ref) DynamicSuffix() Ref { - i := ref.Dynamic() - if i < 0 { - return nil - } - return ref[i:] -} - -// IsGround returns true if all of the parts of the Ref are ground. -func (ref Ref) IsGround() bool { - if len(ref) == 0 { - return true - } - return termSliceIsGround(ref[1:]) -} - -// IsNested returns true if this ref contains other Refs. -func (ref Ref) IsNested() bool { - for _, x := range ref { - if _, ok := x.Value.(Ref); ok { - return true - } - } - return false -} - -// Ptr returns a slash-separated path string for this ref. If the ref -// contains non-string terms this function returns an error. Path -// components are escaped. -func (ref Ref) Ptr() (string, error) { - parts := make([]string, 0, len(ref)-1) - for _, term := range ref[1:] { - if str, ok := term.Value.(String); ok { - parts = append(parts, url.PathEscape(string(str))) - } else { - return "", fmt.Errorf("invalid path value type") - } - } - return strings.Join(parts, "/"), nil + return v1.RefTerm(r...) 
} -var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") - func IsVarCompatibleString(s string) bool { - return varRegexp.MatchString(s) -} - -func (ref Ref) String() string { - if len(ref) == 0 { - return "" - } - buf := []string{ref[0].Value.String()} - path := ref[1:] - for _, p := range path { - switch p := p.Value.(type) { - case String: - str := string(p) - if varRegexp.MatchString(str) && len(buf) > 0 && !IsKeyword(str) { - buf = append(buf, "."+str) - } else { - buf = append(buf, "["+p.String()+"]") - } - default: - buf = append(buf, "["+p.String()+"]") - } - } - return strings.Join(buf, "") -} - -// OutputVars returns a VarSet containing variables that would be bound by evaluating -// this expression in isolation. -func (ref Ref) OutputVars() VarSet { - vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) - vis.Walk(ref) - return vis.Vars() -} - -func (ref Ref) toArray() *Array { - a := NewArray() - for _, term := range ref { - if _, ok := term.Value.(String); ok { - a = a.Append(term) - } else { - a = a.Append(StringTerm(term.Value.String())) - } - } - return a + return v1.IsVarCompatibleString(s) } // QueryIterator defines the interface for querying AST documents with references. -type QueryIterator func(map[Var]Value, Value) error +type QueryIterator = v1.QueryIterator // ArrayTerm creates a new Term with an Array value. func ArrayTerm(a ...*Term) *Term { - return NewTerm(NewArray(a...)) + return v1.ArrayTerm(a...) } // NewArray creates an Array with the terms provided. The array will // use the provided term slice. func NewArray(a ...*Term) *Array { - hs := make([]int, len(a)) - for i, e := range a { - hs[i] = e.Value.Hash() - } - arr := &Array{elems: a, hashs: hs, ground: termSliceIsGround(a)} - arr.rehash() - return arr + return v1.NewArray(a...) } // Array represents an array as defined by the language. 
Arrays are similar to the // same types as defined by JSON with the exception that they can contain Vars // and References. -type Array struct { - elems []*Term - hashs []int // element hashes - hash int - ground bool -} - -// Copy returns a deep copy of arr. -func (arr *Array) Copy() *Array { - cpy := make([]int, len(arr.elems)) - copy(cpy, arr.hashs) - return &Array{ - elems: termSliceCopy(arr.elems), - hashs: cpy, - hash: arr.hash, - ground: arr.IsGround()} -} - -// Equal returns true if arr is equal to other. -func (arr *Array) Equal(other Value) bool { - return Compare(arr, other) == 0 -} - -// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (arr *Array) Compare(other Value) int { - return Compare(arr, other) -} - -// Find returns the value at the index or an out-of-range error. -func (arr *Array) Find(path Ref) (Value, error) { - if len(path) == 0 { - return arr, nil - } - num, ok := path[0].Value.(Number) - if !ok { - return nil, errFindNotFound - } - i, ok := num.Int() - if !ok { - return nil, errFindNotFound - } - if i < 0 || i >= arr.Len() { - return nil, errFindNotFound - } - return arr.Elem(i).Value.Find(path[1:]) -} - -// Get returns the element at pos or nil if not possible. -func (arr *Array) Get(pos *Term) *Term { - num, ok := pos.Value.(Number) - if !ok { - return nil - } - - i, ok := num.Int() - if !ok { - return nil - } - - if i >= 0 && i < len(arr.elems) { - return arr.elems[i] - } - - return nil -} - -// Sorted returns a new Array that contains the sorted elements of arr. -func (arr *Array) Sorted() *Array { - cpy := make([]*Term, len(arr.elems)) - for i := range cpy { - cpy[i] = arr.elems[i] - } - sort.Sort(termSlice(cpy)) - a := NewArray(cpy...) - a.hashs = arr.hashs - return a -} - -// Hash returns the hash code for the Value. -func (arr *Array) Hash() int { - return arr.hash -} - -// IsGround returns true if all of the Array elements are ground. 
-func (arr *Array) IsGround() bool { - return arr.ground -} - -// MarshalJSON returns JSON encoded bytes representing arr. -func (arr *Array) MarshalJSON() ([]byte, error) { - if len(arr.elems) == 0 { - return []byte(`[]`), nil - } - return json.Marshal(arr.elems) -} - -func (arr *Array) String() string { - var b strings.Builder - b.WriteRune('[') - for i, e := range arr.elems { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(e.String()) - } - b.WriteRune(']') - return b.String() -} - -// Len returns the number of elements in the array. -func (arr *Array) Len() int { - return len(arr.elems) -} - -// Elem returns the element i of arr. -func (arr *Array) Elem(i int) *Term { - return arr.elems[i] -} - -// Set sets the element i of arr. -func (arr *Array) Set(i int, v *Term) { - arr.set(i, v) -} - -// rehash updates the cached hash of arr. -func (arr *Array) rehash() { - arr.hash = 0 - for _, h := range arr.hashs { - arr.hash += h - } -} - -// set sets the element i of arr. -func (arr *Array) set(i int, v *Term) { - arr.ground = arr.ground && v.IsGround() - arr.elems[i] = v - arr.hashs[i] = v.Value.Hash() - arr.rehash() -} - -// Slice returns a slice of arr starting from i index to j. -1 -// indicates the end of the array. The returned value array is not a -// copy and any modifications to either of arrays may be reflected to -// the other. -func (arr *Array) Slice(i, j int) *Array { - var elems []*Term - var hashs []int - if j == -1 { - elems = arr.elems[i:] - hashs = arr.hashs[i:] - } else { - elems = arr.elems[i:j] - hashs = arr.hashs[i:j] - } - // If arr is ground, the slice is, too. - // If it's not, the slice could still be. - gr := arr.ground || termSliceIsGround(elems) - - s := &Array{elems: elems, hashs: hashs, ground: gr} - s.rehash() - return s -} - -// Iter calls f on each element in arr. If f returns an error, -// iteration stops and the return value is the error. 
-func (arr *Array) Iter(f func(*Term) error) error { - for i := range arr.elems { - if err := f(arr.elems[i]); err != nil { - return err - } - } - return nil -} - -// Until calls f on each element in arr. If f returns true, iteration stops. -func (arr *Array) Until(f func(*Term) bool) bool { - err := arr.Iter(func(t *Term) error { - if f(t) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f on each element in arr. -func (arr *Array) Foreach(f func(*Term)) { - _ = arr.Iter(func(t *Term) error { - f(t) - return nil - }) // ignore error -} - -// Append appends a term to arr, returning the appended array. -func (arr *Array) Append(v *Term) *Array { - cpy := *arr - cpy.elems = append(arr.elems, v) - cpy.hashs = append(arr.hashs, v.Value.Hash()) - cpy.hash = arr.hash + v.Value.Hash() - cpy.ground = arr.ground && v.IsGround() - return &cpy -} +type Array = v1.Array // Set represents a set as defined by the language. -type Set interface { - Value - Len() int - Copy() Set - Diff(Set) Set - Intersect(Set) Set - Union(Set) Set - Add(*Term) - Iter(func(*Term) error) error - Until(func(*Term) bool) bool - Foreach(func(*Term)) - Contains(*Term) bool - Map(func(*Term) (*Term, error)) (Set, error) - Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error) - Sorted() *Array - Slice() []*Term -} +type Set = v1.Set // NewSet returns a new Set containing t. func NewSet(t ...*Term) Set { - s := newset(len(t)) - for i := range t { - s.Add(t[i]) - } - return s + return v1.NewSet(t...) } -func newset(n int) *set { - var keys []*Term - if n > 0 { - keys = make([]*Term, 0, n) - } - return &set{ - elems: make(map[int]*Term, n), - keys: keys, - hash: 0, - ground: true, - sortGuard: new(sync.Once), - } -} - -// SetTerm returns a new Term representing a set containing terms t. func SetTerm(t ...*Term) *Term { - set := NewSet(t...) 
- return &Term{ - Value: set, - } -} - -type set struct { - elems map[int]*Term - keys []*Term - hash int - ground bool - sortGuard *sync.Once // Prevents race condition around sorting. -} - -// Copy returns a deep copy of s. -func (s *set) Copy() Set { - cpy := newset(s.Len()) - s.Foreach(func(x *Term) { - cpy.Add(x.Copy()) - }) - cpy.hash = s.hash - cpy.ground = s.ground - return cpy -} - -// IsGround returns true if all terms in s are ground. -func (s *set) IsGround() bool { - return s.ground -} - -// Hash returns a hash code for s. -func (s *set) Hash() int { - return s.hash -} - -func (s *set) String() string { - if s.Len() == 0 { - return "set()" - } - var b strings.Builder - b.WriteRune('{') - for i := range s.sortedKeys() { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(s.keys[i].Value.String()) - } - b.WriteRune('}') - return b.String() -} - -func (s *set) sortedKeys() []*Term { - s.sortGuard.Do(func() { - sort.Sort(termSlice(s.keys)) - }) - return s.keys -} - -// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (s *set) Compare(other Value) int { - o1 := sortOrder(s) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o1 > o2 { - return 1 - } - t := other.(*set) - return termSliceCompare(s.sortedKeys(), t.sortedKeys()) -} - -// Find returns the set or dereferences the element itself. -func (s *set) Find(path Ref) (Value, error) { - if len(path) == 0 { - return s, nil - } - if !s.Contains(path[0]) { - return nil, errFindNotFound - } - return path[0].Value.Find(path[1:]) -} - -// Diff returns elements in s that are not in other. -func (s *set) Diff(other Set) Set { - r := NewSet() - s.Foreach(func(x *Term) { - if !other.Contains(x) { - r.Add(x) - } - }) - return r -} - -// Intersect returns the set containing elements in both s and other. 
-func (s *set) Intersect(other Set) Set { - o := other.(*set) - n, m := s.Len(), o.Len() - ss := s - so := o - if m < n { - ss = o - so = s - n = m - } - - r := newset(n) - ss.Foreach(func(x *Term) { - if so.Contains(x) { - r.Add(x) - } - }) - return r -} - -// Union returns the set containing all elements of s and other. -func (s *set) Union(other Set) Set { - r := NewSet() - s.Foreach(func(x *Term) { - r.Add(x) - }) - other.Foreach(func(x *Term) { - r.Add(x) - }) - return r -} - -// Add updates s to include t. -func (s *set) Add(t *Term) { - s.insert(t) -} - -// Iter calls f on each element in s. If f returns an error, iteration stops -// and the return value is the error. -func (s *set) Iter(f func(*Term) error) error { - for i := range s.sortedKeys() { - if err := f(s.keys[i]); err != nil { - return err - } - } - return nil -} - -var errStop = errors.New("stop") - -// Until calls f on each element in s. If f returns true, iteration stops. -func (s *set) Until(f func(*Term) bool) bool { - err := s.Iter(func(t *Term) error { - if f(t) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f on each element in s. -func (s *set) Foreach(f func(*Term)) { - _ = s.Iter(func(t *Term) error { - f(t) - return nil - }) // ignore error -} - -// Map returns a new Set obtained by applying f to each value in s. -func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) { - set := NewSet() - err := s.Iter(func(x *Term) error { - term, err := f(x) - if err != nil { - return err - } - set.Add(term) - return nil - }) - if err != nil { - return nil, err - } - return set, nil -} - -// Reduce returns a Term produced by applying f to each value in s. The first -// argument to f is the reduced value (starting with i) and the second argument -// to f is the element in s. 
-func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) { - err := s.Iter(func(x *Term) error { - var err error - i, err = f(i, x) - if err != nil { - return err - } - return nil - }) - return i, err -} - -// Contains returns true if t is in s. -func (s *set) Contains(t *Term) bool { - return s.get(t) != nil -} - -// Len returns the number of elements in the set. -func (s *set) Len() int { - return len(s.keys) -} - -// MarshalJSON returns JSON encoded bytes representing s. -func (s *set) MarshalJSON() ([]byte, error) { - if s.keys == nil { - return []byte(`[]`), nil - } - return json.Marshal(s.sortedKeys()) -} - -// Sorted returns an Array that contains the sorted elements of s. -func (s *set) Sorted() *Array { - cpy := make([]*Term, len(s.keys)) - copy(cpy, s.sortedKeys()) - return NewArray(cpy...) -} - -// Slice returns a slice of terms contained in the set. -func (s *set) Slice() []*Term { - return s.sortedKeys() -} - -// NOTE(philipc): We assume a many-readers, single-writer model here. -// This method should NOT be used concurrently, or else we risk data races. -func (s *set) insert(x *Term) { - hash := x.Hash() - insertHash := hash - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := x.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. 
- // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr, ok := s.elems[insertHash]; ok; { - if equal(curr.Value) { - return - } - - insertHash++ - curr, ok = s.elems[insertHash] - } - - s.elems[insertHash] = x - // O(1) insertion, but we'll have to re-sort the keys later. - s.keys = append(s.keys, x) - // Reset the sync.Once instance. - // See https://github.com/golang/go/issues/25955 for why we do it this way. - s.sortGuard = new(sync.Once) - - s.hash += hash - s.ground = s.ground && x.IsGround() -} - -func (s *set) get(x *Term) *Term { - hash := x.Hash() - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. 
- var equal func(v Value) bool - - switch x := x.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - return false - - } - - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr, ok := s.elems[hash]; ok; { - if equal(curr.Value) { - return curr - } - - hash++ - curr, ok = s.elems[hash] - } - return nil + return v1.SetTerm(t...) } // Object represents an object as defined by the language. 
-type Object interface { - Value - Len() int - Get(*Term) *Term - Copy() Object - Insert(*Term, *Term) - Iter(func(*Term, *Term) error) error - Until(func(*Term, *Term) bool) bool - Foreach(func(*Term, *Term)) - Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error) - Diff(other Object) Object - Intersect(other Object) [][3]*Term - Merge(other Object) (Object, bool) - MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) - Filter(filter Object) (Object, error) - Keys() []*Term - KeysIterator() ObjectKeysIterator - get(k *Term) *objectElem // To prevent external implementations -} +type Object = v1.Object // NewObject creates a new Object with t. func NewObject(t ...[2]*Term) Object { - obj := newobject(len(t)) - for i := range t { - obj.Insert(t[i][0], t[i][1]) - } - return obj + return v1.NewObject(t...) } // ObjectTerm creates a new Term with an Object value. func ObjectTerm(o ...[2]*Term) *Term { - return &Term{Value: NewObject(o...)} -} - -func LazyObject(blob map[string]interface{}) Object { - return &lazyObj{native: blob, cache: map[string]Value{}} -} - -type lazyObj struct { - strict Object - cache map[string]Value - native map[string]interface{} -} - -func (l *lazyObj) force() Object { - if l.strict == nil { - l.strict = MustInterfaceToValue(l.native).(Object) - // NOTE(jf): a possible performance improvement here would be to check how many - // entries have been realized to AST in the cache, and if some threshold compared to the - // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache. - l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory. 
- } - return l.strict -} - -func (l *lazyObj) Compare(other Value) int { - o1 := sortOrder(l) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - return l.force().Compare(other) -} - -func (l *lazyObj) Copy() Object { - return l -} - -func (l *lazyObj) Diff(other Object) Object { - return l.force().Diff(other) -} - -func (l *lazyObj) Intersect(other Object) [][3]*Term { - return l.force().Intersect(other) -} - -func (l *lazyObj) Iter(f func(*Term, *Term) error) error { - return l.force().Iter(f) -} - -func (l *lazyObj) Until(f func(*Term, *Term) bool) bool { - // NOTE(sr): there could be benefits in not forcing here -- if we abort because - // `f` returns true, we could save us from converting the rest of the object. - return l.force().Until(f) -} - -func (l *lazyObj) Foreach(f func(*Term, *Term)) { - l.force().Foreach(f) -} - -func (l *lazyObj) Filter(filter Object) (Object, error) { - return l.force().Filter(filter) -} - -func (l *lazyObj) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { - return l.force().Map(f) -} - -func (l *lazyObj) MarshalJSON() ([]byte, error) { - return l.force().(*object).MarshalJSON() -} - -func (l *lazyObj) Merge(other Object) (Object, bool) { - return l.force().Merge(other) -} - -func (l *lazyObj) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { - return l.force().MergeWith(other, conflictResolver) -} - -func (l *lazyObj) Len() int { - return len(l.native) + return v1.ObjectTerm(o...) } -func (l *lazyObj) String() string { - return l.force().String() +func LazyObject(blob map[string]any) Object { + return v1.LazyObject(blob) } -// get is merely there to implement the Object interface -- `get` there serves the -// purpose of prohibiting external implementations. It's never called for lazyObj. 
-func (*lazyObj) get(*Term) *objectElem { - return nil -} - -func (l *lazyObj) Get(k *Term) *Term { - if l.strict != nil { - return l.strict.Get(k) - } - if s, ok := k.Value.(String); ok { - if v, ok := l.cache[string(s)]; ok { - return NewTerm(v) - } - - if val, ok := l.native[string(s)]; ok { - var converted Value - switch val := val.(type) { - case map[string]interface{}: - converted = LazyObject(val) - default: - converted = MustInterfaceToValue(val) - } - l.cache[string(s)] = converted - return NewTerm(converted) - } - } - return nil -} - -func (l *lazyObj) Insert(k, v *Term) { - l.force().Insert(k, v) -} - -func (*lazyObj) IsGround() bool { - return true -} - -func (l *lazyObj) Hash() int { - return l.force().Hash() -} - -func (l *lazyObj) Keys() []*Term { - if l.strict != nil { - return l.strict.Keys() - } - ret := make([]*Term, 0, len(l.native)) - for k := range l.native { - ret = append(ret, StringTerm(k)) - } - sort.Sort(termSlice(ret)) - return ret -} - -func (l *lazyObj) KeysIterator() ObjectKeysIterator { - return &lazyObjKeysIterator{keys: l.Keys()} -} - -type lazyObjKeysIterator struct { - current int - keys []*Term -} - -func (ki *lazyObjKeysIterator) Next() (*Term, bool) { - if ki.current == len(ki.keys) { - return nil, false - } - ki.current++ - return ki.keys[ki.current-1], true -} - -func (l *lazyObj) Find(path Ref) (Value, error) { - if l.strict != nil { - return l.strict.Find(path) - } - if len(path) == 0 { - return l, nil - } - if p0, ok := path[0].Value.(String); ok { - if v, ok := l.cache[string(p0)]; ok { - return v.Find(path[1:]) - } - - if v, ok := l.native[string(p0)]; ok { - var converted Value - switch v := v.(type) { - case map[string]interface{}: - converted = LazyObject(v) - default: - converted = MustInterfaceToValue(v) - } - l.cache[string(p0)] = converted - return converted.Find(path[1:]) - } - } - return nil, errFindNotFound -} - -type object struct { - elems map[int]*objectElem - keys objectElemSlice - ground int // number of 
key and value grounds. Counting is - // required to support insert's key-value replace. - hash int - sortGuard *sync.Once // Prevents race condition around sorting. -} - -func newobject(n int) *object { - var keys objectElemSlice - if n > 0 { - keys = make(objectElemSlice, 0, n) - } - return &object{ - elems: make(map[int]*objectElem, n), - keys: keys, - ground: 0, - hash: 0, - sortGuard: new(sync.Once), - } -} - -type objectElem struct { - key *Term - value *Term - next *objectElem -} - -type objectElemSlice []*objectElem - -func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 } -func (s objectElemSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s objectElemSlice) Len() int { return len(s) } - // Item is a helper for constructing an tuple containing two Terms // representing a key/value pair in an Object. func Item(key, value *Term) [2]*Term { - return [2]*Term{key, value} -} - -func (obj *object) sortedKeys() objectElemSlice { - obj.sortGuard.Do(func() { - sort.Sort(obj.keys) - }) - return obj.keys -} - -// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (obj *object) Compare(other Value) int { - if x, ok := other.(*lazyObj); ok { - other = x.force() - } - o1 := sortOrder(obj) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - a := obj - b := other.(*object) - // Ensure that keys are in canonical sorted order before use! 
- akeys := a.sortedKeys() - bkeys := b.sortedKeys() - minLen := len(akeys) - if len(b.keys) < len(akeys) { - minLen = len(bkeys) - } - for i := 0; i < minLen; i++ { - keysCmp := Compare(akeys[i].key, bkeys[i].key) - if keysCmp < 0 { - return -1 - } - if keysCmp > 0 { - return 1 - } - valA := akeys[i].value - valB := bkeys[i].value - valCmp := Compare(valA, valB) - if valCmp != 0 { - return valCmp - } - } - if len(akeys) < len(bkeys) { - return -1 - } - if len(bkeys) < len(akeys) { - return 1 - } - return 0 -} - -// Find returns the value at the key or undefined. -func (obj *object) Find(path Ref) (Value, error) { - if len(path) == 0 { - return obj, nil - } - value := obj.Get(path[0]) - if value == nil { - return nil, errFindNotFound - } - return value.Value.Find(path[1:]) -} - -func (obj *object) Insert(k, v *Term) { - obj.insert(k, v) -} - -// Get returns the value of k in obj if k exists, otherwise nil. -func (obj *object) Get(k *Term) *Term { - if elem := obj.get(k); elem != nil { - return elem.value - } - return nil -} - -// Hash returns the hash code for the Value. -func (obj *object) Hash() int { - return obj.hash -} - -// IsGround returns true if all of the Object key/value pairs are ground. -func (obj *object) IsGround() bool { - return obj.ground == 2*len(obj.keys) -} - -// Copy returns a deep copy of obj. -func (obj *object) Copy() Object { - cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) { - return k.Copy(), v.Copy(), nil - }) - cpy.(*object).hash = obj.hash - return cpy -} - -// Diff returns a new Object that contains only the key/value pairs that exist in obj. -func (obj *object) Diff(other Object) Object { - r := NewObject() - obj.Foreach(func(k, v *Term) { - if other.Get(k) == nil { - r.Insert(k, v) - } - }) - return r -} - -// Intersect returns a slice of term triplets that represent the intersection of keys -// between obj and other. 
For each intersecting key, the values from obj and other are included -// as the last two terms in the triplet (respectively). -func (obj *object) Intersect(other Object) [][3]*Term { - r := [][3]*Term{} - obj.Foreach(func(k, v *Term) { - if v2 := other.Get(k); v2 != nil { - r = append(r, [3]*Term{k, v, v2}) - } - }) - return r -} - -// Iter calls the function f for each key-value pair in the object. If f -// returns an error, iteration stops and the error is returned. -func (obj *object) Iter(f func(*Term, *Term) error) error { - for _, node := range obj.sortedKeys() { - if err := f(node.key, node.value); err != nil { - return err - } - } - return nil -} - -// Until calls f for each key-value pair in the object. If f returns -// true, iteration stops and Until returns true. Otherwise, return -// false. -func (obj *object) Until(f func(*Term, *Term) bool) bool { - err := obj.Iter(func(k, v *Term) error { - if f(k, v) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f for each key-value pair in the object. -func (obj *object) Foreach(f func(*Term, *Term)) { - _ = obj.Iter(func(k, v *Term) error { - f(k, v) - return nil - }) // ignore error -} - -// Map returns a new Object constructed by mapping each element in the object -// using the function f. -func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { - cpy := newobject(obj.Len()) - err := obj.Iter(func(k, v *Term) error { - var err error - k, v, err = f(k, v) - if err != nil { - return err - } - cpy.insert(k, v) - return nil - }) - if err != nil { - return nil, err - } - return cpy, nil -} - -// Keys returns the keys of obj. -func (obj *object) Keys() []*Term { - keys := make([]*Term, len(obj.keys)) - - for i, elem := range obj.sortedKeys() { - keys[i] = elem.key - } - - return keys -} - -// Returns an iterator over the obj's keys. 
-func (obj *object) KeysIterator() ObjectKeysIterator { - return newobjectKeysIterator(obj) -} - -// MarshalJSON returns JSON encoded bytes representing obj. -func (obj *object) MarshalJSON() ([]byte, error) { - sl := make([][2]*Term, obj.Len()) - for i, node := range obj.sortedKeys() { - sl[i] = Item(node.key, node.value) - } - return json.Marshal(sl) -} - -// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are -// overlapping keys between obj and other, the values of associated with the keys are merged. Only -// objects can be merged with other objects. If the values cannot be merged, the second turn value -// will be false. -func (obj object) Merge(other Object) (Object, bool) { - return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) { - obj1, ok1 := v1.Value.(Object) - obj2, ok2 := v2.Value.(Object) - if !ok1 || !ok2 { - return nil, true - } - obj3, ok := obj1.Merge(obj2) - if !ok { - return nil, true - } - return NewTerm(obj3), false - }) -} - -// MergeWith returns a new Object containing the merged keys of obj and other. -// If there are overlapping keys between obj and other, the conflictResolver -// is called. The conflictResolver can return a merged value and a boolean -// indicating if the merge has failed and should stop. 
-func (obj object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { - result := NewObject() - stop := obj.Until(func(k, v *Term) bool { - v2 := other.Get(k) - // The key didn't exist in other, keep the original value - if v2 == nil { - result.Insert(k, v) - return false - } - - // The key exists in both, resolve the conflict if possible - merged, stop := conflictResolver(v, v2) - if !stop { - result.Insert(k, merged) - } - return stop - }) - - if stop { - return nil, false - } - - // Copy in any values from other for keys that don't exist in obj - other.Foreach(func(k, v *Term) { - if v2 := obj.Get(k); v2 == nil { - result.Insert(k, v) - } - }) - return result, true -} - -// Filter returns a new object from values in obj where the keys are -// found in filter. Array indices for values can be specified as -// number strings. -func (obj *object) Filter(filter Object) (Object, error) { - filtered, err := filterObject(obj, filter) - if err != nil { - return nil, err - } - return filtered.(Object), nil -} - -// Len returns the number of elements in the object. -func (obj object) Len() int { - return len(obj.keys) -} - -func (obj object) String() string { - var b strings.Builder - b.WriteRune('{') - - for i, elem := range obj.sortedKeys() { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(elem.key.String()) - b.WriteString(": ") - b.WriteString(elem.value.String()) - } - b.WriteRune('}') - return b.String() -} - -func (obj *object) get(k *Term) *objectElem { - hash := k.Hash() - - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. 
- var equal func(v Value) bool - - switch x := k.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr := obj.elems[hash]; curr != nil; curr = curr.next { - if equal(curr.key.Value) { - return curr - } - } - return nil -} - -// NOTE(philipc): We assume a many-readers, single-writer model here. -// This method should NOT be used concurrently, or else we risk data races. 
-func (obj *object) insert(k, v *Term) { - hash := k.Hash() - head := obj.elems[hash] - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := k.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
- var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr := head; curr != nil; curr = curr.next { - if equal(curr.key.Value) { - // The ground bit of the value may change in - // replace, hence adjust the counter per old - // and new value. - - if curr.value.IsGround() { - obj.ground-- - } - if v.IsGround() { - obj.ground++ - } - - curr.value = v - - obj.rehash() - return - } - } - elem := &objectElem{ - key: k, - value: v, - next: head, - } - obj.elems[hash] = elem - // O(1) insertion, but we'll have to re-sort the keys later. - obj.keys = append(obj.keys, elem) - // Reset the sync.Once instance. - // See https://github.com/golang/go/issues/25955 for why we do it this way. - obj.sortGuard = new(sync.Once) - obj.hash += hash + v.Hash() - - if k.IsGround() { - obj.ground++ - } - if v.IsGround() { - obj.ground++ - } -} - -func (obj *object) rehash() { - // obj.keys is considered truth, from which obj.hash and obj.elems are recalculated. 
- - obj.hash = 0 - obj.elems = make(map[int]*objectElem, len(obj.keys)) - - for _, elem := range obj.keys { - hash := elem.key.Hash() - obj.hash += hash + elem.value.Hash() - obj.elems[hash] = elem - } -} - -func filterObject(o Value, filter Value) (Value, error) { - if filter.Compare(Null{}) == 0 { - return o, nil - } - - filteredObj, ok := filter.(*object) - if !ok { - return nil, fmt.Errorf("invalid filter value %q, expected an object", filter) - } - - switch v := o.(type) { - case String, Number, Boolean, Null: - return o, nil - case *Array: - values := NewArray() - for i := 0; i < v.Len(); i++ { - subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i))) - if subFilter != nil { - filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value) - if err != nil { - return nil, err - } - values = values.Append(NewTerm(filteredValue)) - } - } - return values, nil - case Set: - values := NewSet() - err := v.Iter(func(t *Term) error { - if filteredObj.Get(t) != nil { - filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value) - if err != nil { - return err - } - values.Add(NewTerm(filteredValue)) - } - return nil - }) - return values, err - case *object: - values := NewObject() - - iterObj := v - other := filteredObj - if v.Len() < filteredObj.Len() { - iterObj = filteredObj - other = v - } - - err := iterObj.Iter(func(key *Term, _ *Term) error { - if other.Get(key) != nil { - filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value) - if err != nil { - return err - } - values.Insert(key, NewTerm(filteredValue)) - } - return nil - }) - return values, err - default: - return nil, fmt.Errorf("invalid object value type %q", v) - } + return v1.Item(key, value) } // NOTE(philipc): The only way to get an ObjectKeyIterator should be // from an Object. This ensures that the iterator can have implementation- // specific details internally, with no contracts except to the very // limited interface. 
-type ObjectKeysIterator interface { - Next() (*Term, bool) -} - -type objectKeysIterator struct { - obj *object - numKeys int - index int -} - -func newobjectKeysIterator(o *object) ObjectKeysIterator { - return &objectKeysIterator{ - obj: o, - numKeys: o.Len(), - index: 0, - } -} - -func (oki *objectKeysIterator) Next() (*Term, bool) { - if oki.index == oki.numKeys || oki.numKeys == 0 { - return nil, false - } - oki.index++ - return oki.obj.sortedKeys()[oki.index-1].key, true -} +type ObjectKeysIterator = v1.ObjectKeysIterator // ArrayComprehension represents an array comprehension as defined in the language. -type ArrayComprehension struct { - Term *Term `json:"term"` - Body Body `json:"body"` -} +type ArrayComprehension = v1.ArrayComprehension // ArrayComprehensionTerm creates a new Term with an ArrayComprehension value. func ArrayComprehensionTerm(term *Term, body Body) *Term { - return &Term{ - Value: &ArrayComprehension{ - Term: term, - Body: body, - }, - } -} - -// Copy returns a deep copy of ac. -func (ac *ArrayComprehension) Copy() *ArrayComprehension { - cpy := *ac - cpy.Body = ac.Body.Copy() - cpy.Term = ac.Term.Copy() - return &cpy -} - -// Equal returns true if ac is equal to other. -func (ac *ArrayComprehension) Equal(other Value) bool { - return Compare(ac, other) == 0 -} - -// Compare compares ac to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (ac *ArrayComprehension) Compare(other Value) int { - return Compare(ac, other) -} - -// Find returns the current value or a not found error. -func (ac *ArrayComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return ac, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. -func (ac *ArrayComprehension) Hash() int { - return ac.Term.Hash() + ac.Body.Hash() -} - -// IsGround returns true if the Term and Body are ground. 
-func (ac *ArrayComprehension) IsGround() bool { - return ac.Term.IsGround() && ac.Body.IsGround() -} - -func (ac *ArrayComprehension) String() string { - return "[" + ac.Term.String() + " | " + ac.Body.String() + "]" + return v1.ArrayComprehensionTerm(term, body) } // ObjectComprehension represents an object comprehension as defined in the language. -type ObjectComprehension struct { - Key *Term `json:"key"` - Value *Term `json:"value"` - Body Body `json:"body"` -} +type ObjectComprehension = v1.ObjectComprehension // ObjectComprehensionTerm creates a new Term with an ObjectComprehension value. func ObjectComprehensionTerm(key, value *Term, body Body) *Term { - return &Term{ - Value: &ObjectComprehension{ - Key: key, - Value: value, - Body: body, - }, - } -} - -// Copy returns a deep copy of oc. -func (oc *ObjectComprehension) Copy() *ObjectComprehension { - cpy := *oc - cpy.Body = oc.Body.Copy() - cpy.Key = oc.Key.Copy() - cpy.Value = oc.Value.Copy() - return &cpy -} - -// Equal returns true if oc is equal to other. -func (oc *ObjectComprehension) Equal(other Value) bool { - return Compare(oc, other) == 0 -} - -// Compare compares oc to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (oc *ObjectComprehension) Compare(other Value) int { - return Compare(oc, other) -} - -// Find returns the current value or a not found error. -func (oc *ObjectComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return oc, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. -func (oc *ObjectComprehension) Hash() int { - return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash() -} - -// IsGround returns true if the Key, Value and Body are ground. 
-func (oc *ObjectComprehension) IsGround() bool { - return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround() -} - -func (oc *ObjectComprehension) String() string { - return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}" + return v1.ObjectComprehensionTerm(key, value, body) } // SetComprehension represents a set comprehension as defined in the language. -type SetComprehension struct { - Term *Term `json:"term"` - Body Body `json:"body"` -} +type SetComprehension = v1.SetComprehension // SetComprehensionTerm creates a new Term with an SetComprehension value. func SetComprehensionTerm(term *Term, body Body) *Term { - return &Term{ - Value: &SetComprehension{ - Term: term, - Body: body, - }, - } -} - -// Copy returns a deep copy of sc. -func (sc *SetComprehension) Copy() *SetComprehension { - cpy := *sc - cpy.Body = sc.Body.Copy() - cpy.Term = sc.Term.Copy() - return &cpy -} - -// Equal returns true if sc is equal to other. -func (sc *SetComprehension) Equal(other Value) bool { - return Compare(sc, other) == 0 -} - -// Compare compares sc to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (sc *SetComprehension) Compare(other Value) int { - return Compare(sc, other) -} - -// Find returns the current value or a not found error. -func (sc *SetComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return sc, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. -func (sc *SetComprehension) Hash() int { - return sc.Term.Hash() + sc.Body.Hash() -} - -// IsGround returns true if the Term and Body are ground. -func (sc *SetComprehension) IsGround() bool { - return sc.Term.IsGround() && sc.Body.IsGround() -} - -func (sc *SetComprehension) String() string { - return "{" + sc.Term.String() + " | " + sc.Body.String() + "}" + return v1.SetComprehensionTerm(term, body) } // Call represents as function call in the language. 
-type Call []*Term +type Call = v1.Call // CallTerm returns a new Term with a Call value defined by terms. The first // term is the operator and the rest are operands. func CallTerm(terms ...*Term) *Term { - return NewTerm(Call(terms)) -} - -// Copy returns a deep copy of c. -func (c Call) Copy() Call { - return termSliceCopy(c) -} - -// Compare compares c to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (c Call) Compare(other Value) int { - return Compare(c, other) -} - -// Find returns the current value or a not found error. -func (c Call) Find(Ref) (Value, error) { - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (c Call) Hash() int { - return termSliceHash(c) -} - -// IsGround returns true if the Value is ground. -func (c Call) IsGround() bool { - return termSliceIsGround(c) -} - -// MakeExpr returns an ew Expr from this call. -func (c Call) MakeExpr(output *Term) *Expr { - terms := []*Term(c) - return NewExpr(append(terms, output)) -} - -func (c Call) String() string { - args := make([]string, len(c)-1) - for i := 1; i < len(c); i++ { - args[i-1] = c[i].String() - } - return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", ")) -} - -func termSliceCopy(a []*Term) []*Term { - cpy := make([]*Term, len(a)) - for i := range a { - cpy[i] = a[i].Copy() - } - return cpy -} - -func termSliceEqual(a, b []*Term) bool { - if len(a) == len(b) { - for i := range a { - if !a[i].Equal(b[i]) { - return false - } - } - return true - } - return false -} - -func termSliceHash(a []*Term) int { - var hash int - for _, v := range a { - hash += v.Value.Hash() - } - return hash -} - -func termSliceIsGround(a []*Term) bool { - for _, v := range a { - if !v.IsGround() { - return false - } - } - return true -} - -// NOTE(tsandall): The unmarshalling errors in these functions are not -// helpful for callers because they do not identify the source of the -// unmarshalling error. 
Because OPA doesn't accept JSON describing ASTs -// from callers, this is acceptable (for now). If that changes in the future, -// the error messages should be revisited. The current approach focuses -// on the happy path and treats all errors the same. If better error -// reporting is needed, the error paths will need to be fleshed out. - -func unmarshalBody(b []interface{}) (Body, error) { - buf := Body{} - for _, e := range b { - if m, ok := e.(map[string]interface{}); ok { - expr := &Expr{} - if err := unmarshalExpr(expr, m); err == nil { - buf = append(buf, expr) - continue - } - } - goto unmarshal_error - } - return buf, nil -unmarshal_error: - return nil, fmt.Errorf("ast: unable to unmarshal body") -} - -func unmarshalExpr(expr *Expr, v map[string]interface{}) error { - if x, ok := v["negated"]; ok { - if b, ok := x.(bool); ok { - expr.Negated = b - } else { - return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"]) - } - } - if generatedRaw, ok := v["generated"]; ok { - if b, ok := generatedRaw.(bool); ok { - expr.Generated = b - } else { - return fmt.Errorf("ast: unable to unmarshal generated field with type: %T (expected true or false)", v["generated"]) - } - } - - if err := unmarshalExprIndex(expr, v); err != nil { - return err - } - switch ts := v["terms"].(type) { - case map[string]interface{}: - t, err := unmarshalTerm(ts) - if err != nil { - return err - } - expr.Terms = t - case []interface{}: - terms, err := unmarshalTermSlice(ts) - if err != nil { - return err - } - expr.Terms = terms - default: - return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"]) - } - if x, ok := v["with"]; ok { - if sl, ok := x.([]interface{}); ok { - ws := make([]*With, len(sl)) - for i := range sl { - var err error - ws[i], err = unmarshalWith(sl[i]) - if err != nil { - return err - } - } - expr.With = ws - } - } 
- if loc, ok := v["location"].(map[string]interface{}); ok { - expr.Location = &Location{} - if err := unmarshalLocation(expr.Location, loc); err != nil { - return err - } - } - return nil -} - -func unmarshalLocation(loc *Location, v map[string]interface{}) error { - if x, ok := v["file"]; ok { - if s, ok := x.(string); ok { - loc.File = s - } else { - return fmt.Errorf("ast: unable to unmarshal file field with type: %T (expected string)", v["file"]) - } - } - if x, ok := v["row"]; ok { - if n, ok := x.(json.Number); ok { - i64, err := n.Int64() - if err != nil { - return err - } - loc.Row = int(i64) - } else { - return fmt.Errorf("ast: unable to unmarshal row field with type: %T (expected number)", v["row"]) - } - } - if x, ok := v["col"]; ok { - if n, ok := x.(json.Number); ok { - i64, err := n.Int64() - if err != nil { - return err - } - loc.Col = int(i64) - } else { - return fmt.Errorf("ast: unable to unmarshal col field with type: %T (expected number)", v["col"]) - } - } - - return nil -} - -func unmarshalExprIndex(expr *Expr, v map[string]interface{}) error { - if x, ok := v["index"]; ok { - if n, ok := x.(json.Number); ok { - i, err := n.Int64() - if err == nil { - expr.Index = int(i) - return nil - } - } - } - return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"]) -} - -func unmarshalTerm(m map[string]interface{}) (*Term, error) { - var term Term - - v, err := unmarshalValue(m) - if err != nil { - return nil, err - } - term.Value = v - - if loc, ok := m["location"].(map[string]interface{}); ok { - term.Location = &Location{} - if err := unmarshalLocation(term.Location, loc); err != nil { - return nil, err - } - } - - return &term, nil -} - -func unmarshalTermSlice(s []interface{}) ([]*Term, error) { - buf := []*Term{} - for _, x := range s { - if m, ok := x.(map[string]interface{}); ok { - t, err := unmarshalTerm(m) - if err == nil { - buf = append(buf, t) - continue - } - return nil, err - } - return nil, 
fmt.Errorf("ast: unable to unmarshal term") - } - return buf, nil -} - -func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) { - if s, ok := d["value"].([]interface{}); ok { - return unmarshalTermSlice(s) - } - return nil, fmt.Errorf(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`) -} - -func unmarshalWith(i interface{}) (*With, error) { - if m, ok := i.(map[string]interface{}); ok { - tgt, _ := m["target"].(map[string]interface{}) - target, err := unmarshalTerm(tgt) - if err == nil { - val, _ := m["value"].(map[string]interface{}) - value, err := unmarshalTerm(val) - if err == nil { - return &With{ - Target: target, - Value: value, - }, nil - } - return nil, err - } - return nil, err - } - return nil, fmt.Errorf(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`) -} - -func unmarshalValue(d map[string]interface{}) (Value, error) { - v := d["value"] - switch d["type"] { - case "null": - return Null{}, nil - case "boolean": - if b, ok := v.(bool); ok { - return Boolean(b), nil - } - case "number": - if n, ok := v.(json.Number); ok { - return Number(n), nil - } - case "string": - if s, ok := v.(string); ok { - return String(s), nil - } - case "var": - if s, ok := v.(string); ok { - return Var(s), nil - } - case "ref": - if s, err := unmarshalTermSliceValue(d); err == nil { - return Ref(s), nil - } - case "array": - if s, err := unmarshalTermSliceValue(d); err == nil { - return NewArray(s...), nil - } - case "set": - if s, err := unmarshalTermSliceValue(d); err == nil { - set := NewSet() - for _, x := range s { - set.Add(x) - } - return set, nil - } - case "object": - if s, ok := v.([]interface{}); ok { - buf := NewObject() - for _, x := range s { - if i, ok := x.([]interface{}); ok && len(i) == 2 { - p, err := unmarshalTermSlice(i) - if err == nil { - buf.Insert(p[0], p[1]) - continue - } - } - goto unmarshal_error - } - return buf, nil - } - case 
"arraycomprehension", "setcomprehension": - if m, ok := v.(map[string]interface{}); ok { - t, ok := m["term"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - term, err := unmarshalTerm(t) - if err != nil { - goto unmarshal_error - } - - b, ok := m["body"].([]interface{}) - if !ok { - goto unmarshal_error - } - - body, err := unmarshalBody(b) - if err != nil { - goto unmarshal_error - } - - if d["type"] == "arraycomprehension" { - return &ArrayComprehension{Term: term, Body: body}, nil - } - return &SetComprehension{Term: term, Body: body}, nil - } - case "objectcomprehension": - if m, ok := v.(map[string]interface{}); ok { - k, ok := m["key"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - key, err := unmarshalTerm(k) - if err != nil { - goto unmarshal_error - } - - v, ok := m["value"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - value, err := unmarshalTerm(v) - if err != nil { - goto unmarshal_error - } - - b, ok := m["body"].([]interface{}) - if !ok { - goto unmarshal_error - } - - body, err := unmarshalBody(b) - if err != nil { - goto unmarshal_error - } - - return &ObjectComprehension{Key: key, Value: value, Body: body}, nil - } - case "call": - if s, err := unmarshalTermSliceValue(d); err == nil { - return Call(s), nil - } - } -unmarshal_error: - return nil, fmt.Errorf("ast: unable to unmarshal term") + return v1.CallTerm(terms...) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/transform.go b/vendor/github.com/open-policy-agent/opa/ast/transform.go index 391a164860..8c03c48663 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/transform.go +++ b/vendor/github.com/open-policy-agent/opa/ast/transform.go @@ -5,427 +5,42 @@ package ast import ( - "fmt" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Transformer defines the interface for transforming AST elements. 
If the // transformer returns nil and does not indicate an error, the AST element will // be set to nil and no transformations will be applied to children of the // element. -type Transformer interface { - Transform(interface{}) (interface{}, error) -} +type Transformer = v1.Transformer // Transform iterates the AST and calls the Transform function on the // Transformer t for x before recursing. -func Transform(t Transformer, x interface{}) (interface{}, error) { - - if term, ok := x.(*Term); ok { - return Transform(t, term.Value) - } - - y, err := t.Transform(x) - if err != nil { - return x, err - } - - if y == nil { - return nil, nil - } - - var ok bool - switch y := y.(type) { - case *Module: - p, err := Transform(t, y.Package) - if err != nil { - return nil, err - } - if y.Package, ok = p.(*Package); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p) - } - for i := range y.Imports { - imp, err := Transform(t, y.Imports[i]) - if err != nil { - return nil, err - } - if y.Imports[i], ok = imp.(*Import); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp) - } - } - for i := range y.Rules { - rule, err := Transform(t, y.Rules[i]) - if err != nil { - return nil, err - } - if y.Rules[i], ok = rule.(*Rule); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule) - } - } - for i := range y.Annotations { - a, err := Transform(t, y.Annotations[i]) - if err != nil { - return nil, err - } - if y.Annotations[i], ok = a.(*Annotations); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a) - } - } - for i := range y.Comments { - comment, err := Transform(t, y.Comments[i]) - if err != nil { - return nil, err - } - if y.Comments[i], ok = comment.(*Comment); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment) - } - } - return y, nil - case *Package: - ref, err := Transform(t, y.Path) - if err != nil { - return nil, err - } - if y.Path, ok = 
ref.(Ref); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref) - } - return y, nil - case *Import: - y.Path, err = transformTerm(t, y.Path) - if err != nil { - return nil, err - } - if y.Alias, err = transformVar(t, y.Alias); err != nil { - return nil, err - } - return y, nil - case *Rule: - if y.Head, err = transformHead(t, y.Head); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - if y.Else != nil { - rule, err := Transform(t, y.Else) - if err != nil { - return nil, err - } - if y.Else, ok = rule.(*Rule); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule) - } - } - return y, nil - case *Head: - if y.Reference, err = transformRef(t, y.Reference); err != nil { - return nil, err - } - if y.Name, err = transformVar(t, y.Name); err != nil { - return nil, err - } - if y.Args, err = transformArgs(t, y.Args); err != nil { - return nil, err - } - if y.Key != nil { - if y.Key, err = transformTerm(t, y.Key); err != nil { - return nil, err - } - } - if y.Value != nil { - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - } - return y, nil - case Args: - for i := range y { - if y[i], err = transformTerm(t, y[i]); err != nil { - return nil, err - } - } - return y, nil - case Body: - for i, e := range y { - e, err := Transform(t, e) - if err != nil { - return nil, err - } - if y[i], ok = e.(*Expr); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e) - } - } - return y, nil - case *Expr: - switch ts := y.Terms.(type) { - case *SomeDecl: - decl, err := Transform(t, ts) - if err != nil { - return nil, err - } - if y.Terms, ok = decl.(*SomeDecl); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y, decl) - } - return y, nil - case []*Term: - for i := range ts { - if ts[i], err = transformTerm(t, ts[i]); err != nil { - return nil, err - } - } - case *Term: - if y.Terms, err = transformTerm(t, ts); err != nil 
{ - return nil, err - } - case *Every: - if ts.Key != nil { - ts.Key, err = transformTerm(t, ts.Key) - if err != nil { - return nil, err - } - } - ts.Value, err = transformTerm(t, ts.Value) - if err != nil { - return nil, err - } - ts.Domain, err = transformTerm(t, ts.Domain) - if err != nil { - return nil, err - } - ts.Body, err = transformBody(t, ts.Body) - if err != nil { - return nil, err - } - y.Terms = ts - } - for i, w := range y.With { - w, err := Transform(t, w) - if err != nil { - return nil, err - } - if y.With[i], ok = w.(*With); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w) - } - } - return y, nil - case *With: - if y.Target, err = transformTerm(t, y.Target); err != nil { - return nil, err - } - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - return y, nil - case Ref: - for i, term := range y { - if y[i], err = transformTerm(t, term); err != nil { - return nil, err - } - } - return y, nil - case *object: - return y.Map(func(k, v *Term) (*Term, *Term, error) { - k, err := transformTerm(t, k) - if err != nil { - return nil, nil, err - } - v, err = transformTerm(t, v) - if err != nil { - return nil, nil, err - } - return k, v, nil - }) - case *Array: - for i := 0; i < y.Len(); i++ { - v, err := transformTerm(t, y.Elem(i)) - if err != nil { - return nil, err - } - y.set(i, v) - } - return y, nil - case Set: - y, err = y.Map(func(term *Term) (*Term, error) { - return transformTerm(t, term) - }) - if err != nil { - return nil, err - } - return y, nil - case *ArrayComprehension: - if y.Term, err = transformTerm(t, y.Term); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - return y, nil - case *ObjectComprehension: - if y.Key, err = transformTerm(t, y.Key); err != nil { - return nil, err - } - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - 
return nil, err - } - return y, nil - case *SetComprehension: - if y.Term, err = transformTerm(t, y.Term); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - return y, nil - case Call: - for i := range y { - if y[i], err = transformTerm(t, y[i]); err != nil { - return nil, err - } - } - return y, nil - default: - return y, nil - } +func Transform(t Transformer, x any) (any, error) { + return v1.Transform(t, x) } // TransformRefs calls the function f on all references under x. -func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - if r, ok := x.(Ref); ok { - return f(r) - } - return x, nil - }} - return Transform(t, x) +func TransformRefs(x any, f func(Ref) (Value, error)) (any, error) { + return v1.TransformRefs(x, f) } // TransformVars calls the function f on all vars under x. -func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - if v, ok := x.(Var); ok { - return f(v) - } - return x, nil - }} - return Transform(t, x) +func TransformVars(x any, f func(Var) (Value, error)) (any, error) { + return v1.TransformVars(x, f) } // TransformComprehensions calls the functio nf on all comprehensions under x. 
-func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - switch x := x.(type) { - case *ArrayComprehension: - return f(x) - case *SetComprehension: - return f(x) - case *ObjectComprehension: - return f(x) - } - return x, nil - }} - return Transform(t, x) +func TransformComprehensions(x any, f func(any) (Value, error)) (any, error) { + return v1.TransformComprehensions(x, f) } // GenericTransformer implements the Transformer interface to provide a utility // to transform AST nodes using a closure. -type GenericTransformer struct { - f func(interface{}) (interface{}, error) -} +type GenericTransformer = v1.GenericTransformer // NewGenericTransformer returns a new GenericTransformer that will transform // AST nodes using the function f. -func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer { - return &GenericTransformer{ - f: f, - } -} - -// Transform calls the function f on the GenericTransformer. 
-func (t *GenericTransformer) Transform(x interface{}) (interface{}, error) { - return t.f(x) -} - -func transformHead(t Transformer, head *Head) (*Head, error) { - y, err := Transform(t, head) - if err != nil { - return nil, err - } - h, ok := y.(*Head) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", head, y) - } - return h, nil -} - -func transformArgs(t Transformer, args Args) (Args, error) { - y, err := Transform(t, args) - if err != nil { - return nil, err - } - a, ok := y.(Args) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", args, y) - } - return a, nil -} - -func transformBody(t Transformer, body Body) (Body, error) { - y, err := Transform(t, body) - if err != nil { - return nil, err - } - r, ok := y.(Body) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", body, y) - } - return r, nil -} - -func transformTerm(t Transformer, term *Term) (*Term, error) { - v, err := transformValue(t, term.Value) - if err != nil { - return nil, err - } - r := &Term{ - Value: v, - Location: term.Location, - } - return r, nil -} - -func transformValue(t Transformer, v Value) (Value, error) { - v1, err := Transform(t, v) - if err != nil { - return nil, err - } - r, ok := v1.(Value) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", v, v1) - } - return r, nil -} - -func transformVar(t Transformer, v Var) (Var, error) { - v1, err := Transform(t, v) - if err != nil { - return "", err - } - r, ok := v1.(Var) - if !ok { - return "", fmt.Errorf("illegal transform: %T != %T", v, v1) - } - return r, nil -} - -func transformRef(t Transformer, r Ref) (Ref, error) { - r1, err := Transform(t, r) - if err != nil { - return nil, err - } - r2, ok := r1.(Ref) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", r, r2) - } - return r2, nil +func NewGenericTransformer(f func(x any) (any, error)) *GenericTransformer { + return v1.NewGenericTransformer(f) } diff --git 
a/vendor/github.com/open-policy-agent/opa/ast/unify.go b/vendor/github.com/open-policy-agent/opa/ast/unify.go index 60244974a9..3cb260272a 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/unify.go +++ b/vendor/github.com/open-policy-agent/opa/ast/unify.go @@ -4,232 +4,11 @@ package ast -func isRefSafe(ref Ref, safe VarSet) bool { - switch head := ref[0].Value.(type) { - case Var: - return safe.Contains(head) - case Call: - return isCallSafe(head, safe) - default: - for v := range ref[0].Vars() { - if !safe.Contains(v) { - return false - } - } - return true - } -} - -func isCallSafe(call Call, safe VarSet) bool { - vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) - vis.Walk(call) - unsafe := vis.Vars().Diff(safe) - return len(unsafe) == 0 -} +import v1 "github.com/open-policy-agent/opa/v1/ast" // Unify returns a set of variables that will be unified when the equality expression defined by // terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already // unified. 
func Unify(safe VarSet, a *Term, b *Term) VarSet { - u := &unifier{ - safe: safe, - unified: VarSet{}, - unknown: map[Var]VarSet{}, - } - u.unify(a, b) - return u.unified -} - -type unifier struct { - safe VarSet - unified VarSet - unknown map[Var]VarSet -} - -func (u *unifier) isSafe(x Var) bool { - return u.safe.Contains(x) || u.unified.Contains(x) -} - -func (u *unifier) unify(a *Term, b *Term) { - - switch a := a.Value.(type) { - - case Var: - switch b := b.Value.(type) { - case Var: - if u.isSafe(b) { - u.markSafe(a) - } else if u.isSafe(a) { - u.markSafe(b) - } else { - u.markUnknown(a, b) - u.markUnknown(b, a) - } - case *Array, Object: - u.unifyAll(a, b) - case Ref: - if isRefSafe(b, u.safe) { - u.markSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markSafe(a) - } - default: - u.markSafe(a) - } - - case Ref: - if isRefSafe(a, u.safe) { - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array, Object: - u.markAllSafe(b) - } - } - - case Call: - if isCallSafe(a, u.safe) { - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array, Object: - u.markAllSafe(b) - } - } - - case *ArrayComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array: - u.markAllSafe(b) - } - case *ObjectComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *object: - u.markAllSafe(b) - } - case *SetComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - } - - case *Array: - switch b := b.Value.(type) { - case Var: - u.unifyAll(b, a) - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - u.markAllSafe(a) - case Ref: - if isRefSafe(b, u.safe) { - u.markAllSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markAllSafe(a) - } - case *Array: - if a.Len() == b.Len() { - for i := 0; i < a.Len(); i++ { - u.unify(a.Elem(i), b.Elem(i)) - } - } - } - - case *object: - switch b := b.Value.(type) { - case Var: - u.unifyAll(b, a) - case Ref: - if isRefSafe(b, 
u.safe) { - u.markAllSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markAllSafe(a) - } - case *object: - if a.Len() == b.Len() { - _ = a.Iter(func(k, v *Term) error { - if v2 := b.Get(k); v2 != nil { - u.unify(v, v2) - } - return nil - }) // impossible to return error - } - } - - default: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - } - } -} - -func (u *unifier) markAllSafe(x Value) { - vis := u.varVisitor() - vis.Walk(x) - for v := range vis.Vars() { - u.markSafe(v) - } -} - -func (u *unifier) markSafe(x Var) { - u.unified.Add(x) - - // Add dependencies of 'x' to safe set - vs := u.unknown[x] - delete(u.unknown, x) - for v := range vs { - u.markSafe(v) - } - - // Add dependants of 'x' to safe set if they have no more - // dependencies. - for v, deps := range u.unknown { - if deps.Contains(x) { - delete(deps, x) - if len(deps) == 0 { - u.markSafe(v) - } - } - } -} - -func (u *unifier) markUnknown(a, b Var) { - if _, ok := u.unknown[a]; !ok { - u.unknown[a] = NewVarSet() - } - u.unknown[a].Add(b) -} - -func (u *unifier) unifyAll(a Var, b Value) { - if u.isSafe(a) { - u.markAllSafe(b) - } else { - vis := u.varVisitor() - vis.Walk(b) - unsafe := vis.Vars().Diff(u.safe).Diff(u.unified) - if len(unsafe) == 0 { - u.markSafe(a) - } else { - for v := range unsafe { - u.markUnknown(a, v) - } - } - } -} - -func (u *unifier) varVisitor() *VarVisitor { - return NewVarVisitor().WithParams(VarVisitorParams{ - SkipRefHead: true, - SkipObjectKeys: true, - SkipClosures: true, - }) + return v1.Unify(safe, a, b) } diff --git a/vendor/github.com/open-policy-agent/opa/ast/varset.go b/vendor/github.com/open-policy-agent/opa/ast/varset.go index 14f531494b..9e7db8efda 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/varset.go +++ b/vendor/github.com/open-policy-agent/opa/ast/varset.go @@ -5,96 +5,13 @@ package ast import ( - "fmt" - "sort" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // VarSet represents a set of variables. 
-type VarSet map[Var]struct{} +type VarSet = v1.VarSet // NewVarSet returns a new VarSet containing the specified variables. func NewVarSet(vs ...Var) VarSet { - s := VarSet{} - for _, v := range vs { - s.Add(v) - } - return s -} - -// Add updates the set to include the variable "v". -func (s VarSet) Add(v Var) { - s[v] = struct{}{} -} - -// Contains returns true if the set contains the variable "v". -func (s VarSet) Contains(v Var) bool { - _, ok := s[v] - return ok -} - -// Copy returns a shallow copy of the VarSet. -func (s VarSet) Copy() VarSet { - cpy := VarSet{} - for v := range s { - cpy.Add(v) - } - return cpy -} - -// Diff returns a VarSet containing variables in s that are not in vs. -func (s VarSet) Diff(vs VarSet) VarSet { - r := VarSet{} - for v := range s { - if !vs.Contains(v) { - r.Add(v) - } - } - return r -} - -// Equal returns true if s contains exactly the same elements as vs. -func (s VarSet) Equal(vs VarSet) bool { - if len(s.Diff(vs)) > 0 { - return false - } - return len(vs.Diff(s)) == 0 -} - -// Intersect returns a VarSet containing variables in s that are in vs. -func (s VarSet) Intersect(vs VarSet) VarSet { - r := VarSet{} - for v := range s { - if vs.Contains(v) { - r.Add(v) - } - } - return r -} - -// Sorted returns a sorted slice of vars from s. -func (s VarSet) Sorted() []Var { - sorted := make([]Var, 0, len(s)) - for v := range s { - sorted = append(sorted, v) - } - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Compare(sorted[j]) < 0 - }) - return sorted -} - -// Update merges the other VarSet into this VarSet. -func (s VarSet) Update(vs VarSet) { - for v := range vs { - s.Add(v) - } -} - -func (s VarSet) String() string { - tmp := make([]string, 0, len(s)) - for v := range s { - tmp = append(tmp, string(v)) - } - sort.Strings(tmp) - return fmt.Sprintf("%v", tmp) + return v1.NewVarSet(vs...) 
} diff --git a/vendor/github.com/open-policy-agent/opa/ast/version_index.json b/vendor/github.com/open-policy-agent/opa/ast/version_index.json deleted file mode 100644 index 718df220f9..0000000000 --- a/vendor/github.com/open-policy-agent/opa/ast/version_index.json +++ /dev/null @@ -1,1450 +0,0 @@ -{ - "builtins": { - "abs": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "all": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "and": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "any": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "array.concat": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "array.reverse": { - "Major": 0, - "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "array.slice": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "assign": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "base64.decode": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "base64.encode": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "base64.is_valid": { - "Major": 0, - "Minor": 24, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "base64url.decode": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "base64url.encode": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "base64url.encode_no_pad": { - "Major": 0, - "Minor": 25, - "Patch": 0, - "PreRelease": "rc2", - "Metadata": "" - }, - "bits.and": { - "Major": 0, - "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "bits.lsh": { - "Major": 0, - "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "bits.negate": { - 
"Major": 0, - "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "bits.or": { - "Major": 0, - "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "bits.rsh": { - "Major": 0, - "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "bits.xor": { - "Major": 0, - "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "cast_array": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "cast_boolean": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "cast_null": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "cast_object": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "cast_set": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "cast_string": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "ceil": { - "Major": 0, - "Minor": 26, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "concat": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "contains": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "count": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.hmac.equal": { - "Major": 0, - "Minor": 52, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.hmac.md5": { - "Major": 0, - "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.hmac.sha1": { - "Major": 0, - "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.hmac.sha256": { - "Major": 0, - "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.hmac.sha512": { - "Major": 0, - "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.md5": { - "Major": 0, - "Minor": 17, - 
"Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.parse_private_keys": { - "Major": 0, - "Minor": 55, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.sha1": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.sha256": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.x509.parse_and_verify_certificates": { - "Major": 0, - "Minor": 31, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.x509.parse_and_verify_certificates_with_options": { - "Major": 0, - "Minor": 63, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.x509.parse_certificate_request": { - "Major": 0, - "Minor": 21, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.x509.parse_certificates": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.x509.parse_keypair": { - "Major": 0, - "Minor": 53, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "crypto.x509.parse_rsa_private_key": { - "Major": 0, - "Minor": 33, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "div": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "endswith": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "eq": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "equal": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "floor": { - "Major": 0, - "Minor": 26, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "format_int": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "glob.match": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "glob.quote_meta": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "graph.reachable": { - "Major": 0, 
- "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "graph.reachable_paths": { - "Major": 0, - "Minor": 37, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "graphql.is_valid": { - "Major": 0, - "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "graphql.parse": { - "Major": 0, - "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "graphql.parse_and_verify": { - "Major": 0, - "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "graphql.parse_query": { - "Major": 0, - "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "graphql.parse_schema": { - "Major": 0, - "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "graphql.schema_is_valid": { - "Major": 0, - "Minor": 46, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "gt": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "gte": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "hex.decode": { - "Major": 0, - "Minor": 25, - "Patch": 0, - "PreRelease": "rc2", - "Metadata": "" - }, - "hex.encode": { - "Major": 0, - "Minor": 25, - "Patch": 0, - "PreRelease": "rc2", - "Metadata": "" - }, - "http.send": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "indexof": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "indexof_n": { - "Major": 0, - "Minor": 37, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "internal.member_2": { - "Major": 0, - "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "internal.member_3": { - "Major": 0, - "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "internal.print": { - "Major": 0, - "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "intersection": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - 
"io.jwt.decode": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.decode_verify": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.encode_sign": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.encode_sign_raw": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_es256": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_es384": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_es512": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_hs256": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_hs384": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_hs512": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_ps256": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_ps384": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_ps512": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_rs256": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_rs384": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "io.jwt.verify_rs512": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "is_array": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "is_boolean": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - 
"is_null": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "is_number": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "is_object": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "is_set": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "is_string": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "json.filter": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "json.is_valid": { - "Major": 0, - "Minor": 25, - "Patch": 0, - "PreRelease": "rc1", - "Metadata": "" - }, - "json.marshal": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "json.marshal_with_options": { - "Major": 0, - "Minor": 64, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "json.match_schema": { - "Major": 0, - "Minor": 50, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "json.patch": { - "Major": 0, - "Minor": 25, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "json.remove": { - "Major": 0, - "Minor": 18, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "json.unmarshal": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "json.verify_schema": { - "Major": 0, - "Minor": 50, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "lower": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "lt": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "lte": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "max": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "min": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "minus": { - "Major": 0, - "Minor": 17, - 
"Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "mul": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "neq": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "net.cidr_contains": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "net.cidr_contains_matches": { - "Major": 0, - "Minor": 19, - "Patch": 0, - "PreRelease": "rc1", - "Metadata": "" - }, - "net.cidr_expand": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "net.cidr_intersects": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "net.cidr_is_valid": { - "Major": 0, - "Minor": 46, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "net.cidr_merge": { - "Major": 0, - "Minor": 24, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "net.cidr_overlap": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "net.lookup_ip_addr": { - "Major": 0, - "Minor": 35, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "numbers.range": { - "Major": 0, - "Minor": 22, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "numbers.range_step": { - "Major": 0, - "Minor": 56, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "object.filter": { - "Major": 0, - "Minor": 17, - "Patch": 2, - "PreRelease": "", - "Metadata": "" - }, - "object.get": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "object.keys": { - "Major": 0, - "Minor": 47, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "object.remove": { - "Major": 0, - "Minor": 17, - "Patch": 2, - "PreRelease": "", - "Metadata": "" - }, - "object.subset": { - "Major": 0, - "Minor": 42, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "object.union": { - "Major": 0, - "Minor": 17, - "Patch": 2, - "PreRelease": "", - "Metadata": "" - }, - 
"object.union_n": { - "Major": 0, - "Minor": 37, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "opa.runtime": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "or": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "plus": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "print": { - "Major": 0, - "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "product": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "providers.aws.sign_req": { - "Major": 0, - "Minor": 47, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "rand.intn": { - "Major": 0, - "Minor": 31, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "re_match": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "regex.find_all_string_submatch_n": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "regex.find_n": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "regex.globs_match": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "regex.is_valid": { - "Major": 0, - "Minor": 23, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "regex.match": { - "Major": 0, - "Minor": 23, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "regex.replace": { - "Major": 0, - "Minor": 45, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "regex.split": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "regex.template_match": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "rego.metadata.chain": { - "Major": 0, - "Minor": 40, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "rego.metadata.rule": { - "Major": 0, - "Minor": 40, - "Patch": 0, - "PreRelease": "", - 
"Metadata": "" - }, - "rego.parse_module": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "rem": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "replace": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "round": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "semver.compare": { - "Major": 0, - "Minor": 22, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "semver.is_valid": { - "Major": 0, - "Minor": 22, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "set_diff": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "sort": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "split": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "sprintf": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "startswith": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "strings.any_prefix_match": { - "Major": 0, - "Minor": 44, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "strings.any_suffix_match": { - "Major": 0, - "Minor": 44, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "strings.count": { - "Major": 0, - "Minor": 67, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "strings.render_template": { - "Major": 0, - "Minor": 59, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "strings.replace_n": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "strings.reverse": { - "Major": 0, - "Minor": 36, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "substring": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "sum": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - 
"Metadata": "" - }, - "time.add_date": { - "Major": 0, - "Minor": 19, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.clock": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.date": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.diff": { - "Major": 0, - "Minor": 28, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.format": { - "Major": 0, - "Minor": 48, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.now_ns": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.parse_duration_ns": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.parse_ns": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.parse_rfc3339_ns": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "time.weekday": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "to_number": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "trace": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "trim": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "trim_left": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "trim_prefix": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "trim_right": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "trim_space": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "trim_suffix": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "type_name": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - 
"union": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "units.parse": { - "Major": 0, - "Minor": 41, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "units.parse_bytes": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "upper": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "urlquery.decode": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "urlquery.decode_object": { - "Major": 0, - "Minor": 24, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "urlquery.encode": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "urlquery.encode_object": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "uuid.parse": { - "Major": 0, - "Minor": 57, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "uuid.rfc4122": { - "Major": 0, - "Minor": 20, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "walk": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "yaml.is_valid": { - "Major": 0, - "Minor": 25, - "Patch": 0, - "PreRelease": "rc1", - "Metadata": "" - }, - "yaml.marshal": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "yaml.unmarshal": { - "Major": 0, - "Minor": 17, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - } - }, - "features": { - "rego_v1_import": { - "Major": 0, - "Minor": 59, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "rule_head_ref_string_prefixes": { - "Major": 0, - "Minor": 46, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "rule_head_refs": { - "Major": 0, - "Minor": 59, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - } - }, - "keywords": { - "contains": { - "Major": 0, - "Minor": 42, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "every": { - "Major": 0, - "Minor": 38, 
- "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "if": { - "Major": 0, - "Minor": 42, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - }, - "in": { - "Major": 0, - "Minor": 34, - "Patch": 0, - "PreRelease": "", - "Metadata": "" - } - } -} diff --git a/vendor/github.com/open-policy-agent/opa/ast/visit.go b/vendor/github.com/open-policy-agent/opa/ast/visit.go index d83c31149e..50028269d2 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/visit.go +++ b/vendor/github.com/open-policy-agent/opa/ast/visit.go @@ -4,780 +4,124 @@ package ast +import v1 "github.com/open-policy-agent/opa/v1/ast" + // Visitor defines the interface for iterating AST elements. The Visit function // can return a Visitor w which will be used to visit the children of the AST // element v. If the Visit function returns nil, the children will not be // visited. +// // Deprecated: use GenericVisitor or another visitor implementation -type Visitor interface { - Visit(v interface{}) (w Visitor) -} +type Visitor = v1.Visitor //nolint:staticcheck // BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before // and after the AST has been visited. +// // Deprecated: use GenericVisitor or another visitor implementation -type BeforeAndAfterVisitor interface { - Visitor - Before(x interface{}) - After(x interface{}) -} +type BeforeAndAfterVisitor = v1.BeforeAndAfterVisitor //nolint:staticcheck // Walk iterates the AST by calling the Visit function on the Visitor // v for x before recursing. +// // Deprecated: use GenericVisitor.Walk -func Walk(v Visitor, x interface{}) { - if bav, ok := v.(BeforeAndAfterVisitor); !ok { - walk(v, x) - } else { - bav.Before(x) - defer bav.After(x) - walk(bav, x) - } +func Walk(v Visitor, x any) { + v1.Walk(v, x) } // WalkBeforeAndAfter iterates the AST by calling the Visit function on the // Visitor v for x before recursing. 
+// // Deprecated: use GenericVisitor.Walk -func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) { - Walk(v, x) -} - -func walk(v Visitor, x interface{}) { - w := v.Visit(x) - if w == nil { - return - } - switch x := x.(type) { - case *Module: - Walk(w, x.Package) - for i := range x.Imports { - Walk(w, x.Imports[i]) - } - for i := range x.Rules { - Walk(w, x.Rules[i]) - } - for i := range x.Annotations { - Walk(w, x.Annotations[i]) - } - for i := range x.Comments { - Walk(w, x.Comments[i]) - } - case *Package: - Walk(w, x.Path) - case *Import: - Walk(w, x.Path) - Walk(w, x.Alias) - case *Rule: - Walk(w, x.Head) - Walk(w, x.Body) - if x.Else != nil { - Walk(w, x.Else) - } - case *Head: - Walk(w, x.Name) - Walk(w, x.Args) - if x.Key != nil { - Walk(w, x.Key) - } - if x.Value != nil { - Walk(w, x.Value) - } - case Body: - for i := range x { - Walk(w, x[i]) - } - case Args: - for i := range x { - Walk(w, x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - Walk(w, ts) - case []*Term: - for i := range ts { - Walk(w, ts[i]) - } - } - for i := range x.With { - Walk(w, x.With[i]) - } - case *With: - Walk(w, x.Target) - Walk(w, x.Value) - case *Term: - Walk(w, x.Value) - case Ref: - for i := range x { - Walk(w, x[i]) - } - case *object: - x.Foreach(func(k, vv *Term) { - Walk(w, k) - Walk(w, vv) - }) - case *Array: - x.Foreach(func(t *Term) { - Walk(w, t) - }) - case Set: - x.Foreach(func(t *Term) { - Walk(w, t) - }) - case *ArrayComprehension: - Walk(w, x.Term) - Walk(w, x.Body) - case *ObjectComprehension: - Walk(w, x.Key) - Walk(w, x.Value) - Walk(w, x.Body) - case *SetComprehension: - Walk(w, x.Term) - Walk(w, x.Body) - case Call: - for i := range x { - Walk(w, x[i]) - } - case *Every: - if x.Key != nil { - Walk(w, x.Key) - } - Walk(w, x.Value) - Walk(w, x.Domain) - Walk(w, x.Body) - case *SomeDecl: - for i := range x.Symbols { - Walk(w, x.Symbols[i]) - } - } +func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x any) { + 
v1.WalkBeforeAndAfter(v, x) } // WalkVars calls the function f on all vars under x. If the function f // returns true, AST nodes under the last node will not be visited. -func WalkVars(x interface{}, f func(Var) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if v, ok := x.(Var); ok { - return f(v) - } - return false - }} - vis.Walk(x) +func WalkVars(x any, f func(Var) bool) { + v1.WalkVars(x, f) } // WalkClosures calls the function f on all closures under x. If the function f // returns true, AST nodes under the last node will not be visited. -func WalkClosures(x interface{}, f func(interface{}) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: - return f(x) - } - return false - }} - vis.Walk(x) +func WalkClosures(x any, f func(any) bool) { + v1.WalkClosures(x, f) } // WalkRefs calls the function f on all references under x. If the function f // returns true, AST nodes under the last node will not be visited. -func WalkRefs(x interface{}, f func(Ref) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(Ref); ok { - return f(r) - } - return false - }} - vis.Walk(x) +func WalkRefs(x any, f func(Ref) bool) { + v1.WalkRefs(x, f) } // WalkTerms calls the function f on all terms under x. If the function f // returns true, AST nodes under the last node will not be visited. -func WalkTerms(x interface{}, f func(*Term) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if term, ok := x.(*Term); ok { - return f(term) - } - return false - }} - vis.Walk(x) +func WalkTerms(x any, f func(*Term) bool) { + v1.WalkTerms(x, f) } // WalkWiths calls the function f on all with modifiers under x. If the function f // returns true, AST nodes under the last node will not be visited. 
-func WalkWiths(x interface{}, f func(*With) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if w, ok := x.(*With); ok { - return f(w) - } - return false - }} - vis.Walk(x) +func WalkWiths(x any, f func(*With) bool) { + v1.WalkWiths(x, f) } // WalkExprs calls the function f on all expressions under x. If the function f // returns true, AST nodes under the last node will not be visited. -func WalkExprs(x interface{}, f func(*Expr) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(*Expr); ok { - return f(r) - } - return false - }} - vis.Walk(x) +func WalkExprs(x any, f func(*Expr) bool) { + v1.WalkExprs(x, f) } // WalkBodies calls the function f on all bodies under x. If the function f // returns true, AST nodes under the last node will not be visited. -func WalkBodies(x interface{}, f func(Body) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if b, ok := x.(Body); ok { - return f(b) - } - return false - }} - vis.Walk(x) +func WalkBodies(x any, f func(Body) bool) { + v1.WalkBodies(x, f) } // WalkRules calls the function f on all rules under x. If the function f // returns true, AST nodes under the last node will not be visited. -func WalkRules(x interface{}, f func(*Rule) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(*Rule); ok { - stop := f(r) - // NOTE(tsandall): since rules cannot be embedded inside of queries - // we can stop early if there is no else block. - if stop || r.Else == nil { - return true - } - } - return false - }} - vis.Walk(x) +func WalkRules(x any, f func(*Rule) bool) { + v1.WalkRules(x, f) } // WalkNodes calls the function f on all nodes under x. If the function f // returns true, AST nodes under the last node will not be visited. 
-func WalkNodes(x interface{}, f func(Node) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if n, ok := x.(Node); ok { - return f(n) - } - return false - }} - vis.Walk(x) +func WalkNodes(x any, f func(Node) bool) { + v1.WalkNodes(x, f) } // GenericVisitor provides a utility to walk over AST nodes using a // closure. If the closure returns true, the visitor will not walk // over AST nodes under x. -type GenericVisitor struct { - f func(x interface{}) bool -} +type GenericVisitor = v1.GenericVisitor // NewGenericVisitor returns a new GenericVisitor that will invoke the function // f on AST nodes. -func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor { - return &GenericVisitor{f} -} - -// Walk iterates the AST by calling the function f on the -// GenericVisitor before recursing. Contrary to the generic Walk, this -// does not require allocating the visitor from heap. -func (vis *GenericVisitor) Walk(x interface{}) { - if vis.f(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Annotations { - vis.Walk(x.Annotations[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - vis.Walk(x.Name) - vis.Walk(x.Args) - if x.Key != nil { - vis.Walk(x.Key) - } - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case 
*Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case Object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - } +func NewGenericVisitor(f func(x any) bool) *GenericVisitor { + return v1.NewGenericVisitor(f) } // BeforeAfterVisitor provides a utility to walk over AST nodes using // closures. If the before closure returns true, the visitor will not // walk over AST nodes under x. The after closure is invoked always // after visiting a node. -type BeforeAfterVisitor struct { - before func(x interface{}) bool - after func(x interface{}) -} +type BeforeAfterVisitor = v1.BeforeAfterVisitor // NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that // will invoke the functions before and after AST nodes. -func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor { - return &BeforeAfterVisitor{before, after} -} - -// Walk iterates the AST by calling the functions on the -// BeforeAndAfterVisitor before and after recursing. Contrary to the -// generic Walk, this does not require allocating the visitor from -// heap. 
-func (vis *BeforeAfterVisitor) Walk(x interface{}) { - defer vis.after(x) - if vis.before(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Annotations { - vis.Walk(x.Annotations[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - if len(x.Reference) > 0 { - vis.Walk(x.Reference) - } else { - vis.Walk(x.Name) - if x.Key != nil { - vis.Walk(x.Key) - } - } - vis.Walk(x.Args) - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case *Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case Object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - 
for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - } +func NewBeforeAfterVisitor(before func(x any) bool, after func(x any)) *BeforeAfterVisitor { + return v1.NewBeforeAfterVisitor(before, after) } // VarVisitor walks AST nodes under a given node and collects all encountered // variables. The collected variables can be controlled by specifying // VarVisitorParams when creating the visitor. -type VarVisitor struct { - params VarVisitorParams - vars VarSet -} +type VarVisitor = v1.VarVisitor // VarVisitorParams contains settings for a VarVisitor. -type VarVisitorParams struct { - SkipRefHead bool - SkipRefCallHead bool - SkipObjectKeys bool - SkipClosures bool - SkipWithTarget bool - SkipSets bool -} +type VarVisitorParams = v1.VarVisitorParams // NewVarVisitor returns a new VarVisitor object. func NewVarVisitor() *VarVisitor { - return &VarVisitor{ - vars: NewVarSet(), - } -} - -// WithParams sets the parameters in params on vis. -func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor { - vis.params = params - return vis -} - -// Vars returns a VarSet that contains collected vars. 
-func (vis *VarVisitor) Vars() VarSet { - return vis.vars -} - -// visit determines if the VarVisitor will recurse into x: if it returns `true`, -// the visitor will _skip_ that branch of the AST -func (vis *VarVisitor) visit(v interface{}) bool { - if vis.params.SkipObjectKeys { - if o, ok := v.(Object); ok { - o.Foreach(func(_, v *Term) { - vis.Walk(v) - }) - return true - } - } - if vis.params.SkipRefHead { - if r, ok := v.(Ref); ok { - rSlice := r[1:] - for i := range rSlice { - vis.Walk(rSlice[i]) - } - return true - } - } - if vis.params.SkipClosures { - switch v := v.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - case *Expr: - if ev, ok := v.Terms.(*Every); ok { - vis.Walk(ev.Domain) - // We're _not_ walking ev.Body -- that's the closure here - return true - } - } - } - if vis.params.SkipWithTarget { - if v, ok := v.(*With); ok { - vis.Walk(v.Value) - return true - } - } - if vis.params.SkipSets { - if _, ok := v.(Set); ok { - return true - } - } - if vis.params.SkipRefCallHead { - switch v := v.(type) { - case *Expr: - if terms, ok := v.Terms.([]*Term); ok { - termSlice := terms[0].Value.(Ref)[1:] - for i := range termSlice { - vis.Walk(termSlice[i]) - } - for i := 1; i < len(terms); i++ { - vis.Walk(terms[i]) - } - for i := range v.With { - vis.Walk(v.With[i]) - } - return true - } - case Call: - operator := v[0].Value.(Ref) - for i := 1; i < len(operator); i++ { - vis.Walk(operator[i]) - } - for i := 1; i < len(v); i++ { - vis.Walk(v[i]) - } - return true - case *With: - if ref, ok := v.Target.Value.(Ref); ok { - refSlice := ref[1:] - for i := range refSlice { - vis.Walk(refSlice[i]) - } - } - if ref, ok := v.Value.Value.(Ref); ok { - refSlice := ref[1:] - for i := range refSlice { - vis.Walk(refSlice[i]) - } - } else { - vis.Walk(v.Value) - } - return true - } - } - if v, ok := v.(Var); ok { - vis.vars.Add(v) - } - return false -} - -// Walk iterates the AST by calling the function f on the -// 
GenericVisitor before recursing. Contrary to the generic Walk, this -// does not require allocating the visitor from heap. -func (vis *VarVisitor) Walk(x interface{}) { - if vis.visit(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - if len(x.Reference) > 0 { - vis.Walk(x.Reference) - } else { - vis.Walk(x.Name) - if x.Key != nil { - vis.Walk(x.Key) - } - } - vis.Walk(x.Args) - - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case *Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - 
} + return v1.NewVarVisitor() } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go index 0e159384ef..50ad97349a 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go @@ -6,1386 +6,97 @@ package bundle import ( - "archive/tar" - "bytes" - "compress/gzip" - "encoding/hex" - "encoding/json" - "errors" - "fmt" "io" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "github.com/gobwas/glob" "github.com/open-policy-agent/opa/ast" - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/format" - "github.com/open-policy-agent/opa/internal/file/archive" - "github.com/open-policy-agent/opa/internal/merge" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // Common file extensions and file names. const ( - RegoExt = ".rego" - WasmFile = "policy.wasm" - PlanFile = "plan.json" - ManifestExt = ".manifest" - SignaturesFile = "signatures.json" - patchFile = "patch.json" - dataFile = "data.json" - yamlDataFile = "data.yaml" - ymlDataFile = "data.yml" - defaultHashingAlg = "SHA-256" - DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs - DeltaBundleType = "delta" - SnapshotBundleType = "snapshot" + RegoExt = v1.RegoExt + WasmFile = v1.WasmFile + PlanFile = v1.PlanFile + ManifestExt = v1.ManifestExt + SignaturesFile = v1.SignaturesFile + + DefaultSizeLimitBytes = v1.DefaultSizeLimitBytes + DeltaBundleType = v1.DeltaBundleType + SnapshotBundleType = v1.SnapshotBundleType ) // Bundle represents a loaded bundle. The bundle can contain data and policies. -type Bundle struct { - Signatures SignaturesConfig - Manifest Manifest - Data map[string]interface{} - Modules []ModuleFile - Wasm []byte // Deprecated. 
Use WasmModules instead - WasmModules []WasmModuleFile - PlanModules []PlanModuleFile - Patch Patch - Etag string - Raw []Raw - - lazyLoadingMode bool - sizeLimitBytes int64 -} +type Bundle = v1.Bundle // Raw contains raw bytes representing the bundle's content -type Raw struct { - Path string - Value []byte -} +type Raw = v1.Raw // Patch contains an array of objects wherein each object represents the patch operation to be // applied to the bundle data. -type Patch struct { - Data []PatchOperation `json:"data,omitempty"` -} +type Patch = v1.Patch // PatchOperation models a single patch operation against a document. -type PatchOperation struct { - Op string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value"` -} +type PatchOperation = v1.PatchOperation // SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle. -type SignaturesConfig struct { - Signatures []string `json:"signatures,omitempty"` - Plugin string `json:"plugin,omitempty"` -} - -// isEmpty returns if the SignaturesConfig is empty. -func (s SignaturesConfig) isEmpty() bool { - return reflect.DeepEqual(s, SignaturesConfig{}) -} +type SignaturesConfig = v1.SignaturesConfig // DecodedSignature represents the decoded JWT payload. -type DecodedSignature struct { - Files []FileInfo `json:"files"` - KeyID string `json:"keyid"` // Deprecated, use kid in the JWT header instead. - Scope string `json:"scope"` - IssuedAt int64 `json:"iat"` - Issuer string `json:"iss"` -} +type DecodedSignature = v1.DecodedSignature // FileInfo contains the hashing algorithm used, resulting digest etc. -type FileInfo struct { - Name string `json:"name"` - Hash string `json:"hash"` - Algorithm string `json:"algorithm"` -} +type FileInfo = v1.FileInfo // NewFile returns a new FileInfo. 
func NewFile(name, hash, alg string) FileInfo { - return FileInfo{ - Name: name, - Hash: hash, - Algorithm: alg, - } + return v1.NewFile(name, hash, alg) } // Manifest represents the manifest from a bundle. The manifest may contain // metadata such as the bundle revision. -type Manifest struct { - Revision string `json:"revision"` - Roots *[]string `json:"roots,omitempty"` - WasmResolvers []WasmResolver `json:"wasm,omitempty"` - // RegoVersion is the global Rego version for the bundle described by this Manifest. - // The Rego version of individual files can be overridden in FileRegoVersions. - // We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time. - // We use a pointer so that we can support hand-made bundles that don't have an explicit version appropriately. - // E.g. in OPA 0.x if --v1-compatible is used when consuming the bundle, and there is no specified version, - // we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible - // flag and no explicit bundle version should default to v1. - RegoVersion *int `json:"rego_version,omitempty"` - // FileRegoVersions is a map from file paths to Rego versions. - // This allows individual files to override the global Rego version specified by RegoVersion. - FileRegoVersions map[string]int `json:"file_rego_versions,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - - compiledFileRegoVersions []fileRegoVersion -} - -type fileRegoVersion struct { - path glob.Glob - version int -} +type Manifest = v1.Manifest // WasmResolver maps a wasm module to an entrypoint ref. -type WasmResolver struct { - Entrypoint string `json:"entrypoint,omitempty"` - Module string `json:"module,omitempty"` - Annotations []*ast.Annotations `json:"annotations,omitempty"` -} - -// Init initializes the manifest. If you instantiate a manifest -// manually, call Init to ensure that the roots are set properly. 
-func (m *Manifest) Init() { - if m.Roots == nil { - defaultRoots := []string{""} - m.Roots = &defaultRoots - } -} - -// AddRoot adds r to the roots of m. This function is idempotent. -func (m *Manifest) AddRoot(r string) { - m.Init() - if !RootPathsContain(*m.Roots, r) { - *m.Roots = append(*m.Roots, r) - } -} - -func (m *Manifest) SetRegoVersion(v ast.RegoVersion) { - m.Init() - regoVersion := 0 - if v == ast.RegoV1 { - regoVersion = 1 - } - m.RegoVersion = ®oVersion -} - -// Equal returns true if m is semantically equivalent to other. -func (m Manifest) Equal(other Manifest) bool { - - // This is safe since both are passed by value. - m.Init() - other.Init() - - if m.Revision != other.Revision { - return false - } - - if m.RegoVersion == nil && other.RegoVersion != nil { - return false - } - if m.RegoVersion != nil && other.RegoVersion == nil { - return false - } - if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion { - return false - } - - // If both are nil, or both are empty, we consider them equal. - if !(len(m.FileRegoVersions) == 0 && len(other.FileRegoVersions) == 0) && - !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) { - return false - } - - if !reflect.DeepEqual(m.Metadata, other.Metadata) { - return false - } - - return m.equalWasmResolversAndRoots(other) -} - -func (m Manifest) Empty() bool { - return m.Equal(Manifest{}) -} - -// Copy returns a deep copy of the manifest. 
-func (m Manifest) Copy() Manifest { - m.Init() - roots := make([]string, len(*m.Roots)) - copy(roots, *m.Roots) - m.Roots = &roots - - wasmModules := make([]WasmResolver, len(m.WasmResolvers)) - copy(wasmModules, m.WasmResolvers) - m.WasmResolvers = wasmModules - - metadata := m.Metadata - - if metadata != nil { - m.Metadata = make(map[string]interface{}) - for k, v := range metadata { - m.Metadata[k] = v - } - } - - return m -} - -func (m Manifest) String() string { - m.Init() - if m.RegoVersion != nil { - return fmt.Sprintf("", - m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata) - } - return fmt.Sprintf("", - m.Revision, *m.Roots, m.WasmResolvers, m.Metadata) -} - -func (m Manifest) rootSet() stringSet { - rs := map[string]struct{}{} - - for _, r := range *m.Roots { - rs[r] = struct{}{} - } - - return stringSet(rs) -} - -func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool { - if len(m.WasmResolvers) != len(other.WasmResolvers) { - return false - } - - for i := 0; i < len(m.WasmResolvers); i++ { - if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) { - return false - } - } - - return m.rootSet().Equal(other.rootSet()) -} - -func (wr *WasmResolver) Equal(other *WasmResolver) bool { - if wr == nil && other == nil { - return true - } - - if wr == nil || other == nil { - return false - } - - if wr.Module != other.Module { - return false - } - - if wr.Entrypoint != other.Entrypoint { - return false - } - - annotLen := len(wr.Annotations) - if annotLen != len(other.Annotations) { - return false - } - - for i := 0; i < annotLen; i++ { - if wr.Annotations[i].Compare(other.Annotations[i]) != 0 { - return false - } - } - - return true -} - -type stringSet map[string]struct{} - -func (ss stringSet) Equal(other stringSet) bool { - if len(ss) != len(other) { - return false - } - for k := range other { - if _, ok := ss[k]; !ok { - return false - } - } - return true -} - -func (m *Manifest) validateAndInjectDefaults(b Bundle) error { - - 
m.Init() - - // Validate roots in bundle. - roots := *m.Roots - - // Standardize the roots (no starting or trailing slash) - for i := range roots { - roots[i] = strings.Trim(roots[i], "/") - } - - for i := 0; i < len(roots)-1; i++ { - for j := i + 1; j < len(roots); j++ { - if RootPathsOverlap(roots[i], roots[j]) { - return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j]) - } - } - } - - // Validate modules in bundle. - for _, module := range b.Modules { - found := false - if path, err := module.Parsed.Package.Path.Ptr(); err == nil { - found = RootPathsContain(roots, path) - } - if !found { - return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path) - } - } - - // Build a set of wasm module entrypoints to validate - wasmModuleToEps := map[string]string{} - seenEps := map[string]struct{}{} - for _, wm := range b.WasmModules { - wasmModuleToEps[wm.Path] = "" - } - - for _, wmConfig := range b.Manifest.WasmResolvers { - _, ok := wasmModuleToEps[wmConfig.Module] - if !ok { - return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module) - } - - // Ensure wasm module entrypoint in within bundle roots - if !RootPathsContain(roots, wmConfig.Entrypoint) { - return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module) - } - - if _, ok := seenEps[wmConfig.Entrypoint]; ok { - return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint) - } - seenEps[wmConfig.Entrypoint] = struct{}{} - - wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint - } - - // Validate data patches in bundle. 
- for _, patch := range b.Patch.Data { - path := strings.Trim(patch.Path, "/") - if !RootPathsContain(roots, path) { - return fmt.Errorf("manifest roots %v do not permit data patch at path '%s'", roots, path) - } - } - - if b.lazyLoadingMode { - return nil - } - - // Validate data in bundle. - return dfs(b.Data, "", func(path string, node interface{}) (bool, error) { - path = strings.Trim(path, "/") - if RootPathsContain(roots, path) { - return true, nil - } - - if _, ok := node.(map[string]interface{}); ok { - for i := range roots { - if RootPathsContain(strings.Split(path, "/"), roots[i]) { - return false, nil - } - } - } - return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path) - }) -} +type WasmResolver = v1.WasmResolver // ModuleFile represents a single module contained in a bundle. -type ModuleFile struct { - URL string - Path string - RelativePath string - Raw []byte - Parsed *ast.Module -} +type ModuleFile = v1.ModuleFile // WasmModuleFile represents a single wasm module contained in a bundle. -type WasmModuleFile struct { - URL string - Path string - Entrypoints []ast.Ref - Raw []byte -} +type WasmModuleFile = v1.WasmModuleFile // PlanModuleFile represents a single plan module contained in a bundle. // // NOTE(tsandall): currently the plans are just opaque binary blobs. In the // future we could inject the entrypoints so that the plans could be executed // inside of OPA proper like we do for Wasm modules. -type PlanModuleFile struct { - URL string - Path string - Raw []byte -} +type PlanModuleFile = v1.PlanModuleFile // Reader contains the reader to load the bundle from. 
-type Reader struct { - loader DirectoryLoader - includeManifestInData bool - metrics metrics.Metrics - baseDir string - verificationConfig *VerificationConfig - skipVerify bool - processAnnotations bool - jsonOptions *astJSON.Options - capabilities *ast.Capabilities - files map[string]FileInfo // files in the bundle signature payload - sizeLimitBytes int64 - etag string - lazyLoadingMode bool - name string - persist bool - regoVersion ast.RegoVersion - followSymlinks bool -} +type Reader = v1.Reader // NewReader is deprecated. Use NewCustomReader instead. func NewReader(r io.Reader) *Reader { - return NewCustomReader(NewTarballLoader(r)) + return v1.NewReader(r).WithRegoVersion(ast.DefaultRegoVersion) } // NewCustomReader returns a new Reader configured to use the // specified DirectoryLoader. func NewCustomReader(loader DirectoryLoader) *Reader { - nr := Reader{ - loader: loader, - metrics: metrics.New(), - files: make(map[string]FileInfo), - sizeLimitBytes: DefaultSizeLimitBytes + 1, - } - return &nr -} - -// IncludeManifestInData sets whether the manifest metadata should be -// included in the bundle's data. -func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader { - r.includeManifestInData = includeManifestInData - return r -} - -// WithMetrics sets the metrics object to be used while loading bundles -func (r *Reader) WithMetrics(m metrics.Metrics) *Reader { - r.metrics = m - return r -} - -// WithBaseDir sets a base directory for file paths of loaded Rego -// modules. This will *NOT* affect the loaded path of data files. 
-func (r *Reader) WithBaseDir(dir string) *Reader { - r.baseDir = dir - return r -} - -// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle -func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader { - r.verificationConfig = config - return r -} - -// WithSkipBundleVerification skips verification of a signed bundle -func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader { - r.skipVerify = skipVerify - return r -} - -// WithProcessAnnotations enables annotation processing during .rego file parsing. -func (r *Reader) WithProcessAnnotations(yes bool) *Reader { - r.processAnnotations = yes - return r -} - -// WithCapabilities sets the supported capabilities when loading the files -func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader { - r.capabilities = caps - return r -} - -// WithJSONOptions sets the JSONOptions to use when parsing policy files -func (r *Reader) WithJSONOptions(opts *astJSON.Options) *Reader { - r.jsonOptions = opts - return r -} - -// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger -// than this, an error will be returned by the reader. -func (r *Reader) WithSizeLimitBytes(n int64) *Reader { - r.sizeLimitBytes = n + 1 - return r -} - -// WithBundleEtag sets the given etag value on the bundle -func (r *Reader) WithBundleEtag(etag string) *Reader { - r.etag = etag - return r -} - -// WithBundleName specifies the bundle name -func (r *Reader) WithBundleName(name string) *Reader { - r.name = name - return r -} - -func (r *Reader) WithFollowSymlinks(yes bool) *Reader { - r.followSymlinks = yes - return r -} - -// WithLazyLoadingMode sets the bundle loading mode. If true, -// bundles will be read in lazy mode. In this mode, data files in the bundle will not be -// deserialized and the check to validate that the bundle data does not contain paths -// outside the bundle's roots will not be performed while reading the bundle. 
-func (r *Reader) WithLazyLoadingMode(yes bool) *Reader { - r.lazyLoadingMode = yes - return r -} - -// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk. -func (r *Reader) WithBundlePersistence(persist bool) *Reader { - r.persist = persist - return r -} - -func (r *Reader) WithRegoVersion(version ast.RegoVersion) *Reader { - r.regoVersion = version - return r -} - -func (r *Reader) ParserOptions() ast.ParserOptions { - return ast.ParserOptions{ - ProcessAnnotation: r.processAnnotations, - Capabilities: r.capabilities, - JSONOptions: r.jsonOptions, - RegoVersion: r.regoVersion, - } -} - -// Read returns a new Bundle loaded from the reader. -func (r *Reader) Read() (Bundle, error) { - - var bundle Bundle - var descriptors []*Descriptor - var err error - var raw []Raw - - bundle.Signatures, bundle.Patch, descriptors, err = preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes) - if err != nil { - return bundle, err - } - - bundle.lazyLoadingMode = r.lazyLoadingMode - bundle.sizeLimitBytes = r.sizeLimitBytes - - if bundle.Type() == SnapshotBundleType { - err = r.checkSignaturesAndDescriptors(bundle.Signatures) - if err != nil { - return bundle, err - } - - bundle.Data = map[string]interface{}{} - } - - var modules []ModuleFile - for _, f := range descriptors { - buf, err := readFile(f, r.sizeLimitBytes) - if err != nil { - return bundle, err - } - - // verify the file content - if bundle.Type() == SnapshotBundleType && !bundle.Signatures.isEmpty() { - path := f.Path() - if r.baseDir != "" { - path = f.URL() - } - path = strings.TrimPrefix(path, "/") - - // check if the file is to be excluded from bundle verification - if r.isFileExcluded(path) { - delete(r.files, path) - } else { - if err = r.verifyBundleFile(path, buf); err != nil { - return bundle, err - } - } - } - - // Normalize the paths to use `/` separators - path := filepath.ToSlash(f.Path()) - - if strings.HasSuffix(path, RegoExt) { - fullPath := 
r.fullPath(path) - bs := buf.Bytes() - - if r.lazyLoadingMode { - p := fullPath - if r.name != "" { - p = modulePathWithPrefix(r.name, fullPath) - } - - raw = append(raw, Raw{Path: p, Value: bs}) - } - - // Modules are parsed after we've had a chance to read the manifest - mf := ModuleFile{ - URL: f.URL(), - Path: fullPath, - RelativePath: path, - Raw: bs, - } - modules = append(modules, mf) - } else if filepath.Base(path) == WasmFile { - bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{ - URL: f.URL(), - Path: r.fullPath(path), - Raw: buf.Bytes(), - }) - } else if filepath.Base(path) == PlanFile { - bundle.PlanModules = append(bundle.PlanModules, PlanModuleFile{ - URL: f.URL(), - Path: r.fullPath(path), - Raw: buf.Bytes(), - }) - } else if filepath.Base(path) == dataFile { - if r.lazyLoadingMode { - raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) - continue - } - - var value interface{} - - r.metrics.Timer(metrics.RegoDataParse).Start() - err := util.UnmarshalJSON(buf.Bytes(), &value) - r.metrics.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) - } - - if err := insertValue(&bundle, path, value); err != nil { - return bundle, err - } - - } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile { - if r.lazyLoadingMode { - raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) - continue - } - - var value interface{} - - r.metrics.Timer(metrics.RegoDataParse).Start() - err := util.Unmarshal(buf.Bytes(), &value) - r.metrics.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) - } - - if err := insertValue(&bundle, path, value); err != nil { - return bundle, err - } - - } else if strings.HasSuffix(path, ManifestExt) { - if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil { - return bundle, fmt.Errorf("bundle load failed on 
manifest decode: %w", err) - } - } - } - - // Parse modules - popts := r.ParserOptions() - popts.RegoVersion = bundle.RegoVersion(popts.RegoVersion) - for _, mf := range modules { - modulePopts := popts - if modulePopts.RegoVersion, err = bundle.RegoVersionForFile(mf.RelativePath, popts.RegoVersion); err != nil { - return bundle, err - } - r.metrics.Timer(metrics.RegoModuleParse).Start() - mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts) - r.metrics.Timer(metrics.RegoModuleParse).Stop() - if err != nil { - return bundle, err - } - bundle.Modules = append(bundle.Modules, mf) - } - - if bundle.Type() == DeltaBundleType { - if len(bundle.Data) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but data files found") - } - - if len(bundle.Modules) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but policy files found") - } - - if len(bundle.WasmModules) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but wasm files found") - } - - if r.persist { - return bundle, fmt.Errorf("'persist' property is true in config. 
persisting delta bundle to disk is not supported") - } - } - - // check if the bundle signatures specify any files that weren't found in the bundle - if bundle.Type() == SnapshotBundleType && len(r.files) != 0 { - extra := []string{} - for k := range r.files { - extra = append(extra, k) - } - return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra) - } - - if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil { - return bundle, err - } - - // Inject the wasm module entrypoint refs into the WasmModuleFile structs - epMap := map[string][]string{} - for _, r := range bundle.Manifest.WasmResolvers { - epMap[r.Module] = append(epMap[r.Module], r.Entrypoint) - } - for i := 0; i < len(bundle.WasmModules); i++ { - entrypoints := epMap[bundle.WasmModules[i].Path] - for _, entrypoint := range entrypoints { - ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint) - if err != nil { - return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err) - } - bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref) - } - } - - if r.includeManifestInData { - var metadata map[string]interface{} - - b, err := json.Marshal(&bundle.Manifest) - if err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest marshal: %w", err) - } - - err = util.UnmarshalJSON(b, &metadata) - if err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err) - } - - // For backwards compatibility always write to the old unnamed manifest path - // This will *not* be correct if >1 bundle is in use... 
- if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err) - } - } - - bundle.Etag = r.etag - bundle.Raw = raw - - return bundle, nil -} - -func (r *Reader) isFileExcluded(path string) bool { - for _, e := range r.verificationConfig.Exclude { - match, _ := filepath.Match(e, path) - if match { - return true - } - } - return false -} - -func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error { - if r.skipVerify { - return nil - } - - if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" { - return fmt.Errorf("bundle missing .signatures.json file") - } - - if !signatures.isEmpty() { - if r.verificationConfig == nil { - return fmt.Errorf("verification key not provided") - } - - // verify the JWT signatures included in the `.signatures.json` file - if err := r.verifyBundleSignature(signatures); err != nil { - return err - } - } - return nil -} - -func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error { - var err error - r.files, err = VerifyBundleSignature(sc, r.verificationConfig) - return err -} - -func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error { - return VerifyBundleFile(path, data, r.files) -} - -func (r *Reader) fullPath(path string) string { - if r.baseDir != "" { - path = filepath.Join(r.baseDir, path) - } - return path + return v1.NewCustomReader(loader).WithRegoVersion(ast.DefaultRegoVersion) } // Write is deprecated. Use NewWriter instead. func Write(w io.Writer, bundle Bundle) error { - return NewWriter(w). - UseModulePath(true). - DisableFormat(true). - Write(bundle) + return v1.Write(w, bundle) } // Writer implements bundle serialization. -type Writer struct { - usePath bool - disableFormat bool - w io.Writer -} +type Writer = v1.Writer // NewWriter returns a bundle writer that writes to w. 
func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - } -} - -// UseModulePath configures the writer to use the module file path instead of the -// module file URL during serialization. This is for backwards compatibility. -func (w *Writer) UseModulePath(yes bool) *Writer { - w.usePath = yes - return w -} - -// DisableFormat configures the writer to just write out raw bytes instead -// of formatting modules before serialization. -func (w *Writer) DisableFormat(yes bool) *Writer { - w.disableFormat = yes - return w -} - -// Write writes the bundle to the writer's output stream. -func (w *Writer) Write(bundle Bundle) error { - gw := gzip.NewWriter(w.w) - tw := tar.NewWriter(gw) - - bundleType := bundle.Type() - - if bundleType == SnapshotBundleType { - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil { - return err - } - - if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil { - return err - } - - for _, module := range bundle.Modules { - path := module.URL - if w.usePath { - path = module.Path - } - - if err := archive.WriteFile(tw, path, module.Raw); err != nil { - return err - } - } - - if err := w.writeWasm(tw, bundle); err != nil { - return err - } - - if err := writeSignatures(tw, bundle); err != nil { - return err - } - - if err := w.writePlan(tw, bundle); err != nil { - return err - } - } else if bundleType == DeltaBundleType { - if err := writePatch(tw, bundle); err != nil { - return err - } - } - - if err := writeManifest(tw, bundle); err != nil { - return err - } - - if err := tw.Close(); err != nil { - return err - } - - return gw.Close() -} - -func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error { - for _, wm := range bundle.WasmModules { - path := wm.URL - if w.usePath { - path = wm.Path - } - - err := archive.WriteFile(tw, path, wm.Raw) - if err != nil { - return err - } - } - - if len(bundle.Wasm) > 0 { - err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm) - if err != 
nil { - return err - } - } - - return nil -} - -func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error { - for _, wm := range bundle.PlanModules { - path := wm.URL - if w.usePath { - path = wm.Path - } - - err := archive.WriteFile(tw, path, wm.Raw) - if err != nil { - return err - } - } - - return nil -} - -func writeManifest(tw *tar.Writer, bundle Bundle) error { - - if bundle.Manifest.Empty() { - return nil - } - - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil { - return err - } - - return archive.WriteFile(tw, ManifestExt, buf.Bytes()) -} - -func writePatch(tw *tar.Writer, bundle Bundle) error { - - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Patch); err != nil { - return err - } - - return archive.WriteFile(tw, patchFile, buf.Bytes()) -} - -func writeSignatures(tw *tar.Writer, bundle Bundle) error { - - if bundle.Signatures.isEmpty() { - return nil - } - - bs, err := json.MarshalIndent(bundle.Signatures, "", " ") - if err != nil { - return err - } - - return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs) -} - -func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { - - files := []FileInfo{} - - bs, err := hash.HashFile(b.Data) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg)) - - if len(b.Wasm) != 0 { - bs, err := hash.HashFile(b.Wasm) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - for _, wasmModule := range b.WasmModules { - bs, err := hash.HashFile(wasmModule.Raw) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - for _, planmodule := range b.PlanModules { - bs, err := hash.HashFile(planmodule.Raw) - if err != 
nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(planmodule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - // If the manifest is essentially empty, don't add it to the signatures since it - // won't be written to the bundle. Otherwise: - // parse the manifest into a JSON structure; - // then recursively order the fields of all objects alphabetically and then apply - // the hash function to result to compute the hash. - if !b.Manifest.Empty() { - mbs, err := json.Marshal(b.Manifest) - if err != nil { - return files, err - } - - var result map[string]interface{} - if err := util.Unmarshal(mbs, &result); err != nil { - return files, err - } - - bs, err = hash.HashFile(result) - if err != nil { - return files, err - } - - files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - return files, err -} - -// FormatModules formats Rego modules -// Modules will be formatted to comply with rego-v0, but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported). 
-func (b *Bundle) FormatModules(useModulePath bool) error { - return b.FormatModulesForRegoVersion(ast.RegoV0, true, useModulePath) -} - -// FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version -func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveModuleRegoVersion bool, useModulePath bool) error { - var err error - - for i, module := range b.Modules { - opts := format.Opts{} - if preserveModuleRegoVersion { - opts.RegoVersion = module.Parsed.RegoVersion() - opts.ParserOptions = &ast.ParserOptions{ - RegoVersion: opts.RegoVersion, - } - } else { - opts.RegoVersion = version - } - - if module.Raw == nil { - module.Raw, err = format.AstWithOpts(module.Parsed, opts) - if err != nil { - return err - } - } else { - path := module.URL - if useModulePath { - path = module.Path - } - - module.Raw, err = format.SourceWithOpts(path, module.Raw, opts) - if err != nil { - return err - } - } - b.Modules[i].Raw = module.Raw - } - return nil -} - -// GenerateSignature generates the signature for the given bundle. -func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error { - - hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg)) - if err != nil { - return err - } - - files := []FileInfo{} - - for _, module := range b.Modules { - bytes, err := hash.HashFile(module.Raw) - if err != nil { - return err - } - - path := module.URL - if useModulePath { - path = module.Path - } - files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg)) - } - - result, err := hashBundleFiles(hash, b) - if err != nil { - return err - } - files = append(files, result...) 
- - // generate signed token - token, err := GenerateSignedToken(files, signingConfig, keyID) - if err != nil { - return err - } - - if b.Signatures.isEmpty() { - b.Signatures = SignaturesConfig{} - } - - if signingConfig.Plugin != "" { - b.Signatures.Plugin = signingConfig.Plugin - } - - b.Signatures.Signatures = []string{token} - - return nil -} - -// ParsedModules returns a map of parsed modules with names that are -// unique and human readable for the given a bundle name. -func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { - - mods := make(map[string]*ast.Module, len(b.Modules)) - - for _, mf := range b.Modules { - mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed - } - - return mods -} - -func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion { - if v := b.Manifest.RegoVersion; v != nil { - if *v == 0 { - return ast.RegoV0 - } else if *v == 1 { - return ast.RegoV1 - } - } - return def -} - -func (b *Bundle) SetRegoVersion(v ast.RegoVersion) { - b.Manifest.SetRegoVersion(v) -} - -// RegoVersionForFile returns the rego-version for the specified file path. -// If there is no defined version for the given path, the default version def is returned. -// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned. 
-func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) { - version, err := b.Manifest.numericRegoVersionForFile(path) - if err != nil { - return def, err - } else if version == nil { - return def, nil - } else if *version == 0 { - return ast.RegoV0, nil - } else if *version == 1 { - return ast.RegoV1, nil - } - return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path) -} - -func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) { - var version *int - - if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) { - m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions)) - for pattern, v := range m.FileRegoVersions { - compiled, err := glob.Compile(pattern) - if err != nil { - return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err) - } - m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v}) - } - } - - for _, fv := range m.compiledFileRegoVersions { - if fv.path.Match(path) { - version = &fv.version - break - } - } - - if version == nil { - version = m.RegoVersion - } - return version, nil -} - -// Equal returns true if this bundle's contents equal the other bundle's -// contents. 
-func (b Bundle) Equal(other Bundle) bool { - if !reflect.DeepEqual(b.Data, other.Data) { - return false - } - - if len(b.Modules) != len(other.Modules) { - return false - } - for i := range b.Modules { - // To support bundles built from rootless filesystems we ignore a "/" prefix - // for URLs and Paths, such that "/file" and "file" are equivalent - if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) != - strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) { - return false - } - if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) != - strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) { - return false - } - if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) { - return false - } - if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) { - return false - } - } - if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) { - return false - } - - return bytes.Equal(b.Wasm, other.Wasm) -} - -// Copy returns a deep copy of the bundle. -func (b Bundle) Copy() Bundle { - - // Copy data. - var x interface{} = b.Data - - if err := util.RoundTrip(&x); err != nil { - panic(err) - } - - if x != nil { - b.Data = x.(map[string]interface{}) - } - - // Copy modules. - for i := range b.Modules { - bs := make([]byte, len(b.Modules[i].Raw)) - copy(bs, b.Modules[i].Raw) - b.Modules[i].Raw = bs - b.Modules[i].Parsed = b.Modules[i].Parsed.Copy() - } - - // Copy manifest. 
- b.Manifest = b.Manifest.Copy() - - return b -} - -func (b *Bundle) insertData(key []string, value interface{}) error { - // Build an object with the full structure for the value - obj, err := mktree(key, value) - if err != nil { - return err - } - - // Merge the new data in with the current bundle data object - merged, ok := merge.InterfaceMaps(b.Data, obj) - if !ok { - return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) - } - - b.Data = merged - - return nil -} - -func (b *Bundle) readData(key []string) *interface{} { - - if len(key) == 0 { - if len(b.Data) == 0 { - return nil - } - var result interface{} = b.Data - return &result - } - - node := b.Data - - for i := 0; i < len(key)-1; i++ { - - child, ok := node[key[i]] - if !ok { - return nil - } - - childObj, ok := child.(map[string]interface{}) - if !ok { - return nil - } - - node = childObj - } - - child, ok := node[key[len(key)-1]] - if !ok { - return nil - } - - return &child -} - -// Type returns the type of the bundle. -func (b *Bundle) Type() string { - if len(b.Patch.Data) != 0 { - return DeltaBundleType - } - return SnapshotBundleType -} - -func mktree(path []string, value interface{}) (map[string]interface{}, error) { - if len(path) == 0 { - // For 0 length path the value is the full tree. - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("root value must be object") - } - return obj, nil - } - - dir := map[string]interface{}{} - for i := len(path) - 1; i > 0; i-- { - dir[path[i]] = value - value = dir - dir = map[string]interface{}{} - } - dir[path[0]] = value - - return dir, nil + return v1.NewWriter(w) } // Merge accepts a set of bundles and merges them into a single result bundle. 
If there are @@ -1393,7 +104,7 @@ func mktree(path []string, value interface{}) (map[string]interface{}, error) { // will have an empty revision except in the special case where a single bundle is provided // (and in that case the bundle is just returned unmodified.) func Merge(bundles []*Bundle) (*Bundle, error) { - return MergeWithRegoVersion(bundles, ast.RegoV0, false) + return MergeWithRegoVersion(bundles, ast.DefaultRegoVersion, false) } // MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge. @@ -1405,348 +116,19 @@ func Merge(bundles []*Bundle) (*Bundle, error) { // If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's // ModuleFile.URL will be used. func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) { - - if len(bundles) == 0 { - return nil, errors.New("expected at least one bundle") - } - - if len(bundles) == 1 { - result := bundles[0] - // We respect the bundle rego-version, defaulting to the provided rego version if not set. - result.SetRegoVersion(result.RegoVersion(regoVersion)) - fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath) - if err != nil { - return nil, err - } - result.Manifest.FileRegoVersions = fileRegoVersions - return result, nil + if regoVersion == ast.RegoUndefined { + regoVersion = ast.DefaultRegoVersion } - var roots []string - var result Bundle - - for _, b := range bundles { - - if b.Manifest.Roots == nil { - return nil, errors.New("bundle manifest not initialized") - } - - roots = append(roots, *b.Manifest.Roots...) - - result.Modules = append(result.Modules, b.Modules...) 
- - for _, root := range *b.Manifest.Roots { - key := strings.Split(root, "/") - if val := b.readData(key); val != nil { - if err := result.insertData(key, *val); err != nil { - return nil, err - } - } - } - - result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...) - result.WasmModules = append(result.WasmModules, b.WasmModules...) - result.PlanModules = append(result.PlanModules, b.PlanModules...) - - if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 { - if result.Manifest.FileRegoVersions == nil { - result.Manifest.FileRegoVersions = map[string]int{} - } - - fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath) - if err != nil { - return nil, err - } - for k, v := range fileRegoVersions { - result.Manifest.FileRegoVersions[k] = v - } - } - } - - // We respect the bundle rego-version, defaulting to the provided rego version if not set. - result.SetRegoVersion(result.RegoVersion(regoVersion)) - - if result.Data == nil { - result.Data = map[string]interface{}{} - } - - result.Manifest.Roots = &roots - - if err := result.Manifest.validateAndInjectDefaults(result); err != nil { - return nil, err - } - - return &result, nil -} - -func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) { - fileRegoVersions := map[string]int{} - - // we drop the bundle-global rego versions and record individual rego versions for each module. - for _, m := range bundle.Modules { - // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might - // contain the path between OPA working directory and the bundle root. 
- v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion)) - if err != nil { - return nil, err - } - // only record the rego version if it's different from one applied globally to the result bundle - if v != regoVersion { - // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path - // to the module inside the merged bundle. - fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int() - } - } - - return fileRegoVersions, nil -} - -func bundleRelativePath(m ModuleFile, usePath bool) string { - p := m.RelativePath - if p == "" { - if usePath { - p = m.Path - } else { - p = m.URL - } - } - return p -} - -func bundleAbsolutePath(m ModuleFile, usePath bool) string { - var p string - if usePath { - p = m.Path - } else { - p = m.URL - } - if !path.IsAbs(p) { - p = "/" + p - } - return path.Clean(p) + return v1.MergeWithRegoVersion(bundles, regoVersion, usePath) } // RootPathsOverlap takes in two bundle root paths and returns true if they overlap. func RootPathsOverlap(pathA string, pathB string) bool { - a := rootPathSegments(pathA) - b := rootPathSegments(pathB) - return rootContains(a, b) || rootContains(b, a) + return v1.RootPathsOverlap(pathA, pathB) } // RootPathsContain takes a set of bundle root paths and returns true if the path is contained. func RootPathsContain(roots []string, path string) bool { - segments := rootPathSegments(path) - for i := range roots { - if rootContains(rootPathSegments(roots[i]), segments) { - return true - } - } - return false -} - -func rootPathSegments(path string) []string { - return strings.Split(path, "/") -} - -func rootContains(root []string, other []string) bool { - - // A single segment, empty string root always contains the other. 
- if len(root) == 1 && root[0] == "" { - return true - } - - if len(root) > len(other) { - return false - } - - for j := range root { - if root[j] != other[j] { - return false - } - } - - return true -} - -func insertValue(b *Bundle, path string, value interface{}) error { - if err := b.insertData(getNormalizedPath(path), value); err != nil { - return fmt.Errorf("bundle load failed on %v: %w", path, err) - } - return nil -} - -func getNormalizedPath(path string) []string { - // Remove leading / and . characters from the directory path. If the bundle - // was written with OPA then the paths will contain a leading slash. On the - // other hand, if the path is empty, filepath.Dir will return '.'. - // Note: filepath.Dir can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. - dirpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") - var key []string - if dirpath != "" { - key = strings.Split(dirpath, "/") - } - return key -} - -func dfs(value interface{}, path string, fn func(string, interface{}) (bool, error)) error { - if stop, err := fn(path, value); err != nil { - return err - } else if stop { - return nil - } - obj, ok := value.(map[string]interface{}) - if !ok { - return nil - } - for key := range obj { - if err := dfs(obj[key], path+"/"+key, fn); err != nil { - return err - } - } - return nil -} - -func modulePathWithPrefix(bundleName string, modulePath string) string { - // Default prefix is just the bundle name - prefix := bundleName - - // Bundle names are sometimes just file paths, some of which - // are full urls (file:///foo/). Parse these and only use the path. - parsed, err := url.Parse(bundleName) - if err == nil { - prefix = filepath.Join(parsed.Host, parsed.Path) - } - - // Note: filepath.Join can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. 
- return normalizePath(filepath.Join(prefix, modulePath)) -} - -// IsStructuredDoc checks if the file name equals a structured file extension ex. ".json" -func IsStructuredDoc(name string) bool { - return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile || - filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt -} - -func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, Patch, []*Descriptor, error) { - descriptors := []*Descriptor{} - var signatures SignaturesConfig - var patch Patch - - for { - f, err := loader.NextFile() - if err == io.EOF { - break - } - - if err != nil { - return signatures, patch, nil, fmt.Errorf("bundle read failed: %w", err) - } - - // check for the signatures file - if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) { - buf, err := readFile(f, sizeLimitBytes) - if err != nil { - return signatures, patch, nil, err - } - - if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil { - return signatures, patch, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err) - } - } else if !strings.HasSuffix(f.Path(), SignaturesFile) { - descriptors = append(descriptors, f) - - if filepath.Base(f.Path()) == patchFile { - - var b bytes.Buffer - tee := io.TeeReader(f.reader, &b) - f.reader = tee - - buf, err := readFile(f, sizeLimitBytes) - if err != nil { - return signatures, patch, nil, err - } - - if err := util.NewJSONDecoder(&buf).Decode(&patch); err != nil { - return signatures, patch, nil, fmt.Errorf("bundle load failed on patch decode: %w", err) - } - - f.reader = &b - } - } - } - return signatures, patch, descriptors, nil -} - -func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { - // Case for pre-loaded byte buffers, like those from the tarballLoader. 
- if bb, ok := f.reader.(*bytes.Buffer); ok { - _ = f.Close() // always close, even on error - - if int64(bb.Len()) >= sizeLimitBytes { - return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)", - strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1) - } - - return *bb, nil - } - - // Case for *lazyFile readers: - if lf, ok := f.reader.(*lazyFile); ok { - var buf bytes.Buffer - if lf.file == nil { - var err error - if lf.file, err = os.Open(lf.path); err != nil { - return buf, fmt.Errorf("failed to open file %s: %w", f.path, err) - } - } - // Bail out if we can't read the whole file-- there's nothing useful we can do at that point! - fileSize, _ := fstatFileSize(lf.file) - if fileSize > sizeLimitBytes { - return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1) - } - // Prealloc the buffer for the file read. - buffer := make([]byte, fileSize) - _, err := io.ReadFull(lf.file, buffer) - if err != nil { - return buf, err - } - _ = lf.file.Close() // always close, even on error - - // Note(philipc): Replace the lazyFile reader in the *Descriptor with a - // pointer to the wrapping bytes.Buffer, so that we don't re-read the - // file on disk again by accident. - buf = *bytes.NewBuffer(buffer) - f.reader = &buf - return buf, nil - } - - // Fallback case: - var buf bytes.Buffer - n, err := f.Read(&buf, sizeLimitBytes) - _ = f.Close() // always close, even on error - - if err != nil && err != io.EOF { - return buf, err - } else if err == nil && n >= sizeLimitBytes { - return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1) - } - - return buf, nil -} - -// Takes an already open file handle and invokes the os.Stat system call on it -// to determine the file's size. Passes any errors from *File.Stat on up to the -// caller. 
-func fstatFileSize(f *os.File) (int64, error) { - fileInfo, err := f.Stat() - if err != nil { - return 0, err - } - return fileInfo.Size(), nil -} - -func normalizePath(p string) string { - return filepath.ToSlash(p) + return v1.RootPathsContain(roots, path) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/doc.go b/vendor/github.com/open-policy-agent/opa/bundle/doc.go new file mode 100644 index 0000000000..7ec7c9b332 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/bundle/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package bundle diff --git a/vendor/github.com/open-policy-agent/opa/bundle/file.go b/vendor/github.com/open-policy-agent/opa/bundle/file.go index 80b1a87eb1..ccb7b23510 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/file.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/file.go @@ -1,508 +1,50 @@ package bundle import ( - "archive/tar" - "bytes" - "compress/gzip" - "fmt" "io" - "io/fs" - "os" - "path/filepath" - "sort" - "strings" - "sync" - - "github.com/open-policy-agent/opa/loader/filter" "github.com/open-policy-agent/opa/storage" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)" - // Descriptor contains information about a file and // can be used to read the file contents. 
-type Descriptor struct { - url string - path string - reader io.Reader - closer io.Closer - closeOnce *sync.Once -} - -// lazyFile defers reading the file until the first call of Read -type lazyFile struct { - path string - file *os.File -} - -// newLazyFile creates a new instance of lazyFile -func newLazyFile(path string) *lazyFile { - return &lazyFile{path: path} -} - -// Read implements io.Reader. It will check if the file has been opened -// and open it if it has not before attempting to read using the file's -// read method -func (f *lazyFile) Read(b []byte) (int, error) { - var err error - - if f.file == nil { - if f.file, err = os.Open(f.path); err != nil { - return 0, fmt.Errorf("failed to open file %s: %w", f.path, err) - } - } - - return f.file.Read(b) -} - -// Close closes the lazy file if it has been opened using the file's -// close method -func (f *lazyFile) Close() error { - if f.file != nil { - return f.file.Close() - } - - return nil -} +type Descriptor = v1.Descriptor func NewDescriptor(url, path string, reader io.Reader) *Descriptor { - return &Descriptor{ - url: url, - path: path, - reader: reader, - } -} - -func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor { - d.closer = closer - d.closeOnce = new(sync.Once) - return d -} - -// Path returns the path of the file. -func (d *Descriptor) Path() string { - return d.path -} - -// URL returns the url of the file. -func (d *Descriptor) URL() string { - return d.url -} - -// Read will read all the contents from the file the Descriptor refers to -// into the dest writer up n bytes. Will return an io.EOF error -// if EOF is encountered before n bytes are read. -func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) { - n, err := io.CopyN(dest, d.reader, n) - return n, err + return v1.NewDescriptor(url, path, reader) } -// Close the file, on some Loader implementations this might be a no-op. -// It should *always* be called regardless of file. 
-func (d *Descriptor) Close() error { - var err error - if d.closer != nil { - d.closeOnce.Do(func() { - err = d.closer.Close() - }) - } - return err -} - -type PathFormat int64 +type PathFormat = v1.PathFormat const ( - Chrooted PathFormat = iota - SlashRooted - Passthrough + Chrooted = v1.Chrooted + SlashRooted = v1.SlashRooted + Passthrough = v1.Passthrough ) // DirectoryLoader defines an interface which can be used to load // files from a directory by iterating over each one in the tree. -type DirectoryLoader interface { - // NextFile must return io.EOF if there is no next value. The returned - // descriptor should *always* be closed when no longer needed. - NextFile() (*Descriptor, error) - WithFilter(filter filter.LoaderFilter) DirectoryLoader - WithPathFormat(PathFormat) DirectoryLoader - WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader - WithFollowSymlinks(followSymlinks bool) DirectoryLoader -} - -type dirLoader struct { - root string - files []string - idx int - filter filter.LoaderFilter - pathFormat PathFormat - maxSizeLimitBytes int64 - followSymlinks bool -} - -// Normalize root directory, ex "./src/bundle" -> "src/bundle" -// We don't need an absolute path, but this makes the joined/trimmed -// paths more uniform. -func normalizeRootDirectory(root string) string { - if len(root) > 1 { - if root[0] == '.' && root[1] == filepath.Separator { - if len(root) == 2 { - root = root[:1] // "./" -> "." - } else { - root = root[2:] // remove leading "./" - } - } - } - return root -} +type DirectoryLoader = v1.DirectoryLoader // NewDirectoryLoader returns a basic DirectoryLoader implementation // that will load files from a given root directory path. 
func NewDirectoryLoader(root string) DirectoryLoader { - d := dirLoader{ - root: normalizeRootDirectory(root), - pathFormat: Chrooted, - } - return &d -} - -// WithFilter specifies the filter object to use to filter files while loading bundles -func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - d.filter = filter - return d -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - d.pathFormat = pathFormat - return d -} - -// WithSizeLimitBytes specifies the maximum size of any file in the directory to read -func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - d.maxSizeLimitBytes = sizeLimitBytes - return d -} - -// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory -func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { - d.followSymlinks = followSymlinks - return d -} - -func formatPath(fileName string, root string, pathFormat PathFormat) string { - switch pathFormat { - case SlashRooted: - if !strings.HasPrefix(fileName, string(filepath.Separator)) { - return string(filepath.Separator) + fileName - } - return fileName - case Chrooted: - // Trim off the root directory and return path as if chrooted - result := strings.TrimPrefix(fileName, filepath.FromSlash(root)) - if root == "." && filepath.Base(fileName) == ManifestExt { - result = fileName - } - if !strings.HasPrefix(result, string(filepath.Separator)) { - result = string(filepath.Separator) + result - } - return result - case Passthrough: - fallthrough - default: - return fileName - } -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. 
-func (d *dirLoader) NextFile() (*Descriptor, error) { - // build a list of all files we will iterate over and read, but only one time - if d.files == nil { - d.files = []string{} - err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error { - if info == nil { - return nil - } - - if info.Mode().IsRegular() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) - } - d.files = append(d.files, path) - } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) - } - d.files = append(d.files, path) - } else if info.Mode().IsDir() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { - return filepath.SkipDir - } - } - return nil - }) - if err != nil { - return nil, fmt.Errorf("failed to list files: %w", err) - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if d.idx >= len(d.files) { - return nil, io.EOF - } - - fileName := d.files[d.idx] - d.idx++ - fh := newLazyFile(fileName) - - cleanedPath := formatPath(fileName, d.root, d.pathFormat) - f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh) - return f, nil -} - -type tarballLoader struct { - baseURL string - r io.Reader - tr *tar.Reader - files []file - idx int - filter filter.LoaderFilter - skipDir map[string]struct{} - pathFormat PathFormat - maxSizeLimitBytes int64 -} - -type file struct { - name string - reader io.Reader - path storage.Path - raw 
[]byte + return v1.NewDirectoryLoader(root) } // NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead. func NewTarballLoader(r io.Reader) DirectoryLoader { - l := tarballLoader{ - r: r, - pathFormat: Passthrough, - } - return &l + return v1.NewTarballLoader(r) } // NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads // files out of a gzipped tar archive. The file URLs will be prefixed // with the baseURL. func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader { - l := tarballLoader{ - baseURL: strings.TrimSuffix(baseURL, "/"), - r: r, - pathFormat: Passthrough, - } - return &l -} - -// WithFilter specifies the filter object to use to filter files while loading bundles -func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - t.filter = filter - return t -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - t.pathFormat = pathFormat - return t -} - -// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read -func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - t.maxSizeLimitBytes = sizeLimitBytes - return t -} - -// WithFollowSymlinks is a no-op for tarballLoader -func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader { - return t -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. 
-func (t *tarballLoader) NextFile() (*Descriptor, error) { - if t.tr == nil { - gr, err := gzip.NewReader(t.r) - if err != nil { - return nil, fmt.Errorf("archive read failed: %w", err) - } - - t.tr = tar.NewReader(gr) - } - - if t.files == nil { - t.files = []file{} - - if t.skipDir == nil { - t.skipDir = map[string]struct{}{} - } - - for { - header, err := t.tr.Next() - - if err == io.EOF { - break - } - - if err != nil { - return nil, err - } - - // Keep iterating on the archive until we find a normal file - if header.Typeflag == tar.TypeReg { - - if t.filter != nil { - - if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) { - continue - } - - basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/") - - // check if the directory is to be skipped - if _, ok := t.skipDir[basePath]; ok { - continue - } - - match := false - for p := range t.skipDir { - if strings.HasPrefix(basePath, p) { - match = true - break - } - } - - if match { - continue - } - } - - if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes { - return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes) - } - - f := file{name: header.Name} - - // Note(philipc): We rely on the previous size check in this loop for safety. 
- buf := bytes.NewBuffer(make([]byte, 0, header.Size)) - if _, err := io.Copy(buf, t.tr); err != nil { - return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err) - } - - f.reader = buf - - t.files = append(t.files, f) - } else if header.Typeflag == tar.TypeDir { - cleanedPath := filepath.ToSlash(header.Name) - if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) { - t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{} - } - } - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if t.idx >= len(t.files) { - return nil, io.EOF - } - - f := t.files[t.idx] - t.idx++ - - cleanedPath := formatPath(f.name, "", t.pathFormat) - d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader) - return d, nil -} - -// Next implements the storage.Iterator interface. -// It iterates to the next policy or data file in the directory tree -// and returns a storage.Update for the file. -func (it *iterator) Next() (*storage.Update, error) { - if it.files == nil { - it.files = []file{} - - for _, item := range it.raw { - f := file{name: item.Path} - - fpath := strings.TrimLeft(normalizePath(filepath.Dir(f.name)), "/.") - if strings.HasSuffix(f.name, RegoExt) { - fpath = strings.Trim(normalizePath(f.name), "/") - } - - p, ok := storage.ParsePathEscaped("/" + fpath) - if !ok { - return nil, fmt.Errorf("storage path invalid: %v", f.name) - } - f.path = p - - f.raw = item.Value - - it.files = append(it.files, f) - } - - sortFilePathAscend(it.files) - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if it.idx >= len(it.files) { - return nil, io.EOF - } - - f := it.files[it.idx] - it.idx++ - - isPolicy := false - if strings.HasSuffix(f.name, RegoExt) { - isPolicy = true - } - - return &storage.Update{ - Path: f.path, - Value: f.raw, - IsPolicy: isPolicy, - }, nil -} - -type iterator struct { - raw []Raw - files []file - idx int + 
return v1.NewTarballLoaderWithBaseURL(r, baseURL) } func NewIterator(raw []Raw) storage.Iterator { - it := iterator{ - raw: raw, - } - return &it -} - -func sortFilePathAscend(files []file) { - sort.Slice(files, func(i, j int) bool { - return len(files[i].path) < len(files[j].path) - }) -} - -func getdepth(path string, isDir bool) int { - if isDir { - cleanedPath := strings.Trim(filepath.ToSlash(path), "/") - return len(strings.Split(cleanedPath, "/")) - } - - basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/") - return len(strings.Split(basePath, "/")) + return v1.NewIterator(raw) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go index a3a0dbf204..f6d559e8c2 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/filefs.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/filefs.go @@ -1,143 +1,19 @@ -//go:build go1.16 -// +build go1.16 - package bundle import ( - "fmt" - "io" "io/fs" - "path/filepath" - "sync" - - "github.com/open-policy-agent/opa/loader/filter" -) -const ( - defaultFSLoaderRoot = "." 
+ v1 "github.com/open-policy-agent/opa/v1/bundle" ) -type dirLoaderFS struct { - sync.Mutex - filesystem fs.FS - files []string - idx int - filter filter.LoaderFilter - root string - pathFormat PathFormat - maxSizeLimitBytes int64 - followSymlinks bool -} - // NewFSLoader returns a basic DirectoryLoader implementation // that will load files from a fs.FS interface func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) { - return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil + return v1.NewFSLoader(filesystem) } // NewFSLoaderWithRoot returns a basic DirectoryLoader implementation // that will load files from a fs.FS interface at the supplied root func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader { - d := dirLoaderFS{ - filesystem: filesystem, - root: normalizeRootDirectory(root), - pathFormat: Chrooted, - } - - return &d -} - -func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error { - if err != nil { - return err - } - - if dirEntry != nil { - info, err := dirEntry.Info() - if err != nil { - return err - } - - if dirEntry.Type().IsRegular() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) - } - - d.files = append(d.files, path) - } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) - } - - d.files = append(d.files, path) - } else if dirEntry.Type().IsDir() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { - return fs.SkipDir - } - } - } - return nil -} - 
-// WithFilter specifies the filter object to use to filter files while loading bundles -func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - d.filter = filter - return d -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - d.pathFormat = pathFormat - return d -} - -// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read -func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - d.maxSizeLimitBytes = sizeLimitBytes - return d -} - -func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { - d.followSymlinks = followSymlinks - return d -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. -func (d *dirLoaderFS) NextFile() (*Descriptor, error) { - d.Lock() - defer d.Unlock() - - if d.files == nil { - err := fs.WalkDir(d.filesystem, d.root, d.walkDir) - if err != nil { - return nil, fmt.Errorf("failed to list files: %w", err) - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if d.idx >= len(d.files) { - return nil, io.EOF - } - - fileName := d.files[d.idx] - d.idx++ - - fh, err := d.filesystem.Open(fileName) - if err != nil { - return nil, fmt.Errorf("failed to open file %s: %w", fileName, err) - } - - cleanedPath := formatPath(fileName, d.root, d.pathFormat) - f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh) - return f, nil + return v1.NewFSLoaderWithRoot(filesystem, root) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/hash.go b/vendor/github.com/open-policy-agent/opa/bundle/hash.go index 021801bb0a..d4cc601dea 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/hash.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/hash.go @@ -5,137 +5,28 @@ package bundle import ( - "bytes" - "crypto/md5" - 
"crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/json" - "fmt" - "hash" - "io" - "sort" - "strings" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // HashingAlgorithm represents a subset of hashing algorithms implemented in Go -type HashingAlgorithm string +type HashingAlgorithm = v1.HashingAlgorithm // Supported values for HashingAlgorithm const ( - MD5 HashingAlgorithm = "MD5" - SHA1 HashingAlgorithm = "SHA-1" - SHA224 HashingAlgorithm = "SHA-224" - SHA256 HashingAlgorithm = "SHA-256" - SHA384 HashingAlgorithm = "SHA-384" - SHA512 HashingAlgorithm = "SHA-512" - SHA512224 HashingAlgorithm = "SHA-512-224" - SHA512256 HashingAlgorithm = "SHA-512-256" + MD5 = v1.MD5 + SHA1 = v1.SHA1 + SHA224 = v1.SHA224 + SHA256 = v1.SHA256 + SHA384 = v1.SHA384 + SHA512 = v1.SHA512 + SHA512224 = v1.SHA512224 + SHA512256 = v1.SHA512256 ) -// String returns the string representation of a HashingAlgorithm -func (alg HashingAlgorithm) String() string { - return string(alg) -} - // SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy -type SignatureHasher interface { - HashFile(v interface{}) ([]byte, error) -} - -type hasher struct { - h func() hash.Hash // hash function factory -} +type SignatureHasher = v1.SignatureHasher // NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) { - h := &hasher{} - - switch alg { - case MD5: - h.h = md5.New - case SHA1: - h.h = sha1.New - case SHA224: - h.h = sha256.New224 - case SHA256: - h.h = sha256.New - case SHA384: - h.h = sha512.New384 - case SHA512: - h.h = sha512.New - case SHA512224: - h.h = sha512.New512_224 - case SHA512256: - h.h = sha512.New512_256 - default: - return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg) - } - - return h, nil -} - -// HashFile hashes the file content, JSON or binary, both in golang native format. 
-func (h *hasher) HashFile(v interface{}) ([]byte, error) { - hf := h.h() - walk(v, hf) - return hf.Sum(nil), nil -} - -// walk hashes the file content, JSON or binary, both in golang native format. -// -// Computation for unstructured documents is a hash of the document. -// -// Computation for the types of structured JSON document is as follows: -// -// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }. -// -// array: Hash [, then digest of the value, then comma (between items) and finally ]. -func walk(v interface{}, h io.Writer) { - - switch x := v.(type) { - case map[string]interface{}: - _, _ = h.Write([]byte("{")) - - var keys []string - for k := range x { - keys = append(keys, k) - } - sort.Strings(keys) - - for i, key := range keys { - if i > 0 { - _, _ = h.Write([]byte(",")) - } - - _, _ = h.Write(encodePrimitive(key)) - _, _ = h.Write([]byte(":")) - walk(x[key], h) - } - - _, _ = h.Write([]byte("}")) - case []interface{}: - _, _ = h.Write([]byte("[")) - - for i, e := range x { - if i > 0 { - _, _ = h.Write([]byte(",")) - } - walk(e, h) - } - - _, _ = h.Write([]byte("]")) - case []byte: - _, _ = h.Write(x) - default: - _, _ = h.Write(encodePrimitive(x)) - } -} - -func encodePrimitive(v interface{}) []byte { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - encoder.SetEscapeHTML(false) - _ = encoder.Encode(v) - return []byte(strings.Trim(buf.String(), "\n")) + return v1.NewSignatureHasher(alg) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/keys.go b/vendor/github.com/open-policy-agent/opa/bundle/keys.go index 810bee4b72..99f9b0f165 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/keys.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/keys.go @@ -6,139 +6,25 @@ package bundle import ( - "encoding/pem" - "fmt" - "os" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws/sign" - 
"github.com/open-policy-agent/opa/keys" - - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultTokenSigningAlg = "RS256" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // KeyConfig holds the keys used to sign or verify bundles and tokens // Moved to own package, alias kept for backwards compatibility -type KeyConfig = keys.Config +type KeyConfig = v1.KeyConfig // VerificationConfig represents the key configuration used to verify a signed bundle -type VerificationConfig struct { - PublicKeys map[string]*KeyConfig - KeyID string `json:"keyid"` - Scope string `json:"scope"` - Exclude []string `json:"exclude_files"` -} +type VerificationConfig = v1.VerificationConfig // NewVerificationConfig return a new VerificationConfig func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig { - return &VerificationConfig{ - PublicKeys: keys, - KeyID: id, - Scope: scope, - Exclude: exclude, - } -} - -// ValidateAndInjectDefaults validates the config and inserts default values -func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error { - vc.PublicKeys = keys - - if vc.KeyID != "" { - found := false - for key := range keys { - if key == vc.KeyID { - found = true - break - } - } - - if !found { - return fmt.Errorf("key id %s not found", vc.KeyID) - } - } - return nil -} - -// GetPublicKey returns the public key corresponding to the given key id -func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) { - var kc *KeyConfig - var ok bool - - if kc, ok = vc.PublicKeys[id]; !ok { - return nil, fmt.Errorf("verification key corresponding to ID %v not found", id) - } - return kc, nil + return v1.NewVerificationConfig(keys, id, scope, exclude) } // SigningConfig represents the key configuration used to generate a signed bundle -type SigningConfig struct { - Plugin string - Key string - Algorithm string - ClaimsPath string -} +type SigningConfig = v1.SigningConfig // 
NewSigningConfig return a new SigningConfig func NewSigningConfig(key, alg, claimsPath string) *SigningConfig { - if alg == "" { - alg = defaultTokenSigningAlg - } - - return &SigningConfig{ - Plugin: defaultSignerID, - Key: key, - Algorithm: alg, - ClaimsPath: claimsPath, - } -} - -// WithPlugin sets the signing plugin in the signing config -func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig { - if plugin != "" { - s.Plugin = plugin - } - return s -} - -// GetPrivateKey returns the private key or secret from the signing config -func (s *SigningConfig) GetPrivateKey() (interface{}, error) { - - block, _ := pem.Decode([]byte(s.Key)) - if block != nil { - return sign.GetSigningKey(s.Key, jwa.SignatureAlgorithm(s.Algorithm)) - } - - var priv string - if _, err := os.Stat(s.Key); err == nil { - bs, err := os.ReadFile(s.Key) - if err != nil { - return nil, err - } - priv = string(bs) - } else if os.IsNotExist(err) { - priv = s.Key - } else { - return nil, err - } - - return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm)) -} - -// GetClaims returns the claims by reading the file specified in the signing config -func (s *SigningConfig) GetClaims() (map[string]interface{}, error) { - var claims map[string]interface{} - - bs, err := os.ReadFile(s.ClaimsPath) - if err != nil { - return claims, err - } - - if err := util.UnmarshalJSON(bs, &claims); err != nil { - return claims, err - } - return claims, nil + return v1.NewSigningConfig(key, alg, claimsPath) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/sign.go b/vendor/github.com/open-policy-agent/opa/bundle/sign.go index cf9a3e183a..56e25eec9c 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/sign.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/sign.go @@ -6,130 +6,30 @@ package bundle import ( - "crypto/rand" - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws" + v1 
"github.com/open-policy-agent/opa/v1/bundle" ) -const defaultSignerID = "_default" - -var signers map[string]Signer - // Signer is the interface expected for implementations that generate bundle signatures. -type Signer interface { - GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error) -} +type Signer v1.Signer // GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified // in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates // a signed token given the list of files to be included in the payload and the bundle // signing config. The keyID if non-empty, represents the value for the "keyid" claim in the token. func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { - var plugin string - // for backwards compatibility, check if there is no plugin specified, and use default - if sc.Plugin == "" { - plugin = defaultSignerID - } else { - plugin = sc.Plugin - } - signer, err := GetSigner(plugin) - if err != nil { - return "", err - } - return signer.GenerateSignedToken(files, sc, keyID) + return v1.GenerateSignedToken(files, sc, keyID) } // DefaultSigner is the default bundle signing implementation. It signs bundles by generating // a JWT and signing it using a locally-accessible private key. -type DefaultSigner struct{} - -// GenerateSignedToken generates a signed token given the list of files to be -// included in the payload and the bundle signing config. 
The keyID if non-empty, -// represents the value for the "keyid" claim in the token -func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { - payload, err := generatePayload(files, sc, keyID) - if err != nil { - return "", err - } - - privateKey, err := sc.GetPrivateKey() - if err != nil { - return "", err - } - - var headers jws.StandardHeaders - - if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil { - return "", err - } - - if keyID != "" { - if err := headers.Set(jws.KeyIDKey, keyID); err != nil { - return "", err - } - } - - hdr, err := json.Marshal(headers) - if err != nil { - return "", err - } - - token, err := jws.SignLiteral(payload, - jwa.SignatureAlgorithm(sc.Algorithm), - privateKey, - hdr, - rand.Reader) - if err != nil { - return "", err - } - return string(token), nil -} - -func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) { - payload := make(map[string]interface{}) - payload["files"] = files - - if sc.ClaimsPath != "" { - claims, err := sc.GetClaims() - if err != nil { - return nil, err - } - - for claim, value := range claims { - payload[claim] = value - } - } else { - if keyID != "" { - // keyid claim is deprecated but include it for backwards compatibility. 
- payload["keyid"] = keyID - } - } - return json.Marshal(payload) -} +type DefaultSigner v1.DefaultSigner // GetSigner returns the Signer registered under the given id func GetSigner(id string) (Signer, error) { - signer, ok := signers[id] - if !ok { - return nil, fmt.Errorf("no signer exists under id %s", id) - } - return signer, nil + return v1.GetSigner(id) } // RegisterSigner registers a Signer under the given id func RegisterSigner(id string, s Signer) error { - if id == defaultSignerID { - return fmt.Errorf("signer id %s is reserved, use a different id", id) - } - signers[id] = s - return nil -} - -func init() { - signers = map[string]Signer{ - defaultSignerID: &DefaultSigner{}, - } + return v1.RegisterSigner(id, s) } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/store.go b/vendor/github.com/open-policy-agent/opa/bundle/store.go index 9a49f025e8..85b8515eb2 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/store.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/store.go @@ -6,1031 +6,151 @@ package bundle import ( "context" - "encoding/base64" - "encoding/json" - "fmt" - "path/filepath" - "strings" "github.com/open-policy-agent/opa/ast" - iCompiler "github.com/open-policy-agent/opa/internal/compiler" - "github.com/open-policy-agent/opa/internal/json/patch" - "github.com/open-policy-agent/opa/metrics" "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // BundlesBasePath is the storage path used for storing bundle metadata -var BundlesBasePath = storage.MustParsePath("/system/bundles") +var BundlesBasePath = v1.BundlesBasePath // Note: As needed these helpers could be memoized. // ManifestStoragePath is the storage path used for the given named bundle manifest. 
func ManifestStoragePath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest") + return v1.ManifestStoragePath(name) } // EtagStoragePath is the storage path used for the given named bundle etag. func EtagStoragePath(name string) storage.Path { - return append(BundlesBasePath, name, "etag") -} - -func namedBundlePath(name string) storage.Path { - return append(BundlesBasePath, name) -} - -func rootsPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "roots") -} - -func revisionPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "revision") -} - -func wasmModulePath(name string) storage.Path { - return append(BundlesBasePath, name, "wasm") -} - -func wasmEntrypointsPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "wasm") -} - -func metadataPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "metadata") -} - -func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (interface{}, error) { - value, err := store.Read(ctx, txn, path) - if err != nil { - return nil, err - } - - if astValue, ok := value.(ast.Value); ok { - value, err = ast.JSON(astValue) - if err != nil { - return nil, err - } - } - - return value, nil + return v1.EtagStoragePath(name) } // ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored. 
func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) { - value, err := read(ctx, store, txn, BundlesBasePath) - if err != nil { - return nil, err - } - - bundleMap, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest roots") - } - - bundles := make([]string, len(bundleMap)) - idx := 0 - for name := range bundleMap { - bundles[idx] = name - idx++ - } - return bundles, nil + return v1.ReadBundleNamesFromStore(ctx, store, txn) } // WriteManifestToStore will write the manifest into the storage. This function is called when // the bundle is activated. func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error { - return write(ctx, store, txn, ManifestStoragePath(name), manifest) + return v1.WriteManifestToStore(ctx, store, txn, name, manifest) } // WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated. func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error { - return write(ctx, store, txn, EtagStoragePath(name), etag) -} - -func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value interface{}) error { - if err := util.RoundTrip(&value); err != nil { - return err - } - - var dir []string - if len(path) > 1 { - dir = path[:len(path)-1] - } - - if err := storage.MakeDir(ctx, store, txn, dir); err != nil { - return err - } - - return store.Write(ctx, txn, storage.AddOp, path, value) + return v1.WriteEtagToStore(ctx, store, txn, name, etag) } // EraseManifestFromStore will remove the manifest from storage. This function is called // when the bundle is deactivated. 
func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := namedBundlePath(name) - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -// eraseBundleEtagFromStore will remove the bundle etag from storage. This function is called -// when the bundle is deactivated. -func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := EtagStoragePath(name) - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -func suppressNotFound(err error) error { - if err == nil || storage.IsNotFound(err) { - return nil - } - return err -} - -func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error { - basePath := wasmModulePath(name) - for _, wm := range b.WasmModules { - path := append(basePath, wm.Path) - err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw)) - if err != nil { - return err - } - } - return nil -} - -func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := wasmModulePath(name) - - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store. 
-func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) { - path := wasmEntrypointsPath(name) - value, err := read(ctx, store, txn, path) - if err != nil { - return nil, err - } - - bs, err := json.Marshal(value) - if err != nil { - return nil, fmt.Errorf("corrupt wasm manifest data") - } - - var wasmMetadata []WasmResolver - - err = util.UnmarshalJSON(bs, &wasmMetadata) - if err != nil { - return nil, fmt.Errorf("corrupt wasm manifest data") - } - - return wasmMetadata, nil + return v1.EraseManifestFromStore(ctx, store, txn, name) } // ReadWasmModulesFromStore will write Wasm module resolver metadata from the store. func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) { - path := wasmModulePath(name) - value, err := read(ctx, store, txn, path) - if err != nil { - return nil, err - } - - encodedModules, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt wasm modules") - } - - rawModules := map[string][]byte{} - for path, enc := range encodedModules { - encStr, ok := enc.(string) - if !ok { - return nil, fmt.Errorf("corrupt wasm modules") - } - bs, err := base64.StdEncoding.DecodeString(encStr) - if err != nil { - return nil, err - } - rawModules[path] = bs - } - return rawModules, nil + return v1.ReadWasmModulesFromStore(ctx, store, txn, name) } // ReadBundleRootsFromStore returns the roots in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. 
func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) { - value, err := read(ctx, store, txn, rootsPath(name)) - if err != nil { - return nil, err - } - - sl, ok := value.([]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest roots") - } - - roots := make([]string, len(sl)) - - for i := range sl { - roots[i], ok = sl[i].(string) - if !ok { - return nil, fmt.Errorf("corrupt manifest root") - } - } - - return roots, nil + return v1.ReadBundleRootsFromStore(ctx, store, txn, name) } // ReadBundleRevisionFromStore returns the revision in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { - return readRevisionFromStore(ctx, store, txn, revisionPath(name)) -} - -func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { - value, err := read(ctx, store, txn, path) - if err != nil { - return "", err - } - - str, ok := value.(string) - if !ok { - return "", fmt.Errorf("corrupt manifest revision") - } - - return str, nil + return v1.ReadBundleRevisionFromStore(ctx, store, txn, name) } // ReadBundleMetadataFromStore returns the metadata in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. 
-func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]interface{}, error) { - return readMetadataFromStore(ctx, store, txn, metadataPath(name)) -} - -func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]interface{}, error) { - value, err := read(ctx, store, txn, path) - if err != nil { - return nil, suppressNotFound(err) - } - - data, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest metadata") - } - - return data, nil +func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]any, error) { + return v1.ReadBundleMetadataFromStore(ctx, store, txn, name) } // ReadBundleEtagFromStore returns the etag for the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { - return readEtagFromStore(ctx, store, txn, EtagStoragePath(name)) -} - -func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { - value, err := read(ctx, store, txn, path) - if err != nil { - return "", err - } - - str, ok := value.(string) - if !ok { - return "", fmt.Errorf("corrupt bundle etag") - } - - return str, nil + return v1.ReadBundleEtagFromStore(ctx, store, txn, name) } // ActivateOpts defines options for the Activate API call. 
-type ActivateOpts struct { - Ctx context.Context - Store storage.Store - Txn storage.Transaction - TxnCtx *storage.Context - Compiler *ast.Compiler - Metrics metrics.Metrics - Bundles map[string]*Bundle // Optional - ExtraModules map[string]*ast.Module // Optional - AuthorizationDecisionRef ast.Ref - ParserOptions ast.ParserOptions - - legacy bool -} +type ActivateOpts = v1.ActivateOpts // Activate the bundle(s) by loading into the given Store. This will load policies, data, and record // the manifest in storage. The compiler provided will have had the polices compiled on it. func Activate(opts *ActivateOpts) error { - opts.legacy = false - return activateBundles(opts) + return v1.Activate(setActivateDefaultRegoVersion(opts)) } // DeactivateOpts defines options for the Deactivate API call -type DeactivateOpts struct { - Ctx context.Context - Store storage.Store - Txn storage.Transaction - BundleNames map[string]struct{} - ParserOptions ast.ParserOptions -} +type DeactivateOpts = v1.DeactivateOpts // Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store. 
func Deactivate(opts *DeactivateOpts) error { - erase := map[string]struct{}{} - for name := range opts.BundleNames { - roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) - if suppressNotFound(err) != nil { - return err - } - for _, root := range roots { - erase[root] = struct{}{} - } - } - _, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, opts.BundleNames, erase) - return err -} - -func activateBundles(opts *ActivateOpts) error { - - // Build collections of bundle names, modules, and roots to erase - erase := map[string]struct{}{} - names := map[string]struct{}{} - deltaBundles := map[string]*Bundle{} - snapshotBundles := map[string]*Bundle{} - - for name, b := range opts.Bundles { - if b.Type() == DeltaBundleType { - deltaBundles[name] = b - } else { - snapshotBundles[name] = b - names[name] = struct{}{} - - roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) - if suppressNotFound(err) != nil { - return err - } - for _, root := range roots { - erase[root] = struct{}{} - } - - // Erase data at new roots to prepare for writing the new data - for _, root := range *b.Manifest.Roots { - erase[root] = struct{}{} - } - } - } - - // Before changing anything make sure the roots don't collide with any - // other bundles that already are activated or other bundles being activated. - err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles) - if err != nil { - return err - } - - if len(deltaBundles) != 0 { - err := activateDeltaBundles(opts, deltaBundles) - if err != nil { - return err - } - } - - // Erase data and policies at new + old roots, and remove the old - // manifests before activating a new snapshot bundle. - remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, names, erase) - if err != nil { - return err - } - - // Validate data in bundle does not contain paths outside the bundle's roots. 
- for _, b := range snapshotBundles { - - if b.lazyLoadingMode { - - for _, item := range b.Raw { - path := filepath.ToSlash(item.Path) - - if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile { - var val map[string]json.RawMessage - err = util.Unmarshal(item.Value, &val) - if err == nil { - err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) - if err != nil { - return err - } - } else { - // Build an object for the value - p := getNormalizedPath(path) - - if len(p) == 0 { - return fmt.Errorf("root value must be object") - } - - // verify valid YAML or JSON value - var x interface{} - err := util.Unmarshal(item.Value, &x) - if err != nil { - return err - } - - value := item.Value - dir := map[string]json.RawMessage{} - for i := len(p) - 1; i > 0; i-- { - dir[p[i]] = value - - bs, err := json.Marshal(dir) - if err != nil { - return err - } - - value = bs - dir = map[string]json.RawMessage{} - } - dir[p[0]] = value - - err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) - if err != nil { - return err - } - } - } - } - } - } - - // Compile the modules all at once to avoid having to re-do work. 
- remainingAndExtra := make(map[string]*ast.Module) - for name, mod := range remaining { - remainingAndExtra[name] = mod - } - for name, mod := range opts.ExtraModules { - remainingAndExtra[name] = mod - } - - err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy, opts.AuthorizationDecisionRef) - if err != nil { - return err - } - - if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy); err != nil { - return err - } - - if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { - return err - } - - for name, b := range snapshotBundles { - if err := writeManifestToStore(opts, name, b.Manifest); err != nil { - return err - } - - if err := writeEtagToStore(opts, name, b.Etag); err != nil { - return err - } - - if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil { - return err - } - } - - return nil + return v1.Deactivate(setDeactivateDefaultRegoVersion(opts)) } -func doDFS(obj map[string]json.RawMessage, path string, roots []string) error { - if len(roots) == 1 && roots[0] == "" { - return nil - } - - for key := range obj { - - newPath := filepath.Join(strings.Trim(path, "/"), key) - - // Note: filepath.Join can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. 
- newPath = strings.TrimLeft(normalizePath(newPath), "/.") - - contains := false - prefix := false - if RootPathsContain(roots, newPath) { - contains = true - } else { - for i := range roots { - if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) { - prefix = true - break - } - } - } - - if !contains && !prefix { - return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) - } - - if contains { - continue - } - - var next map[string]json.RawMessage - err := util.Unmarshal(obj[key], &next) - if err != nil { - return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) - } - - if err := doDFS(next, newPath, roots); err != nil { - return err - } - } - return nil -} - -func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error { - - // Check that the manifest roots and wasm resolvers in the delta bundle - // match with those currently in the store - for name, b := range bundles { - value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name)) - if err != nil { - if storage.IsNotFound(err) { - continue - } - return err - } - - manifest, err := valueToManifest(value) - if err != nil { - return fmt.Errorf("corrupt manifest data: %w", err) - } - - if !b.Manifest.equalWasmResolversAndRoots(manifest) { - return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name) - } - } - - for _, b := range bundles { - err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data) - if err != nil { - return err - } - } - - if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { - return err - } - - for name, b := range bundles { - if err := writeManifestToStore(opts, name, b.Manifest); err != nil { - return err - } - - if err := writeEtagToStore(opts, name, b.Etag); err != nil { - return 
err - } - } - - return nil -} - -func valueToManifest(v interface{}) (Manifest, error) { - if astV, ok := v.(ast.Value); ok { - var err error - v, err = ast.JSON(astV) - if err != nil { - return Manifest{}, err - } - } - - var manifest Manifest - - bs, err := json.Marshal(v) - if err != nil { - return Manifest{}, err - } - - err = util.UnmarshalJSON(bs, &manifest) - if err != nil { - return Manifest{}, err - } - - return manifest, nil -} - -// erase bundles by name and roots. This will clear all policies and data at its roots and remove its -// manifest from storage. -func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) { - - if err := eraseData(ctx, store, txn, roots); err != nil { - return nil, err - } - - remaining, err := erasePolicies(ctx, store, txn, parserOpts, roots) - if err != nil { - return nil, err - } - - for name := range names { - if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - - if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil { - return nil, err - } - - if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - - if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - } - - return remaining, nil -} - -func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error { - for root := range roots { - path, ok := storage.ParsePathEscaped("/" + root) - if !ok { - return fmt.Errorf("manifest root path invalid: %v", root) - } - - if len(path) > 0 { - if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil { - return err - } - } - } - return nil -} - -func erasePolicies(ctx context.Context, store storage.Store, txn 
storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, error) { - - ids, err := store.ListPolicies(ctx, txn) - if err != nil { - return nil, err - } - - remaining := map[string]*ast.Module{} - - for _, id := range ids { - bs, err := store.GetPolicy(ctx, txn, id) - if err != nil { - return nil, err - } - module, err := ast.ParseModuleWithOpts(id, string(bs), parserOpts) - if err != nil { - return nil, err - } - path, err := module.Package.Path.Ptr() - if err != nil { - return nil, err - } - deleted := false - for root := range roots { - if RootPathsContain([]string{root}, path) { - if err := store.DeletePolicy(ctx, txn, id); err != nil { - return nil, err - } - deleted = true - break - } - } - if !deleted { - remaining[id] = module - } - } - - return remaining, nil -} - -func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error { - // Always write manifests to the named location. If the plugin is in the older style config - // then also write to the old legacy unnamed location. - if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil { - return err - } - - if opts.legacy { - if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil { - return err - } - } - - return nil +// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location. +// +// Deprecated: Use WriteManifestToStore and named bundles instead. 
+func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error { + return v1.LegacyWriteManifestToStore(ctx, store, txn, manifest) } -func writeEtagToStore(opts *ActivateOpts, name, etag string) error { - if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil { - return err - } - - return nil +// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location. +// +// Deprecated: Use WriteManifestToStore and named bundles instead. +func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error { + return v1.LegacyEraseManifestFromStore(ctx, store, txn) } -func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool) error { - params := storage.WriteParams - params.Context = txnCtx - - for name, b := range bundles { - if len(b.Raw) == 0 { - // Write data from each new bundle into the store. Only write under the - // roots contained in their manifest. - if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil { - return err - } - - for _, mf := range b.Modules { - var path string - - // For backwards compatibility, in legacy mode, upsert policies to - // the unprefixed path. - if legacy { - path = mf.Path - } else { - path = modulePathWithPrefix(name, mf.Path) - } - - if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { - return err - } - } - } else { - params.BasePaths = *b.Manifest.Roots - - err := store.Truncate(ctx, txn, params, NewIterator(b.Raw)) - if err != nil { - return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err) - } - } - } - - return nil +// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location. 
+// +// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead. +func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) { + return v1.LegacyReadRevisionFromStore(ctx, store, txn) } -func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]interface{}) error { - for _, root := range roots { - path, ok := storage.ParsePathEscaped("/" + root) - if !ok { - return fmt.Errorf("manifest root path invalid: %v", root) - } - if value, ok := lookup(path, data); ok { - if len(path) > 0 { - if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - } - if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil { - return err - } - } - } - return nil +// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location. +// +// Deprecated: Use Activate with named bundles instead. 
+func ActivateLegacy(opts *ActivateOpts) error { + return v1.ActivateLegacy(opts) } -func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool, authorizationDecisionRef ast.Ref) error { - - m.Timer(metrics.RegoModuleCompile).Start() - defer m.Timer(metrics.RegoModuleCompile).Stop() - - modules := map[string]*ast.Module{} - - // preserve any modules already on the compiler - for name, module := range compiler.Modules { - modules[name] = module - } - - // preserve any modules passed in from the store - for name, module := range extraModules { - modules[name] = module - } - - // include all the new bundle modules - for bundleName, b := range bundles { - if legacy { - for _, mf := range b.Modules { - modules[mf.Path] = mf.Parsed - } - } else { - for name, module := range b.ParsedModules(bundleName) { - modules[name] = module - } - } - } - - if compiler.Compile(modules); compiler.Failed() { - return compiler.Errors - } - - if authorizationDecisionRef.Equal(ast.EmptyRef()) { +func setActivateDefaultRegoVersion(opts *ActivateOpts) *ActivateOpts { + if opts == nil { return nil } - return iCompiler.VerifyAuthorizationPolicySchema(compiler, authorizationDecisionRef) -} - -func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error { - - m.Timer(metrics.RegoModuleCompile).Start() - defer m.Timer(metrics.RegoModuleCompile).Stop() - - modules := map[string]*ast.Module{} - - // preserve any modules already on the compiler - for name, module := range compiler.Modules { - modules[name] = module + if opts.ParserOptions.RegoVersion == ast.RegoUndefined { + cpy := *opts + cpy.ParserOptions.RegoVersion = ast.DefaultRegoVersion + return &cpy } - // preserve any modules passed in from the store - for name, module := range extraModules { - modules[name] = 
module - } - - // include all the new bundle modules - for bundleName, b := range bundles { - if legacy { - for _, mf := range b.Modules { - modules[mf.Path] = mf.Parsed - } - } else { - for name, module := range b.ParsedModules(bundleName) { - modules[name] = module - } - } - } - - if compiler.Compile(modules); compiler.Failed() { - return compiler.Errors - } - for bundleName, b := range bundles { - for _, mf := range b.Modules { - var path string - - // For backwards compatibility, in legacy mode, upsert policies to - // the unprefixed path. - if legacy { - path = mf.Path - } else { - path = modulePathWithPrefix(bundleName, mf.Path) - } - - if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { - return err - } - } - } - return nil + return opts } -func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) { - if len(path) == 0 { - return data, true - } - for i := 0; i < len(path)-1; i++ { - value, ok := data[path[i]] - if !ok { - return nil, false - } - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, false - } - data = obj - } - value, ok := data[path[len(path)-1]] - return value, ok -} - -func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, bundles map[string]*Bundle) error { - collisions := map[string][]string{} - allBundles, err := ReadBundleNamesFromStore(ctx, store, txn) - if suppressNotFound(err) != nil { - return err - } - - allRoots := map[string][]string{} - - // Build a map of roots for existing bundles already in the system - for _, name := range allBundles { - roots, err := ReadBundleRootsFromStore(ctx, store, txn, name) - if suppressNotFound(err) != nil { - return err - } - allRoots[name] = roots - } - - // Add in any bundles that are being activated, overwrite existing roots - // with new ones where bundles are in both groups. 
- for name, bundle := range bundles { - allRoots[name] = *bundle.Manifest.Roots - } - - // Now check for each new bundle if it conflicts with any of the others - for name, bundle := range bundles { - for otherBundle, otherRoots := range allRoots { - if name == otherBundle { - // Skip the current bundle being checked - continue - } - - // Compare the "new" roots with other existing (or a different bundles new roots) - for _, newRoot := range *bundle.Manifest.Roots { - for _, otherRoot := range otherRoots { - if RootPathsOverlap(newRoot, otherRoot) { - collisions[otherBundle] = append(collisions[otherBundle], newRoot) - } - } - } - } - } - - if len(collisions) > 0 { - var bundleNames []string - for name := range collisions { - bundleNames = append(bundleNames, name) - } - return fmt.Errorf("detected overlapping roots in bundle manifest with: %s", bundleNames) - } - return nil -} - -func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error { - for _, pat := range patches { - - // construct patch path - path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/")) - if !ok { - return fmt.Errorf("error parsing patch path") - } - - var op storage.PatchOp - switch pat.Op { - case "upsert": - op = storage.AddOp - - _, err := store.Read(ctx, txn, path[:len(path)-1]) - if err != nil { - if !storage.IsNotFound(err) { - return err - } - - if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - } - case "remove": - op = storage.RemoveOp - case "replace": - op = storage.ReplaceOp - default: - return fmt.Errorf("bad patch operation: %v", pat.Op) - } - - // apply the patch - if err := store.Write(ctx, txn, op, path, pat.Value); err != nil { - return err - } +func setDeactivateDefaultRegoVersion(opts *DeactivateOpts) *DeactivateOpts { + if opts == nil { + return nil } - return nil -} - -// Helpers for the older single (unnamed) bundle style manifest storage. 
- -// LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored. -// Deprecated: Use ManifestStoragePath and named bundles instead. -var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest") -var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision") - -// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location. -// Deprecated: Use WriteManifestToStore and named bundles instead. -func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error { - return write(ctx, store, txn, legacyManifestStoragePath, manifest) -} - -// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location. -// Deprecated: Use WriteManifestToStore and named bundles instead. -func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error { - err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil) - if err != nil { - return err + if opts.ParserOptions.RegoVersion == ast.RegoUndefined { + cpy := *opts + cpy.ParserOptions.RegoVersion = ast.DefaultRegoVersion + return &cpy } - return nil -} -// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location. -// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead. -func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) { - return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath) -} - -// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location. -// Deprecated: Use Activate with named bundles instead. 
-func ActivateLegacy(opts *ActivateOpts) error { - opts.legacy = true - return activateBundles(opts) + return opts } diff --git a/vendor/github.com/open-policy-agent/opa/bundle/verify.go b/vendor/github.com/open-policy-agent/opa/bundle/verify.go index e85be835be..ef2e1e32db 100644 --- a/vendor/github.com/open-policy-agent/opa/bundle/verify.go +++ b/vendor/github.com/open-policy-agent/opa/bundle/verify.go @@ -6,26 +6,11 @@ package bundle import ( - "bytes" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws" - "github.com/open-policy-agent/opa/internal/jwx/jws/verify" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const defaultVerifierID = "_default" - -var verifiers map[string]Verifier - // Verifier is the interface expected for implementations that verify bundle signatures. -type Verifier interface { - VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error) -} +type Verifier v1.Verifier // VerifyBundleSignature will retrieve the Verifier implementation based // on the Plugin specified in SignaturesConfig, and call its implementation @@ -33,199 +18,19 @@ type Verifier interface { // using the given public keys or secret. 
If a signature is verified, it keeps // track of the files specified in the JWT payload func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { - // default implementation does not return a nil for map, so don't - // do it here either - files := make(map[string]FileInfo) - var plugin string - // for backwards compatibility, check if there is no plugin specified, and use default - if sc.Plugin == "" { - plugin = defaultVerifierID - } else { - plugin = sc.Plugin - } - verifier, err := GetVerifier(plugin) - if err != nil { - return files, err - } - return verifier.VerifyBundleSignature(sc, bvc) + return v1.VerifyBundleSignature(sc, bvc) } // DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking // the JWT signature using a locally-accessible public key. -type DefaultVerifier struct{} - -// VerifyBundleSignature verifies the bundle signature using the given public keys or secret. -// If a signature is verified, it keeps track of the files specified in the JWT payload -func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { - files := make(map[string]FileInfo) - - if len(sc.Signatures) == 0 { - return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)") - } - - if len(sc.Signatures) > 1 { - return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)") - } - - for _, token := range sc.Signatures { - payload, err := verifyJWTSignature(token, bvc) - if err != nil { - return files, err - } - - for _, file := range payload.Files { - files[file.Name] = file - } - } - return files, nil -} - -func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) { - // decode JWT to check if the header specifies the key to use and/or if claims have the scope. 
- - parts, err := jws.SplitCompact(token) - if err != nil { - return nil, err - } - - var decodedHeader []byte - if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil { - return nil, fmt.Errorf("failed to base64 decode JWT headers: %w", err) - } - - var hdr jws.StandardHeaders - if err := json.Unmarshal(decodedHeader, &hdr); err != nil { - return nil, fmt.Errorf("failed to parse JWT headers: %w", err) - } - - payload, err := base64.RawURLEncoding.DecodeString(parts[1]) - if err != nil { - return nil, err - } - - var ds DecodedSignature - if err := json.Unmarshal(payload, &ds); err != nil { - return nil, err - } - - // check for the id of the key to use for JWT signature verification - // first in the OPA config. If not found, then check the JWT kid. - keyID := bvc.KeyID - if keyID == "" { - keyID = hdr.KeyID - } - if keyID == "" { - // If header has no key id, check the deprecated key claim. - keyID = ds.KeyID - } - - if keyID == "" { - return nil, fmt.Errorf("verification key ID is empty") - } - - // now that we have the keyID, fetch the actual key - keyConfig, err := bvc.GetPublicKey(keyID) - if err != nil { - return nil, err - } - - // verify JWT signature - alg := jwa.SignatureAlgorithm(keyConfig.Algorithm) - key, err := verify.GetSigningKey(keyConfig.Key, alg) - if err != nil { - return nil, err - } - - _, err = jws.Verify([]byte(token), alg, key) - if err != nil { - return nil, err - } - - // verify the scope - scope := bvc.Scope - if scope == "" { - scope = keyConfig.Scope - } - - if ds.Scope != scope { - return nil, fmt.Errorf("scope mismatch") - } - return &ds, nil -} - -// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature -func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error { - var file FileInfo - var ok bool - - if file, ok = files[path]; !ok { - return fmt.Errorf("file %v not included in bundle signature", path) - } - - if 
file.Algorithm == "" { - return fmt.Errorf("no hashing algorithm provided for file %v", path) - } - - hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm)) - if err != nil { - return err - } - - // hash the file content - // For unstructured files, hash the byte stream of the file - // For structured files, read the byte stream and parse into a JSON structure; - // then recursively order the fields of all objects alphabetically and then apply - // the hash function to result to compute the hash. This ensures that the digital signature is - // independent of whitespace and other non-semantic JSON features. - var value interface{} - if IsStructuredDoc(path) { - err := util.Unmarshal(data.Bytes(), &value) - if err != nil { - return err - } - } else { - value = data.Bytes() - } - - bs, err := hash.HashFile(value) - if err != nil { - return err - } - - // compare file hash with same file in the JWT payloads - fb, err := hex.DecodeString(file.Hash) - if err != nil { - return err - } - - if !bytes.Equal(fb, bs) { - return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs) - } - - delete(files, path) - return nil -} +type DefaultVerifier = v1.DefaultVerifier // GetVerifier returns the Verifier registered under the given id func GetVerifier(id string) (Verifier, error) { - verifier, ok := verifiers[id] - if !ok { - return nil, fmt.Errorf("no verifier exists under id %s", id) - } - return verifier, nil + return v1.GetVerifier(id) } // RegisterVerifier registers a Verifier under the given id func RegisterVerifier(id string, v Verifier) error { - if id == defaultVerifierID { - return fmt.Errorf("verifier id %s is reserved, use a different id", id) - } - verifiers[id] = v - return nil -} - -func init() { - verifiers = map[string]Verifier{ - defaultVerifierID: &DefaultVerifier{}, - } + return v1.RegisterVerifier(id, v) } diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go 
b/vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go index ba32cf977e..5482720f7c 100644 --- a/vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go +++ b/vendor/github.com/open-policy-agent/opa/capabilities/capabilities.go @@ -2,9 +2,6 @@ // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -//go:build go1.16 -// +build go1.16 - package capabilities import ( diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/doc.go b/vendor/github.com/open-policy-agent/opa/capabilities/doc.go new file mode 100644 index 0000000000..189c2e727a --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
+package capabilities diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json new file mode 100644 index 0000000000..48a87b0c35 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + 
], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": 
"number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + 
"decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, 
+ "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": 
"array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": 
"hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ 
+ { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + 
} + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + 
"type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { 
+ "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + 
}, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": 
[ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } 
+ ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { 
+ "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": 
"number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + 
"type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": 
"string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json new file mode 100644 index 0000000000..48a87b0c35 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": 
{ + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + 
"decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": 
"object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + 
"type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { 
+ "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + 
}, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" 
+ }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, 
+ "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + 
"key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ 
+ { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": 
"number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": 
"net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": 
"array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, 
+ { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { 
+ "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": 
{ + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" 
+ } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } 
+ }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": 
"uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json new file mode 100644 index 0000000000..48a87b0c35 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + 
}, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": 
{ + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": 
"function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { 
+ "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + 
"type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + 
"dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" 
+ } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": 
"error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + 
], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + 
}, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": 
"any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } 
+ }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": 
"string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + 
{ + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": 
"yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json new file mode 100644 index 0000000000..0a37621d0c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.10.0.json @@ -0,0 +1,4867 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": 
"function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } 
+ ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + 
], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + 
"key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": 
{ + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { 
+ "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, 
+ "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + 
{ + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ 
+ { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + 
{ + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { 
+ "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": 
{ + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + 
"type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" 
+ } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ 
+ { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" 
+ } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], 
+ "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": 
"any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + 
"name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.0.json new file mode 100644 index 0000000000..d58fc6760f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.0.json @@ -0,0 +1,4878 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } 
+ ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" 
+ }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { 
+ "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + 
"dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": 
"string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": 
{ + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": 
"field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" 
+ } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { 
+ "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": 
{ + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + 
}, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, 
+ { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { 
+ "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + 
{ + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": 
"yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.1.json new file mode 100644 index 0000000000..d58fc6760f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.11.1.json @@ -0,0 +1,4878 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + 
"decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + 
} + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + 
"type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + 
}, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": 
"function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + 
} + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": 
"in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { 
+ "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { 
+ "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + 
"type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + 
"static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + 
{ + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": 
{ + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + 
"name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, 
+ "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + 
"name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" 
+ } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + 
"type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + 
"type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.0.json new file mode 100644 index 0000000000..e4bf21abf3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.0.json @@ -0,0 +1,4896 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": 
"any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" 
+ }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": 
"any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": 
"number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + 
}, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { 
+ "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + 
"name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": 
"string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": 
"number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + 
}, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + 
"name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + 
"type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + 
}, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": 
"array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + 
"type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.1.json new file mode 100644 index 0000000000..e4bf21abf3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.1.json @@ -0,0 +1,4896 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": 
"set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": 
true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" 
+ } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": 
"any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + 
"type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + 
}, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + 
{ + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" 
+ } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": 
"array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { 
+ "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + 
"of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" 
+ }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": 
"set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + 
} + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { 
+ "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + 
"type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + 
"nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { 
+ "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.2.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.2.json new file mode 100644 index 0000000000..e4bf21abf3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.2.json @@ -0,0 +1,4896 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + 
}, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + 
"type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } 
+ ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + 
}, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + 
"of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + 
"decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + 
], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + 
"of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": 
"set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + 
"type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + 
} + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": 
"any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": 
"number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + 
}, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, 
+ { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": 
"urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.3.json 
b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.3.json new file mode 100644 index 0000000000..e4bf21abf3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.12.3.json @@ -0,0 +1,4896 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, 
+ "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { 
+ "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { 
+ "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + 
], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { 
+ "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, 
+ "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, 
+ "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, 
+ "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": 
"any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { 
+ "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": 
"\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + 
"type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": 
"net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": 
[ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + 
"decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + 
"type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": 
"function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + 
"type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": 
"number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { 
+ "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": 
"string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + 
"result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.0.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.0.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": 
"array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + 
"type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": 
"ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } 
+ ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], 
+ "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + 
{ + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + 
"type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": 
"string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + 
"result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" 
+ } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": 
"!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": 
"object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + 
"result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } 
+ ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + 
"dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": 
{ + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": 
"number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": 
"function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + 
], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.1.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.1.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + 
}, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + 
}, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ 
+ { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + 
}, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + 
}, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + 
}, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + 
"type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + 
{ + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + 
} + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": 
"pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + 
"result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" 
+ } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" 
+ } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + 
"type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { 
+ "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { 
+ "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + 
"type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": 
[ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" 
+ } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.2.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.2.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.13.2.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": 
"base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, 
+ "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": 
"number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" 
+ } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": 
[ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + 
}, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } 
+ }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" 
+ }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": 
"any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": 
"set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": 
"plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + 
} + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": 
"array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { 
+ "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": 
"array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] 
+} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.0.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.0.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + 
"dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": 
"function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": 
"string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + 
"dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": 
{ + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + 
}, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" 
+ } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": 
{ + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": 
"string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + 
"decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + 
"args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + 
"args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": 
"function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + 
"type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + 
}, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": 
"string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + 
{ + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": 
"yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.1.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.14.1.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + 
"name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + 
{ + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": 
"function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { 
+ "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + 
"type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + 
"key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { 
+ "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { 
+ "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": 
{ + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": 
"function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + 
"infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, 
+ "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + 
"type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + 
"type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + 
"type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" 
+ } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + 
}, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.0.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.0.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + 
"name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": 
"cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" 
+ } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, 
+ "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + 
], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, 
+ { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + 
}, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": 
"any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + 
} + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + 
"of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + 
"type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": 
[ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + 
"name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + 
"decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + 
"name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + 
"type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, 
+ { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], 
+ "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.1.json new file mode 100644 index 0000000000..9eb82c2968 
--- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.1.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { 
+ "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", 
+ "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" 
+ } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, 
+ { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, 
+ "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + 
"args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" 
+ } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": 
"any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + 
"args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" 
+ }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + 
"dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + 
"result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": 
"any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } 
+ }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": 
{ + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + 
"key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + 
{ + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + 
} + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { 
+ "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" 
+ } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.2.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.2.json new file mode 100644 index 0000000000..9eb82c2968 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.15.2.json @@ -0,0 +1,4916 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "array.flatten", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": 
"number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + 
}, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + 
"name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.template_string", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true 
+ }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + 
"result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": 
"any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": 
"net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + 
"type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" 
+ }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": 
"function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": 
[ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "deprecated": true + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + 
"type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + 
"type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1", + "template_strings" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json new file mode 100644 index 0000000000..1253c88b30 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.2.0.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + 
{ + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": 
"string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + 
"decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + 
"type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { 
+ "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { 
+ "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + 
}, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": 
"any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + 
{ + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + 
{ + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" 
+ }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": 
{ + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + 
} + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { 
+ "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": 
"re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": 
{ + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" 
+ }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + 
{ + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + 
"relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json new file mode 100644 index 0000000000..1253c88b30 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.3.0.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + 
"args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + 
"dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": 
"string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": 
{ + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, 
+ "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": 
"\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": 
"string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + 
} + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": 
"string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": 
"number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + 
}, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + 
}, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": 
"string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, 
+ { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json new file mode 100644 index 0000000000..1253c88b30 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.0.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + 
"type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + 
} + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" 
+ } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": 
"any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } 
+ ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": 
[ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } 
+ ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + 
}, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + 
"type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": 
"any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + 
"of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" 
+ }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" 
+ }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + 
"type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": 
"%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": 
"string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + 
"value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + 
"rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json new file mode 100644 index 0000000000..1253c88b30 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.1.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + 
}, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + 
"args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + 
], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + 
"of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + 
}, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": 
{ + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": 
"function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + 
} + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + 
"of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + 
"type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": 
"number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": 
"object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { 
+ "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + 
"result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": 
"function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": 
"array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + 
} + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json new file mode 100644 index 0000000000..1253c88b30 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.4.2.json @@ 
-0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + 
"name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": 
"any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": 
"string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + 
{ + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", 
+ "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + 
}, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { 
+ "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": 
"prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": 
"any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": 
"number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": 
"string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { 
+ "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" 
+ }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": 
"any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + 
"type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": 
[ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" 
+ } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.0.json new file mode 100644 index 0000000000..1253c88b30 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.0.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": 
"any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + 
}, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + 
"args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": 
"string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + 
"infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" 
+ } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": 
true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { 
+ "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, 
+ { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + 
"decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + 
"args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + 
{ + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + 
}, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": 
"function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + 
}, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { 
+ "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": 
"time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + 
"type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.1.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.1.json new file mode 100644 index 0000000000..1253c88b30 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.5.1.json @@ -0,0 +1,4849 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + 
"result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { 
+ "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { 
+ "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + 
"type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + 
"type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } 
+ }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + 
], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": 
"null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + 
"type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + 
{ + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + 
} + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + 
"type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + 
"key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + 
}, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + 
], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": 
"units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { 
+ "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.6.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.6.0.json new file mode 100644 index 0000000000..110c3eca91 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.6.0.json @@ -0,0 +1,4850 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } 
+ ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + 
}, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + 
"type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + 
}, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + 
"name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { 
+ "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": 
"string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": 
{ + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": 
"type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + 
"type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + 
"dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { 
+ "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + 
"type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" 
+ } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ 
+ { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" 
+ } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], 
+ "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": 
"any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + 
"name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.7.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.7.0.json new file mode 100644 index 0000000000..110c3eca91 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.7.0.json @@ -0,0 +1,4850 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": 
"any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + 
"args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } 
+ }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": 
{ + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + 
"type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": 
true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": 
"any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": 
"string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + 
}, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + 
{ + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { 
+ "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": 
"rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": 
"number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } 
+ } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.8.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.8.0.json new file mode 100644 index 0000000000..0a37621d0c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.8.0.json @@ -0,0 +1,4867 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + 
"name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { 
+ "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": 
[ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": 
"crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + 
"type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + 
{ + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { 
+ "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + 
"decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" 
+ } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + 
{ + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": 
"any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + 
"of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { 
+ "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + 
"dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + 
"result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + 
], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + 
"type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": 
"function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": 
"trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + 
"type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + 
} + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git a/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json b/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json new file mode 100644 index 0000000000..0a37621d0c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/capabilities/v1.9.0.json @@ -0,0 +1,4867 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": 
"number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + 
"decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + 
} + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": 
{ + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + 
"key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + 
"type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + 
"type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": 
"\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "internal.test_case", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + 
"type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_eddsa", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": 
"function" + } + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": 
"string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": 
[ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { 
+ "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + 
"result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": 
"any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": 
"plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": 
"regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { 
+ "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + 
}, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": 
[ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + 
"static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, 
+ "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "keywords_in_refs", + "rego_v1" + ] +} diff --git 
a/vendor/github.com/open-policy-agent/opa/config/config.go b/vendor/github.com/open-policy-agent/opa/config/config.go deleted file mode 100644 index 87ab109113..0000000000 --- a/vendor/github.com/open-policy-agent/opa/config/config.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package config implements OPA configuration file parsing and validation. -package config - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "reflect" - "sort" - "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/ref" - "github.com/open-policy-agent/opa/util" - "github.com/open-policy-agent/opa/version" -) - -// Config represents the configuration file that OPA can be started with. -type Config struct { - Services json.RawMessage `json:"services,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Discovery json.RawMessage `json:"discovery,omitempty"` - Bundle json.RawMessage `json:"bundle,omitempty"` // Deprecated: Use `bundles` instead - Bundles json.RawMessage `json:"bundles,omitempty"` - DecisionLogs json.RawMessage `json:"decision_logs,omitempty"` - Status json.RawMessage `json:"status,omitempty"` - Plugins map[string]json.RawMessage `json:"plugins,omitempty"` - Keys json.RawMessage `json:"keys,omitempty"` - DefaultDecision *string `json:"default_decision,omitempty"` - DefaultAuthorizationDecision *string `json:"default_authorization_decision,omitempty"` - Caching json.RawMessage `json:"caching,omitempty"` - NDBuiltinCache bool `json:"nd_builtin_cache,omitempty"` - PersistenceDirectory *string `json:"persistence_directory,omitempty"` - DistributedTracing json.RawMessage `json:"distributed_tracing,omitempty"` - Server *struct { - Encoding json.RawMessage `json:"encoding,omitempty"` - Decoding json.RawMessage `json:"decoding,omitempty"` - Metrics json.RawMessage 
`json:"metrics,omitempty"` - } `json:"server,omitempty"` - Storage *struct { - Disk json.RawMessage `json:"disk,omitempty"` - } `json:"storage,omitempty"` - Extra map[string]json.RawMessage `json:"-"` -} - -// ParseConfig returns a valid Config object with defaults injected. The id -// and version parameters will be set in the labels map. -func ParseConfig(raw []byte, id string) (*Config, error) { - // NOTE(sr): based on https://stackoverflow.com/a/33499066/993018 - var result Config - objValue := reflect.ValueOf(&result).Elem() - knownFields := map[string]reflect.Value{} - for i := 0; i != objValue.NumField(); i++ { - jsonName := strings.Split(objValue.Type().Field(i).Tag.Get("json"), ",")[0] - knownFields[jsonName] = objValue.Field(i) - } - - if err := util.Unmarshal(raw, &result.Extra); err != nil { - return nil, err - } - - for key, chunk := range result.Extra { - if field, found := knownFields[key]; found { - if err := util.Unmarshal(chunk, field.Addr().Interface()); err != nil { - return nil, err - } - delete(result.Extra, key) - } - } - if len(result.Extra) == 0 { - result.Extra = nil - } - return &result, result.validateAndInjectDefaults(id) -} - -// PluginNames returns a sorted list of names of enabled plugins. -func (c Config) PluginNames() (result []string) { - if c.Bundle != nil || c.Bundles != nil { - result = append(result, "bundles") - } - if c.Status != nil { - result = append(result, "status") - } - if c.DecisionLogs != nil { - result = append(result, "decision_logs") - } - for name := range c.Plugins { - result = append(result, name) - } - sort.Strings(result) - return result -} - -// PluginsEnabled returns true if one or more plugin features are enabled. -// -// Deprecated. Use PluginNames instead. -func (c Config) PluginsEnabled() bool { - return c.Bundle != nil || c.Bundles != nil || c.DecisionLogs != nil || c.Status != nil || len(c.Plugins) > 0 -} - -// DefaultDecisionRef returns the default decision as a reference. 
-func (c Config) DefaultDecisionRef() ast.Ref { - r, _ := ref.ParseDataPath(*c.DefaultDecision) - return r -} - -// DefaultAuthorizationDecisionRef returns the default authorization decision -// as a reference. -func (c Config) DefaultAuthorizationDecisionRef() ast.Ref { - r, _ := ref.ParseDataPath(*c.DefaultAuthorizationDecision) - return r -} - -// NDBuiltinCacheEnabled returns if the ND builtins cache should be used. -func (c Config) NDBuiltinCacheEnabled() bool { - return c.NDBuiltinCache -} - -func (c *Config) validateAndInjectDefaults(id string) error { - - if c.DefaultDecision == nil { - s := defaultDecisionPath - c.DefaultDecision = &s - } - - _, err := ref.ParseDataPath(*c.DefaultDecision) - if err != nil { - return err - } - - if c.DefaultAuthorizationDecision == nil { - s := defaultAuthorizationDecisionPath - c.DefaultAuthorizationDecision = &s - } - - _, err = ref.ParseDataPath(*c.DefaultAuthorizationDecision) - if err != nil { - return err - } - - if c.Labels == nil { - c.Labels = map[string]string{} - } - - c.Labels["id"] = id - c.Labels["version"] = version.Version - - return nil -} - -// GetPersistenceDirectory returns the configured persistence directory, or $PWD/.opa if none is configured -func (c Config) GetPersistenceDirectory() (string, error) { - if c.PersistenceDirectory == nil { - pwd, err := os.Getwd() - if err != nil { - return "", err - } - return filepath.Join(pwd, ".opa"), nil - } - return *c.PersistenceDirectory, nil -} - -// ActiveConfig returns OPA's active configuration -// with the credentials and crypto keys removed -func (c *Config) ActiveConfig() (interface{}, error) { - bs, err := json.Marshal(c) - if err != nil { - return nil, err - } - - var result map[string]interface{} - if err := util.UnmarshalJSON(bs, &result); err != nil { - return nil, err - } - for k, e := range c.Extra { - var v any - if err := util.UnmarshalJSON(e, &v); err != nil { - return nil, err - } - result[k] = v - } - - if err := 
removeServiceCredentials(result["services"]); err != nil { - return nil, err - } - - if err := removeCryptoKeys(result["keys"]); err != nil { - return nil, err - } - - return result, nil -} - -func removeServiceCredentials(x interface{}) error { - switch x := x.(type) { - case nil: - return nil - case []interface{}: - for _, v := range x { - err := removeKey(v, "credentials") - if err != nil { - return err - } - } - - case map[string]interface{}: - for _, v := range x { - err := removeKey(v, "credentials") - if err != nil { - return err - } - } - default: - return fmt.Errorf("illegal service config type: %T", x) - } - - return nil -} - -func removeCryptoKeys(x interface{}) error { - switch x := x.(type) { - case nil: - return nil - case map[string]interface{}: - for _, v := range x { - err := removeKey(v, "key", "private_key") - if err != nil { - return err - } - } - default: - return fmt.Errorf("illegal keys config type: %T", x) - } - - return nil -} - -func removeKey(x interface{}, keys ...string) error { - val, ok := x.(map[string]interface{}) - if !ok { - return fmt.Errorf("type assertion error") - } - - for _, key := range keys { - delete(val, key) - } - - return nil -} - -const ( - defaultDecisionPath = "/system/main" - defaultAuthorizationDecisionPath = "/system/authz/allow" -) diff --git a/vendor/github.com/open-policy-agent/opa/format/format.go b/vendor/github.com/open-policy-agent/opa/format/format.go deleted file mode 100644 index 43e5594669..0000000000 --- a/vendor/github.com/open-policy-agent/opa/format/format.go +++ /dev/null @@ -1,1640 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package format implements formatting of Rego source files. 
-package format - -import ( - "bytes" - "fmt" - "regexp" - "sort" - "strings" - "unicode" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/future" - "github.com/open-policy-agent/opa/types" -) - -// Opts lets you control the code formatting via `AstWithOpts()`. -type Opts struct { - // IgnoreLocations instructs the formatter not to use the AST nodes' locations - // into account when laying out the code: notably, when the input is the result - // of partial evaluation, arguments maybe have been shuffled around, but still - // carry along their original source locations. - IgnoreLocations bool - - // RegoVersion is the version of Rego to format code for. - RegoVersion ast.RegoVersion - - // ParserOptions is the parser options used when parsing the module to be formatted. - ParserOptions *ast.ParserOptions -} - -// defaultLocationFile is the file name used in `Ast()` for terms -// without a location, as could happen when pretty-printing the -// results of partial eval. -const defaultLocationFile = "__format_default__" - -// Source formats a Rego source file. The bytes provided must describe a complete -// Rego module. If they don't, Source will return an error resulting from the attempt -// to parse the bytes. -func Source(filename string, src []byte) ([]byte, error) { - return SourceWithOpts(filename, src, Opts{}) -} - -func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { - var parserOpts ast.ParserOptions - if opts.ParserOptions != nil { - parserOpts = *opts.ParserOptions - } else { - if opts.RegoVersion == ast.RegoV1 { - // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported. - // Otherwise, we'll default to the default rego-version. 
- parserOpts.RegoVersion = ast.RegoV1 - } - } - - module, err := ast.ParseModuleWithOpts(filename, string(src), parserOpts) - if err != nil { - return nil, err - } - - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 { - checkOpts := ast.NewRegoCheckOptions() - // The module is parsed as v0, so we need to disable checks that will be automatically amended by the AstWithOpts call anyways. - checkOpts.RequireIfKeyword = false - checkOpts.RequireContainsKeyword = false - checkOpts.RequireRuleBodyOrValue = false - errors := ast.CheckRegoV1WithOptions(module, checkOpts) - if len(errors) > 0 { - return nil, errors - } - } - - formatted, err := AstWithOpts(module, opts) - if err != nil { - return nil, fmt.Errorf("%s: %v", filename, err) - } - - return formatted, nil -} - -// MustAst is a helper function to format a Rego AST element. If any errors -// occurs this function will panic. This is mostly used for test -func MustAst(x interface{}) []byte { - bs, err := Ast(x) - if err != nil { - panic(err) - } - return bs -} - -// MustAstWithOpts is a helper function to format a Rego AST element. If any errors -// occurs this function will panic. This is mostly used for test -func MustAstWithOpts(x interface{}, opts Opts) []byte { - bs, err := AstWithOpts(x, opts) - if err != nil { - panic(err) - } - return bs -} - -// Ast formats a Rego AST element. If the passed value is not a valid AST -// element, Ast returns nil and an error. If AST nodes are missing locations -// an arbitrary location will be used. -func Ast(x interface{}) ([]byte, error) { - return AstWithOpts(x, Opts{}) -} - -type fmtOpts struct { - // When the future keyword "contains" is imported, all the pretty-printed - // modules will use that format for partial sets. - // NOTE(sr): For ref-head rules, this will be the default behaviour, since - // we need "contains" to disambiguate complete rules from partial sets. 
- contains bool - - // Same logic applies as for "contains": if `future.keywords.if` (or all - // future keywords) is imported, we'll render rules that can use `if` with - // `if`. - ifs bool - - // We check all rule ref heads to see if any of them _requires_ support - // for ref heads -- if they do, we'll print all of them in a different way - // than if they don't. - refHeads bool - - regoV1 bool - futureKeywords []string -} - -func (o fmtOpts) keywords() []string { - if o.regoV1 { - return ast.KeywordsV1[:] - } - kws := ast.KeywordsV0[:] - return append(kws, o.futureKeywords...) -} - -func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { - // The node has to be deep copied because it may be mutated below. Alternatively, - // we could avoid the copy by checking if mutation will occur first. For now, - // since format is not latency sensitive, just deep copy in all cases. - x = ast.Copy(x) - - wildcards := map[ast.Var]*ast.Term{} - - // NOTE(sr): When the formatter encounters a call to internal.member_2 - // or internal.member_3, it will sugarize them into usage of the `in` - // operator. It has to ensure that the proper future keyword import is - // present. - extraFutureKeywordImports := map[string]struct{}{} - - o := fmtOpts{} - - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 { - o.regoV1 = true - o.ifs = true - o.contains = true - } - - // Preprocess the AST. Set any required defaults and calculate - // values required for printing the formatted output. 
- ast.WalkNodes(x, func(x ast.Node) bool { - switch n := x.(type) { - case ast.Body: - if len(n) == 0 { - return false - } - case *ast.Term: - unmangleWildcardVar(wildcards, n) - - case *ast.Expr: - switch { - case n.IsCall() && ast.Member.Ref().Equal(n.Operator()) || ast.MemberWithKey.Ref().Equal(n.Operator()): - extraFutureKeywordImports["in"] = struct{}{} - case n.IsEvery(): - extraFutureKeywordImports["every"] = struct{}{} - } - - case *ast.Import: - if kw, ok := future.WhichFutureKeyword(n); ok { - o.futureKeywords = append(o.futureKeywords, kw) - } - - switch { - case isRegoV1Compatible(n): - o.contains = true - o.ifs = true - case future.IsAllFutureKeywords(n): - o.contains = true - o.ifs = true - case future.IsFutureKeyword(n, "contains"): - o.contains = true - case future.IsFutureKeyword(n, "if"): - o.ifs = true - } - - case *ast.Rule: - if len(n.Head.Ref()) > 2 { - o.refHeads = true - } - if len(n.Head.Ref()) == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x" - o.refHeads = true - } - } - - if opts.IgnoreLocations || x.Loc() == nil { - x.SetLoc(defaultLocation(x)) - } - return false - }) - - w := &writer{ - indent: "\t", - errs: make([]*ast.Error, 0), - fmtOpts: o, - } - - switch x := x.(type) { - case *ast.Module: - if opts.RegoVersion == ast.RegoV1 { - x.Imports = filterRegoV1Import(x.Imports) - } else if opts.RegoVersion == ast.RegoV0CompatV1 { - x.Imports = ensureRegoV1Import(x.Imports) - } - - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 || moduleIsRegoV1Compatible(x) { - x.Imports = future.FilterFutureImports(x.Imports) - } else { - for kw := range extraFutureKeywordImports { - x.Imports = ensureFutureKeywordImport(x.Imports, kw) - } - } - w.writeModule(x) - case *ast.Package: - w.writePackage(x, nil) - case *ast.Import: - w.writeImports([]*ast.Import{x}, nil) - case *ast.Rule: - w.writeRule(x, false /* isElse */, nil) - case *ast.Head: - w.writeHead(x, - false, // isDefault - false, // 
isExpandedConst - nil) - case ast.Body: - w.writeBody(x, nil) - case *ast.Expr: - w.writeExpr(x, nil) - case *ast.With: - w.writeWith(x, nil, false) - case *ast.Term: - w.writeTerm(x, nil) - case ast.Value: - w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil) - case *ast.Comment: - w.writeComments([]*ast.Comment{x}) - default: - return nil, fmt.Errorf("not an ast element: %v", x) - } - - if len(w.errs) > 0 { - return nil, w.errs - } - return squashTrailingNewlines(w.buf.Bytes()), nil -} - -func unmangleWildcardVar(wildcards map[ast.Var]*ast.Term, n *ast.Term) { - - v, ok := n.Value.(ast.Var) - if !ok || !v.IsWildcard() { - return - } - - first, ok := wildcards[v] - if !ok { - wildcards[v] = n - return - } - - w := v[len(ast.WildcardPrefix):] - - // Prepend an underscore to ensure the variable will parse. - if len(w) == 0 || w[0] != '_' { - w = "_" + w - } - - if first != nil { - first.Value = w - wildcards[v] = nil - } - - n.Value = w -} - -func squashTrailingNewlines(bs []byte) []byte { - if bytes.HasSuffix(bs, []byte("\n")) { - return append(bytes.TrimRight(bs, "\n"), '\n') - } - return bs -} - -func defaultLocation(x ast.Node) *ast.Location { - return ast.NewLocation([]byte(x.String()), defaultLocationFile, 1, 1) -} - -type writer struct { - buf bytes.Buffer - - indent string - level int - inline bool - beforeEnd *ast.Comment - delay bool - errs ast.Errors - fmtOpts fmtOpts -} - -func (w *writer) writeModule(module *ast.Module) { - var pkg *ast.Package - var others []interface{} - var comments []*ast.Comment - visitor := ast.NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case *ast.Comment: - comments = append(comments, x) - return true - case *ast.Import, *ast.Rule: - others = append(others, x) - return true - case *ast.Package: - pkg = x - return true - default: - return false - } - }) - visitor.Walk(module) - - sort.Slice(comments, func(i, j int) bool { - return locLess(comments[i], comments[j]) - }) - - 
sort.Slice(others, func(i, j int) bool { - return locLess(others[i], others[j]) - }) - - comments = trimTrailingWhitespaceInComments(comments) - - comments = w.writePackage(pkg, comments) - var imports []*ast.Import - var rules []*ast.Rule - for len(others) > 0 { - imports, others = gatherImports(others) - comments = w.writeImports(imports, comments) - rules, others = gatherRules(others) - comments = w.writeRules(rules, comments) - } - - for i, c := range comments { - w.writeLine(c.String()) - if i == len(comments)-1 { - w.write("\n") - } - } -} - -func trimTrailingWhitespaceInComments(comments []*ast.Comment) []*ast.Comment { - for _, c := range comments { - c.Text = bytes.TrimRightFunc(c.Text, unicode.IsSpace) - } - - return comments -} - -func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, pkg.Location) - - w.startLine() - - // Omit head as all packages have the DefaultRootDocument prepended at parse time. - path := make(ast.Ref, len(pkg.Path)-1) - path[0] = ast.VarTerm(string(pkg.Path[1].Value.(ast.String))) - copy(path[1:], pkg.Path[2:]) - - w.write("package ") - w.writeRef(path) - - w.blankLine() - - return comments -} - -func (w *writer) writeComments(comments []*ast.Comment) { - for i := 0; i < len(comments); i++ { - if i > 0 && locCmp(comments[i], comments[i-1]) > 1 { - w.blankLine() - } - w.writeLine(comments[i].String()) - } -} - -func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) []*ast.Comment { - for _, rule := range rules { - comments = w.insertComments(comments, rule.Location) - comments = w.writeRule(rule, false, comments) - w.blankLine() - } - return comments -} - -func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) []*ast.Comment { - if rule == nil { - return comments - } - - if !isElse { - w.startLine() - } - - if rule.Default { - w.write("default ") - } - - // OPA transforms lone bodies like `foo = {"a": "b"}` into 
rules of the form - // `foo = {"a": "b"} { true }` in the AST. We want to preserve that notation - // in the formatted code instead of expanding the bodies into rules, so we - // pretend that the rule has no body in this case. - isExpandedConst := rule.Body.Equal(ast.NewBody(ast.NewExpr(ast.BooleanTerm(true)))) && rule.Else == nil - - comments = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments) - - // this excludes partial sets UNLESS `contains` is used - partialSetException := w.fmtOpts.contains || rule.Head.Value != nil - - if len(rule.Body) == 0 || isExpandedConst { - w.endLine() - return comments - } - - if (w.fmtOpts.regoV1 || w.fmtOpts.ifs) && partialSetException { - w.write(" if") - if len(rule.Body) == 1 { - if rule.Body[0].Location.Row == rule.Head.Location.Row { - w.write(" ") - comments = w.writeExpr(rule.Body[0], comments) - w.endLine() - if rule.Else != nil { - comments = w.writeElse(rule, comments) - } - return comments - } - } - } - w.write(" {") - w.endLine() - w.up() - - comments = w.writeBody(rule.Body, comments) - - var closeLoc *ast.Location - - if len(rule.Head.Args) > 0 { - closeLoc = closingLoc('(', ')', '{', '}', rule.Location) - } else if rule.Head.Key != nil { - closeLoc = closingLoc('[', ']', '{', '}', rule.Location) - } else { - closeLoc = closingLoc(0, 0, '{', '}', rule.Location) - } - - comments = w.insertComments(comments, closeLoc) - - w.down() - w.startLine() - w.write("}") - if rule.Else != nil { - comments = w.writeElse(rule, comments) - } - return comments -} - -func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comment { - // If there was nothing else on the line before the "else" starts - // then preserve this style of else block, otherwise it will be - // started as an "inline" else eg: - // - // p { - // ... - // } - // - // else { - // ... - // } - // - // versus - // - // p { - // ... - // } else { - // ... 
- // } - // - // Note: This doesn't use the `close` as it currently isn't accurate for all - // types of values. Checking the actual line text is the most consistent approach. - wasInline := false - ruleLines := bytes.Split(rule.Location.Text, []byte("\n")) - relativeElseRow := rule.Else.Location.Row - rule.Location.Row - if relativeElseRow > 0 && relativeElseRow < len(ruleLines) { - elseLine := ruleLines[relativeElseRow] - if !bytes.HasPrefix(bytes.TrimSpace(elseLine), []byte("else")) { - wasInline = true - } - } - - // If there are any comments between the closing brace of the previous rule and the start - // of the else block we will always insert a new blank line between them. - hasCommentAbove := len(comments) > 0 && comments[0].Location.Row-rule.Else.Head.Location.Row < 0 || w.beforeEnd != nil - - if !hasCommentAbove && wasInline { - w.write(" ") - } else { - w.blankLine() - w.startLine() - } - - rule.Else.Head.Name = "else" // NOTE(sr): whaaat - rule.Else.Head.Reference = ast.Ref{ast.VarTerm("else")} - rule.Else.Head.Args = nil - comments = w.insertComments(comments, rule.Else.Head.Location) - - if hasCommentAbove && !wasInline { - // The comments would have ended the line, be sure to start one again - // before writing the rest of the "else" rule. 
- w.startLine() - } - - // For backwards compatibility adjust the rule head value location - // TODO: Refactor the logic for inserting comments, or special - // case comments in a rule head value so this can be removed - if rule.Else.Head.Value != nil { - rule.Else.Head.Value.Location = rule.Else.Head.Location - } - - return w.writeRule(rule.Else, true, comments) -} - -func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comments []*ast.Comment) []*ast.Comment { - ref := head.Ref() - if head.Key != nil && head.Value == nil && !head.HasDynamicRef() { - ref = ref.GroundPrefix() - } - if w.fmtOpts.refHeads || len(ref) == 1 { - w.writeRef(ref) - } else { - w.write(ref[0].String()) - w.write("[") - w.write(ref[1].String()) - w.write("]") - } - - if len(head.Args) > 0 { - w.write("(") - var args []interface{} - for _, arg := range head.Args { - args = append(args, arg) - } - comments = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter()) - w.write(")") - } - if head.Key != nil { - if w.fmtOpts.contains && head.Value == nil { - w.write(" contains ") - comments = w.writeTerm(head.Key, comments) - } else if head.Value == nil { // no `if` for p[x] notation - w.write("[") - comments = w.writeTerm(head.Key, comments) - w.write("]") - } - } - - if head.Value != nil && - (head.Key != nil || ast.Compare(head.Value, ast.BooleanTerm(true)) != 0 || isExpandedConst || isDefault) { - - // in rego v1, explicitly print value for ref-head constants that aren't partial set assignments, e.g.: - // * a -> parser error, won't reach here - // * a.b -> a contains "b" - // * a.b.c -> a.b.c := true - // * a.b.c.d -> a.b.c.d := true - isRegoV1RefConst := w.fmtOpts.regoV1 && isExpandedConst && head.Key == nil && len(head.Args) == 0 - - if head.Location == head.Value.Location && - head.Name != "else" && - ast.Compare(head.Value, ast.BooleanTerm(true)) == 0 && - !isRegoV1RefConst { - // If the value location is the same as 
the location of the head, - // we know that the value is generated, i.e. f(1) - // Don't print the value (` = true`) as it is implied. - return comments - } - - if head.Assign || w.fmtOpts.regoV1 { - // preserve assignment operator, and enforce it if formatting for Rego v1 - w.write(" := ") - } else { - w.write(" = ") - } - comments = w.writeTerm(head.Value, comments) - } - return comments -} - -func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) []*ast.Comment { - before, at, comments := partitionComments(comments, loc) - w.writeComments(before) - if len(before) > 0 && loc.Row-before[len(before)-1].Location.Row > 1 { - w.blankLine() - } - - w.beforeLineEnd(at) - return comments -} - -func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, body.Loc()) - for i, expr := range body { - // Insert a blank line in before the expression if it was not right - // after the previous expression. - if i > 0 { - lastRow := body[i-1].Location.Row - for _, c := range body[i-1].Location.Text { - if c == '\n' { - lastRow++ - } - } - if expr.Location.Row > lastRow+1 { - w.blankLine() - } - } - w.startLine() - - comments = w.writeExpr(expr, comments) - w.endLine() - } - return comments -} - -func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, expr.Location) - if !w.inline { - w.startLine() - } - - if expr.Negated { - w.write("not ") - } - - switch t := expr.Terms.(type) { - case *ast.SomeDecl: - comments = w.writeSomeDecl(t, comments) - case *ast.Every: - comments = w.writeEvery(t, comments) - case []*ast.Term: - comments = w.writeFunctionCall(expr, comments) - case *ast.Term: - comments = w.writeTerm(t, comments) - } - - var indented bool - for i, with := range expr.With { - if i == 0 || with.Location.Row == expr.With[i-1].Location.Row { // we're on the same line - comments = w.writeWith(with, comments, false) - } else { 
// we're on a new line - if !indented { - indented = true - - w.up() - defer w.down() - } - w.endLine() - w.startLine() - comments = w.writeWith(with, comments, true) - } - } - - return comments -} - -func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, decl.Location) - w.write("some ") - - row := decl.Location.Row - - for i, term := range decl.Symbols { - switch val := term.Value.(type) { - case ast.Var: - if term.Location.Row > row { - w.endLine() - w.startLine() - w.write(w.indent) - row = term.Location.Row - } else if i > 0 { - w.write(" ") - } - - comments = w.writeTerm(term, comments) - - if i < len(decl.Symbols)-1 { - w.write(",") - } - case ast.Call: - comments = w.writeInOperator(false, val[1:], comments, decl.Location, ast.BuiltinMap[val[0].String()].Decl) - } - } - - return comments -} - -func (w *writer) writeEvery(every *ast.Every, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, every.Location) - w.write("every ") - if every.Key != nil { - comments = w.writeTerm(every.Key, comments) - w.write(", ") - } - comments = w.writeTerm(every.Value, comments) - w.write(" in ") - comments = w.writeTerm(every.Domain, comments) - w.write(" {") - comments = w.writeComprehensionBody('{', '}', every.Body, every.Loc(), every.Loc(), comments) - - if len(every.Body) == 1 && - every.Body[0].Location.Row == every.Location.Row { - w.write(" ") - } - w.write("}") - return comments -} - -func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment { - - terms := expr.Terms.([]*ast.Term) - operator := terms[0].Value.String() - - switch operator { - case ast.Member.Name, ast.MemberWithKey.Name: - return w.writeInOperator(false, terms[1:], comments, terms[0].Location, ast.BuiltinMap[terms[0].String()].Decl) - } - - bi, ok := ast.BuiltinMap[operator] - if !ok || bi.Infix == "" { - return w.writeFunctionCallPlain(terms, comments) - } - - 
numDeclArgs := len(bi.Decl.Args()) - numCallArgs := len(terms) - 1 - - switch numCallArgs { - case numDeclArgs: // Print infix where result is unassigned (e.g., x != y) - comments = w.writeTerm(terms[1], comments) - w.write(" " + bi.Infix + " ") - return w.writeTerm(terms[2], comments) - - case numDeclArgs + 1: // Print infix where result is assigned (e.g., z = x + y) - comments = w.writeTerm(terms[3], comments) - w.write(" " + ast.Equality.Infix + " ") - comments = w.writeTerm(terms[1], comments) - w.write(" " + bi.Infix + " ") - comments = w.writeTerm(terms[2], comments) - return comments - } - // NOTE(Trolloldem): in this point we are operating with a built-in function with the - // wrong arity even when the assignment notation is used - w.errs = append(w.errs, ArityFormatMismatchError(terms[1:], terms[0].String(), terms[0].Location, bi.Decl)) - return w.writeFunctionCallPlain(terms, comments) -} - -func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) []*ast.Comment { - w.write(terms[0].String() + "(") - defer w.write(")") - args := make([]interface{}, len(terms)-1) - for i, t := range terms[1:] { - args[i] = t - } - loc := terms[0].Location - return w.writeIterable(args, loc, closingLoc(0, 0, '(', ')', loc), comments, w.listWriter()) -} - -func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented bool) []*ast.Comment { - comments = w.insertComments(comments, with.Location) - if !indented { - w.write(" ") - } - w.write("with ") - comments = w.writeTerm(with.Target, comments) - w.write(" as ") - return w.writeTerm(with.Value, comments) -} - -func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) []*ast.Comment { - return w.writeTermParens(false, term, comments) -} - -func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, term.Location) - if !w.inline { - w.startLine() - } - - switch x := term.Value.(type) { - case 
ast.Ref: - w.writeRef(x) - case ast.Object: - comments = w.writeObject(x, term.Location, comments) - case *ast.Array: - comments = w.writeArray(x, term.Location, comments) - case ast.Set: - comments = w.writeSet(x, term.Location, comments) - case *ast.ArrayComprehension: - comments = w.writeArrayComprehension(x, term.Location, comments) - case *ast.ObjectComprehension: - comments = w.writeObjectComprehension(x, term.Location, comments) - case *ast.SetComprehension: - comments = w.writeSetComprehension(x, term.Location, comments) - case ast.String: - if term.Location.Text[0] == '`' { - // To preserve raw strings, we need to output the original text, - // not what x.String() would give us. - w.write(string(term.Location.Text)) - } else { - w.write(x.String()) - } - case ast.Var: - w.write(w.formatVar(x)) - case ast.Call: - comments = w.writeCall(parens, x, term.Location, comments) - case fmt.Stringer: - w.write(x.String()) - } - - if !w.inline { - w.startLine() - } - return comments -} - -func (w *writer) writeRef(x ast.Ref) { - if len(x) > 0 { - parens := false - _, ok := x[0].Value.(ast.Call) - if ok { - parens = x[0].Location.Text[0] == 40 // Starts with "(" - } - w.writeTermParens(parens, x[0], nil) - path := x[1:] - for _, t := range path { - switch p := t.Value.(type) { - case ast.String: - w.writeRefStringPath(p) - case ast.Var: - w.writeBracketed(w.formatVar(p)) - default: - w.write("[") - w.writeTerm(t, nil) - w.write("]") - } - } - } -} - -func (w *writer) writeBracketed(str string) { - w.write("[" + str + "]") -} - -var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") - -func (w *writer) writeRefStringPath(s ast.String) { - str := string(s) - if varRegexp.MatchString(str) && !ast.IsInKeywords(str, w.fmtOpts.keywords()) { - w.write("." 
+ str) - } else { - w.writeBracketed(s.String()) - } -} - -func (w *writer) formatVar(v ast.Var) string { - if v.IsWildcard() { - return ast.Wildcard.String() - } - return v.String() -} - -func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - bi, ok := ast.BuiltinMap[x[0].String()] - if !ok || bi.Infix == "" { - return w.writeFunctionCallPlain(x, comments) - } - - if bi.Infix == "in" { - // NOTE(sr): `in` requires special handling, mirroring what happens in the parser, - // since there can be one or two lhs arguments. - return w.writeInOperator(true, x[1:], comments, loc, bi.Decl) - } - - // TODO(tsandall): improve to consider precedence? - if parens { - w.write("(") - } - - // NOTE(Trolloldem): writeCall is only invoked when the function call is a term - // of another function. The only valid arity is the one of the - // built-in function - if len(bi.Decl.Args()) != len(x)-1 { - w.errs = append(w.errs, ArityFormatMismatchError(x[1:], x[0].String(), loc, bi.Decl)) - return comments - } - - comments = w.writeTermParens(true, x[1], comments) - w.write(" " + bi.Infix + " ") - comments = w.writeTermParens(true, x[2], comments) - if parens { - w.write(")") - } - - return comments -} - -func (w *writer) writeInOperator(parens bool, operands []*ast.Term, comments []*ast.Comment, loc *ast.Location, f *types.Function) []*ast.Comment { - - if len(operands) != len(f.Args()) { - // The number of operands does not math the arity of the `in` operator - operator := ast.Member.Name - if len(f.Args()) == 3 { - operator = ast.MemberWithKey.Name - } - w.errs = append(w.errs, ArityFormatMismatchError(operands, operator, loc, f)) - return comments - } - kw := "in" - switch len(operands) { - case 2: - comments = w.writeTermParens(true, operands[0], comments) - w.write(" ") - w.write(kw) - w.write(" ") - comments = w.writeTermParens(true, operands[1], comments) - case 3: - if parens { - w.write("(") - defer w.write(")") - } - 
comments = w.writeTermParens(true, operands[0], comments) - w.write(", ") - comments = w.writeTermParens(true, operands[1], comments) - w.write(" ") - w.write(kw) - w.write(" ") - comments = w.writeTermParens(true, operands[2], comments) - } - return comments -} - -func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("{") - defer w.write("}") - - var s []interface{} - obj.Foreach(func(k, v *ast.Term) { - s = append(s, ast.Item(k, v)) - }) - return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.objectWriter()) -} - -func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("[") - defer w.write("]") - - var s []interface{} - arr.Foreach(func(t *ast.Term) { - s = append(s, t) - }) - return w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter()) -} - -func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - - if set.Len() == 0 { - w.write("set()") - return w.insertComments(comments, closingLoc(0, 0, '(', ')', loc)) - } - - w.write("{") - defer w.write("}") - - var s []interface{} - set.Foreach(func(t *ast.Term) { - s = append(s, t) - }) - return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter()) -} - -func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("[") - defer w.write("]") - - return w.writeComprehension('[', ']', arr.Term, arr.Body, loc, comments) -} - -func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("{") - defer w.write("}") - - return w.writeComprehension('{', '}', set.Term, set.Body, loc, comments) -} - -func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - 
w.write("{") - defer w.write("}") - - object.Value.Location = object.Key.Location // Ensure the value is not written on the next line. - if object.Key.Location.Row-loc.Row > 1 { - w.endLine() - w.startLine() - } - - comments = w.writeTerm(object.Key, comments) - w.write(": ") - return w.writeComprehension('{', '}', object.Value, object.Body, loc, comments) -} - -func (w *writer) writeComprehension(open, close byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - if term.Location.Row-loc.Row >= 1 { - w.endLine() - w.startLine() - } - - parens := false - _, ok := term.Value.(ast.Call) - if ok { - parens = term.Location.Text[0] == 40 // Starts with "(" - } - comments = w.writeTermParens(parens, term, comments) - w.write(" |") - - return w.writeComprehensionBody(open, close, body, term.Location, loc, comments) -} - -func (w *writer) writeComprehensionBody(open, close byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) []*ast.Comment { - exprs := make([]interface{}, 0, len(body)) - for _, expr := range body { - exprs = append(exprs, expr) - } - lines := groupIterable(exprs, term) - - if body.Loc().Row-term.Row > 0 || len(lines) > 1 { - w.endLine() - w.up() - defer w.startLine() - defer w.down() - - comments = w.writeBody(body, comments) - } else { - w.write(" ") - i := 0 - for ; i < len(body)-1; i++ { - comments = w.writeExpr(body[i], comments) - w.write("; ") - } - comments = w.writeExpr(body[i], comments) - } - - return w.insertComments(comments, closingLoc(0, 0, open, close, compr)) -} - -func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) []*ast.Comment { - m, comments := mapImportsToComments(imports, comments) - - groups := groupImports(imports) - for _, group := range groups { - comments = w.insertComments(comments, group[0].Loc()) - - // Sort imports within a newline grouping. 
- sort.Slice(group, func(i, j int) bool { - a := group[i] - b := group[j] - return a.Compare(b) < 0 - }) - for _, i := range group { - w.startLine() - w.writeImport(i) - if c, ok := m[i]; ok { - w.write(" " + c.String()) - } - w.endLine() - } - w.blankLine() - } - - return comments -} - -func (w *writer) writeImport(imp *ast.Import) { - path := imp.Path.Value.(ast.Ref) - - buf := []string{"import"} - - if _, ok := future.WhichFutureKeyword(imp); ok { - // We don't want to wrap future.keywords imports in parens, so we create a new writer that doesn't - w2 := writer{ - buf: bytes.Buffer{}, - } - w2.writeRef(path) - buf = append(buf, w2.buf.String()) - } else { - buf = append(buf, path.String()) - } - - if len(imp.Alias) > 0 { - buf = append(buf, "as "+imp.Alias.String()) - } - w.write(strings.Join(buf, " ")) -} - -type entryWriter func(interface{}, []*ast.Comment) []*ast.Comment - -func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) []*ast.Comment { - lines := groupIterable(elements, last) - if len(lines) > 1 { - w.delayBeforeEnd() - w.startMultilineSeq() - } - - i := 0 - for ; i < len(lines)-1; i++ { - comments = w.writeIterableLine(lines[i], comments, fn) - w.write(",") - - w.endLine() - w.startLine() - } - - comments = w.writeIterableLine(lines[i], comments, fn) - - if len(lines) > 1 { - w.write(",") - w.endLine() - comments = w.insertComments(comments, close) - w.down() - w.startLine() - } - - return comments -} - -func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comment, fn entryWriter) []*ast.Comment { - if len(elements) == 0 { - return comments - } - - i := 0 - for ; i < len(elements)-1; i++ { - comments = fn(elements[i], comments) - w.write(", ") - } - - return fn(elements[i], comments) -} - -func (w *writer) objectWriter() entryWriter { - return func(x interface{}, comments []*ast.Comment) []*ast.Comment { - entry := x.([2]*ast.Term) - - call, 
isCall := entry[0].Value.(ast.Call) - - paren := false - if isCall && ast.Or.Ref().Equal(call[0].Value) && entry[0].Location.Text[0] == 40 { // Starts with "(" - paren = true - w.write("(") - } - - comments = w.writeTerm(entry[0], comments) - if paren { - w.write(")") - } - - w.write(": ") - - call, isCall = entry[1].Value.(ast.Call) - if isCall && ast.Or.Ref().Equal(call[0].Value) && entry[1].Location.Text[0] == 40 { // Starts with "(" - w.write("(") - defer w.write(")") - } - - return w.writeTerm(entry[1], comments) - } -} - -func (w *writer) listWriter() entryWriter { - return func(x interface{}, comments []*ast.Comment) []*ast.Comment { - t, ok := x.(*ast.Term) - if ok { - call, isCall := t.Value.(ast.Call) - if isCall && ast.Or.Ref().Equal(call[0].Value) && t.Location.Text[0] == 40 { // Starts with "(" - w.write("(") - defer w.write(")") - } - } - - return w.writeTerm(t, comments) - } -} - -// groupIterable will group the `elements` slice into slices according to their -// location: anything on the same line will be put into a slice. -func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} { - // Generated vars occur in the AST when we're rendering the result of - // partial evaluation in a bundle build with optimization. - // Those variables, and wildcard variables have the "default location", - // set in `Ast()`). That is no proper file location, and the grouping - // based on source location will yield a bad result. - // Another case is generated variables: they do have proper file locations, - // but their row/col information may no longer match their AST location. - // So, for generated variables, we also don't trust the location, but - // keep them ungrouped. - def := false // default location found? 
- for _, elem := range elements { - ast.WalkTerms(elem, func(t *ast.Term) bool { - if t.Location.File == defaultLocationFile { - def = true - return true - } - return false - }) - ast.WalkVars(elem, func(v ast.Var) bool { - if v.IsGenerated() { - def = true - return true - } - return false - }) - if def { // return as-is - return [][]interface{}{elements} - } - } - sort.Slice(elements, func(i, j int) bool { - return locLess(elements[i], elements[j]) - }) - - var lines [][]interface{} - cur := make([]interface{}, 0, len(elements)) - for i, t := range elements { - elem := t - loc := getLoc(elem) - lineDiff := loc.Row - last.Row - if lineDiff > 0 && i > 0 { - lines = append(lines, cur) - cur = nil - } - - last = loc - cur = append(cur, elem) - } - return append(lines, cur) -} - -func mapImportsToComments(imports []*ast.Import, comments []*ast.Comment) (map[*ast.Import]*ast.Comment, []*ast.Comment) { - var leftovers []*ast.Comment - m := map[*ast.Import]*ast.Comment{} - - for _, c := range comments { - matched := false - for _, i := range imports { - if c.Loc().Row == i.Loc().Row { - m[i] = c - matched = true - break - } - } - if !matched { - leftovers = append(leftovers, c) - } - } - - return m, leftovers -} - -func groupImports(imports []*ast.Import) [][]*ast.Import { - switch len(imports) { // shortcuts - case 0: - return nil - case 1: - return [][]*ast.Import{imports} - } - // there are >=2 imports to group - - var groups [][]*ast.Import - group := []*ast.Import{imports[0]} - - for _, i := range imports[1:] { - last := group[len(group)-1] - - // nil-location imports have been sorted up to come first - if i.Loc() != nil && last.Loc() != nil && // first import with a location, or - i.Loc().Row-last.Loc().Row > 1 { // more than one row apart from previous import - - // start a new group - groups = append(groups, group) - group = []*ast.Import{} - } - group = append(group, i) - } - if len(group) > 0 { - groups = append(groups, group) - } - - return groups -} - -func 
partitionComments(comments []*ast.Comment, l *ast.Location) (before []*ast.Comment, at *ast.Comment, after []*ast.Comment) { - for _, c := range comments { - switch cmp := c.Location.Row - l.Row; { - case cmp < 0: - before = append(before, c) - case cmp > 0: - after = append(after, c) - case cmp == 0: - at = c - } - } - - return before, at, after -} - -func gatherImports(others []interface{}) (imports []*ast.Import, rest []interface{}) { - i := 0 -loop: - for ; i < len(others); i++ { - switch x := others[i].(type) { - case *ast.Import: - imports = append(imports, x) - case *ast.Rule: - break loop - } - } - return imports, others[i:] -} - -func gatherRules(others []interface{}) (rules []*ast.Rule, rest []interface{}) { - i := 0 -loop: - for ; i < len(others); i++ { - switch x := others[i].(type) { - case *ast.Rule: - rules = append(rules, x) - case *ast.Import: - break loop - } - } - return rules, others[i:] -} - -func locLess(a, b interface{}) bool { - return locCmp(a, b) < 0 -} - -func locCmp(a, b interface{}) int { - al := getLoc(a) - bl := getLoc(b) - switch { - case al == nil && bl == nil: - return 0 - case al == nil: - return -1 - case bl == nil: - return 1 - } - - if cmp := al.Row - bl.Row; cmp != 0 { - return cmp - - } - return al.Col - bl.Col -} - -func getLoc(x interface{}) *ast.Location { - switch x := x.(type) { - case ast.Node: // *ast.Head, *ast.Expr, *ast.With, *ast.Term - return x.Loc() - case *ast.Location: - return x - case [2]*ast.Term: // Special case to allow for easy printing of objects. - return x[0].Location - default: - panic("Not reached") - } -} - -func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.Location { - i, offset := 0, 0 - - // Skip past parens/brackets/braces in rule heads. 
- if skipOpen > 0 { - i, offset = skipPast(skipOpen, skipClose, loc) - } - - for ; i < len(loc.Text); i++ { - if loc.Text[i] == open { - break - } - } - - if i >= len(loc.Text) { - return &ast.Location{Row: -1} - } - - state := 1 - for state > 0 { - i++ - if i >= len(loc.Text) { - return &ast.Location{Row: -1} - } - - switch loc.Text[i] { - case open: - state++ - case close: - state-- - case '\n': - offset++ - } - } - - return &ast.Location{Row: loc.Row + offset} -} - -func skipPast(open, close byte, loc *ast.Location) (int, int) { - i := 0 - for ; i < len(loc.Text); i++ { - if loc.Text[i] == open { - break - } - } - - state := 1 - offset := 0 - for state > 0 { - i++ - if i >= len(loc.Text) { - return i, offset - } - - switch loc.Text[i] { - case open: - state++ - case close: - state-- - case '\n': - offset++ - } - } - - return i, offset -} - -// startLine begins a line with the current indentation level. -func (w *writer) startLine() { - w.inline = true - for i := 0; i < w.level; i++ { - w.write(w.indent) - } -} - -// endLine ends a line with a newline. -func (w *writer) endLine() { - w.inline = false - if w.beforeEnd != nil && !w.delay { - w.write(" " + w.beforeEnd.String()) - w.beforeEnd = nil - } - w.delay = false - w.write("\n") -} - -// beforeLineEnd registers a comment to be printed at the end of the current line. -func (w *writer) beforeLineEnd(c *ast.Comment) { - if w.beforeEnd != nil { - if c == nil { - return - } - panic("overwriting non-nil beforeEnd") - } - w.beforeEnd = c -} - -func (w *writer) delayBeforeEnd() { - w.delay = true -} - -// line prints a blank line. If the writer is currently in the middle of a line, -// line ends it and then prints a blank one. -func (w *writer) blankLine() { - if w.inline { - w.endLine() - } - w.write("\n") -} - -// write the input string and writes it to the buffer. -func (w *writer) write(s string) { - w.buf.WriteString(s) -} - -// writeLine writes the string on a newly started line, then terminate the line. 
-func (w *writer) writeLine(s string) { - if !w.inline { - w.startLine() - } - w.write(s) - w.endLine() -} - -func (w *writer) startMultilineSeq() { - w.endLine() - w.up() - w.startLine() -} - -// up increases the indentation level -func (w *writer) up() { - w.level++ -} - -// down decreases the indentation level -func (w *writer) down() { - if w.level == 0 { - panic("negative indentation level") - } - w.level-- -} - -func ensureFutureKeywordImport(imps []*ast.Import, kw string) []*ast.Import { - for _, imp := range imps { - if future.IsAllFutureKeywords(imp) || - future.IsFutureKeyword(imp, kw) || - (future.IsFutureKeyword(imp, "every") && kw == "in") { // "every" implies "in", so we don't need to add both - return imps - } - } - imp := &ast.Import{ - // NOTE: This is a hack to not error on the ref containing a keyword already present in v1. - // A cleaner solution would be to instead allow refs to contain keyword terms. - // E.g. in v1, `import future.keywords["in"]` is valid, but `import future.keywords.in` is not - // as it contains a reserved keyword. - Path: ast.MustParseTerm("future.keywords[\"" + kw + "\"]"), - //Path: ast.MustParseTerm("future.keywords." + kw), - } - imp.Location = defaultLocation(imp) - return append(imps, imp) -} - -func ensureRegoV1Import(imps []*ast.Import) []*ast.Import { - return ensureImport(imps, ast.RegoV1CompatibleRef) -} - -func filterRegoV1Import(imps []*ast.Import) []*ast.Import { - var ret []*ast.Import - for _, imp := range imps { - path := imp.Path.Value.(ast.Ref) - if !ast.RegoV1CompatibleRef.Equal(path) { - ret = append(ret, imp) - } - } - return ret -} - -func ensureImport(imps []*ast.Import, path ast.Ref) []*ast.Import { - for _, imp := range imps { - p := imp.Path.Value.(ast.Ref) - if p.Equal(path) { - return imps - } - } - imp := &ast.Import{ - Path: ast.NewTerm(path), - } - imp.Location = defaultLocation(imp) - return append(imps, imp) -} - -// ArgErrDetail but for `fmt` checks since compiler has not run yet. 
-type ArityFormatErrDetail struct { - Have []string `json:"have"` - Want []string `json:"want"` -} - -// arityMismatchError but for `fmt` checks since the compiler has not run yet. -func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Location, f *types.Function) *ast.Error { - want := make([]string, len(f.Args())) - for i := range f.Args() { - want[i] = types.Sprint(f.Args()[i]) - } - - have := make([]string, len(operands)) - for i := 0; i < len(operands); i++ { - have[i] = ast.TypeName(operands[i].Value) - } - err := ast.NewError(ast.TypeErr, loc, "%s: %s", operator, "arity mismatch") - err.Details = &ArityFormatErrDetail{ - Have: have, - Want: want, - } - return err -} - -// Lines returns the string representation of the detail. -func (d *ArityFormatErrDetail) Lines() []string { - return []string{ - "have: " + "(" + strings.Join(d.Have, ",") + ")", - "want: " + "(" + strings.Join(d.Want, ",") + ")", - } -} - -func moduleIsRegoV1Compatible(m *ast.Module) bool { - for _, imp := range m.Imports { - if isRegoV1Compatible(imp) { - return true - } - } - return false -} - -// isRegoV1Compatible returns true if the passed *ast.Import is `rego.v1` -func isRegoV1Compatible(imp *ast.Import) bool { - path := imp.Path.Value.(ast.Ref) - return len(path) == 2 && - ast.RegoRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("v1")) -} diff --git a/vendor/github.com/open-policy-agent/opa/hooks/hooks.go b/vendor/github.com/open-policy-agent/opa/hooks/hooks.go deleted file mode 100644 index 9659d7b499..0000000000 --- a/vendor/github.com/open-policy-agent/opa/hooks/hooks.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2023 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package hooks - -import ( - "context" - "fmt" - - "github.com/open-policy-agent/opa/config" -) - -// Hook is a hook to be called in some select places in OPA's operation. 
-// -// The base Hook interface is any, and wherever a hook can occur, the calling code -// will check if your hook implements an appropriate interface. If so, your hook -// is called. -// -// This allows you to only hook in to behavior you care about, and it allows the -// OPA to add more hooks in the future. -// -// All hook interfaces in this package have Hook in the name. Hooks must be safe -// for concurrent use. It is expected that hooks are fast; if a hook needs to take -// time, then copy what you need and ensure the hook is async. -// -// When multiple instances of a hook are provided, they are all going to be executed -// in an unspecified order (it's a map-range call underneath). If you need hooks to -// be run in order, you can wrap them into another hook, and configure that one. -type Hook any - -// Hooks is the type used for every struct in OPA that can work with hooks. -type Hooks struct { - m map[Hook]struct{} // we are NOT providing a stable invocation ordering -} - -// New creates a new instance of Hooks. -func New(hs ...Hook) Hooks { - h := Hooks{m: make(map[Hook]struct{}, len(hs))} - for i := range hs { - h.m[hs[i]] = struct{}{} - } - return h -} - -func (hs Hooks) Each(fn func(Hook)) { - for h := range hs.m { - fn(h) - } -} - -// ConfigHook allows inspecting or rewriting the configuration when the plugin -// manager is processing it. -// Note that this hook is not run when the plugin manager is reconfigured. This -// usually only happens when there's a new config from a discovery bundle, and -// for processing _that_, there's `ConfigDiscoveryHook`. -type ConfigHook interface { - OnConfig(context.Context, *config.Config) (*config.Config, error) -} - -// ConfigHook allows inspecting or rewriting the discovered configuration when -// the discovery plugin is processing it. 
-type ConfigDiscoveryHook interface { - OnConfigDiscovery(context.Context, *config.Config) (*config.Config, error) -} - -func (hs Hooks) Validate() error { - for h := range hs.m { - switch h.(type) { - case ConfigHook, - ConfigDiscoveryHook: // OK - default: - return fmt.Errorf("unknown hook type %T", h) - } - } - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go index 064649733a..98093b774e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go +++ b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go @@ -6,15 +6,16 @@ package bundle import ( "context" + "errors" "fmt" "io" "os" "path/filepath" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" - "github.com/open-policy-agent/opa/resolver/wasm" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/resolver/wasm" + "github.com/open-policy-agent/opa/v1/storage" ) // LoadWasmResolversFromStore will lookup all Wasm modules from the store along with the @@ -71,7 +72,7 @@ func LoadWasmResolversFromStore(ctx context.Context, store storage.Store, txn st var resolvers []*wasm.Resolver if len(resolversToLoad) > 0 { // Get a full snapshot of the current data (including any from "outside" the bundles) - data, err := store.Read(ctx, txn, storage.Path{}) + data, err := store.Read(ctx, txn, storage.RootPath) if err != nil { return nil, fmt.Errorf("failed to initialize wasm runtime: %s", err) } @@ -97,7 +98,7 @@ func LoadBundleFromDiskForRegoVersion(regoVersion ast.RegoVersion, path, name st _, err := os.Stat(bundlePath) if err == nil { - f, err := os.Open(filepath.Join(bundlePath)) + f, err := os.Open(bundlePath) if err != nil { return nil, err } @@ -132,7 +133,7 @@ func SaveBundleToDisk(path string, raw io.Reader) (string, error) { } 
if raw == nil { - return "", fmt.Errorf("no raw bundle bytes to persist to disk") + return "", errors.New("no raw bundle bytes to persist to disk") } dest, err := os.CreateTemp(path, ".bundle.tar.gz.*.tmp") diff --git a/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go b/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go index a019cde128..c2392b6775 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go +++ b/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go @@ -114,7 +114,7 @@ func GetAddressRange(ipNet net.IPNet) (net.IP, net.IP) { copy(lastIPMask, ipNet.Mask) for i := range lastIPMask { lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1] - lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1] + lastIP[net.IPv6len-i-1] |= lastIPMask[len(lastIPMask)-i-1] } return firstIP, lastIP diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go index 4d80aeeef9..5d2e778b13 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go @@ -5,9 +5,12 @@ package compiler import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/schemas" - "github.com/open-policy-agent/opa/util" + "errors" + "sync" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/schemas" + "github.com/open-policy-agent/opa/v1/util" ) type SchemaFile string @@ -16,12 +19,35 @@ const ( AuthorizationPolicySchema SchemaFile = "authorizationPolicy.json" ) -var schemaDefinitions = map[SchemaFile]interface{}{} +var schemaDefinitions = map[SchemaFile]any{} + +var loadOnce = sync.OnceValue(func() error { + cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema)) + if err != nil { + return err + } + + if len(cont) == 0 { + return 
errors.New("expected authorization policy schema file to be present") + } + + var schema any + if err := util.Unmarshal(cont, &schema); err != nil { + return err + } + + schemaDefinitions[AuthorizationPolicySchema] = schema + + return nil +}) // VerifyAuthorizationPolicySchema performs type checking on rules against the schema for the Authorization Policy // Input document. // NOTE: The provided compiler should have already run the compilation process on the input modules func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error { + if err := loadOnce(); err != nil { + panic(err) + } rules := getRulesWithDependencies(compiler, ref) @@ -32,7 +58,10 @@ func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error schemaSet := ast.NewSchemaSet() schemaSet.Put(ast.SchemaRootRef, schemaDefinitions[AuthorizationPolicySchema]) - errs := ast.NewCompiler().WithSchemas(schemaSet).PassesTypeCheckRules(rules) + errs := ast.NewCompiler(). + WithDefaultRegoVersion(compiler.DefaultRegoVersion()). + WithSchemas(schemaSet). 
+ PassesTypeCheckRules(rules) if len(errs) > 0 { return errs @@ -64,26 +93,3 @@ func transitiveDependencies(compiler *ast.Compiler, rule *ast.Rule, deps map[*as transitiveDependencies(compiler, other, deps) } } - -func loadAuthorizationPolicySchema() { - - cont, err := schemas.FS.ReadFile(string(AuthorizationPolicySchema)) - if err != nil { - panic(err) - } - - if len(cont) == 0 { - panic("expected authorization policy schema file to be present") - } - - var schema interface{} - if err := util.Unmarshal(cont, &schema); err != nil { - panic(err) - } - - schemaDefinitions[AuthorizationPolicySchema] = schema -} - -func init() { - loadAuthorizationPolicySchema() -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv index 473497abbd..10dc4d482e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv @@ -111,6 +111,9 @@ opa_arith_rem,opa_bf_to_number opa_array_concat,opa_value_type opa_array_concat,opa_array_with_cap opa_array_concat,opa_array_append +opa_array_flatten,opa_value_type +opa_array_flatten,opa_array_with_cap +opa_array_flatten,opa_array_append opa_array_slice,opa_value_type opa_array_slice,opa_number_try_int opa_array_slice,opa_array_with_cap @@ -194,7 +197,9 @@ opa_cidr_contains,parse_ip opa_cidr_contains,opa_boolean parse_cidr,parse_ip parse_cidr,opa_atoi64 +parse_ip,inet_pton4 parse_ip,memchr +inet_pton4,memchr opa_cidr_intersects,opa_value_type opa_cidr_intersects,parse_cidr opa_cidr_intersects,opa_boolean @@ -347,15 +352,15 @@ opa_value_dump,opa_json_writer_write move_freelists,opa_abort opa_heap_blocks_stash,move_freelists opa_heap_blocks_restore,move_freelists -opa_malloc,opa_free_bulk_commit +opa_malloc,merge_sort_blocks +opa_malloc,merge_blocks opa_malloc,opa_abort 
-opa_free_bulk_commit,merge_sort_blocks +merge_sort_blocks,merge_sort_blocks +merge_sort_blocks,merge_blocks opa_realloc,opa_malloc opa_realloc,memcpy -opa_realloc,opa_free opa_builtin_cache_get,opa_abort opa_builtin_cache_set,opa_abort -merge_sort_blocks,merge_sort_blocks opa_memoize_init,opa_malloc opa_memoize_init,opa_object opa_memoize_push,opa_malloc @@ -583,6 +588,8 @@ opa_sets_union,opa_value_type opa_sets_union,opa_set opa_sets_union,opa_set_add opa_sets_union,opa_value_free_shallow +opa_strlen,strlen +opa_itoa,strlen opa_strings_any_prefix_match,opa_value_type opa_strings_any_prefix_match,opa_value_iter opa_strings_any_prefix_match,opa_value_get @@ -719,6 +726,18 @@ opa_strings_upper,opa_abort opa_strings_upper,opa_unicode_to_upper opa_strings_upper,opa_realloc opa_strings_upper,opa_unicode_encode_utf8 +to_string,opa_value_type +to_string,opa_string_terminated +to_string,opa_value_dump +to_string,opa_strlen +to_string,opa_string_allocated +opa_template_string,opa_value_type +opa_template_string,opa_array_with_cap +opa_template_string,to_string +opa_template_string,opa_array_append +opa_template_string,opa_malloc +opa_template_string,memcpy +opa_template_string,opa_string_allocated opa_types_is_number,opa_value_type opa_types_is_number,opa_boolean opa_types_is_string,opa_value_type @@ -789,8 +808,8 @@ opa_object_keys,opa_value_compare opa_object_keys,opa_value_compare_number opa_object_keys,opa_strncmp opa_object_keys,opa_value_compare_object -opa_object_keys,opa_abort opa_object_keys,opa_value_compare_set +opa_object_keys,opa_abort opa_array_free,__opa_value_free opa_array_free,opa_free opa_array_free,opa_free_bulk @@ -836,17 +855,16 @@ opa_number_ref,opa_malloc opa_number_int,opa_malloc opa_string,opa_malloc opa_value_shallow_copy_object,opa_malloc +opa_value_shallow_copy_object,memset opa_value_shallow_copy_object,opa_value_iter opa_value_shallow_copy_object,opa_value_get opa_value_shallow_copy_object,__opa_object_insert 
-opa_value_shallow_copy_set,opa_malloc -opa_value_shallow_copy_set,opa_value_iter -opa_value_shallow_copy_set,opa_set_add opa_set_add,opa_value_hash opa_set_add,opa_value_compare opa_set_add,__opa_set_grow opa_set_add,opa_malloc __opa_set_grow,opa_malloc +__opa_set_grow,memset __opa_set_grow,opa_value_hash __opa_set_grow,opa_value_compare_number __opa_set_grow,opa_strncmp @@ -857,7 +875,9 @@ __opa_set_grow,opa_abort __opa_set_grow,opa_free opa_value_shallow_copy,opa_malloc opa_value_shallow_copy,opa_value_shallow_copy_object -opa_value_shallow_copy,opa_value_shallow_copy_set +opa_value_shallow_copy,memset +opa_value_shallow_copy,opa_set_add +opa_value_shallow_copy,opa_value_iter opa_value_shallow_copy,opa_abort opa_value_transitive_closure,opa_malloc opa_value_transitive_closure,__opa_value_transitive_closure @@ -881,7 +901,9 @@ opa_array_with_cap,opa_free opa_object,opa_malloc opa_set,opa_malloc opa_set_with_cap,opa_malloc +opa_set_with_cap,memset __opa_object_grow,opa_malloc +__opa_object_grow,memset __opa_object_grow,opa_value_hash __opa_object_grow,opa_value_compare_number __opa_object_grow,opa_strncmp @@ -908,170 +930,191 @@ opa_lookup,opa_abort opa_lookup,opa_atoi64 opa_mapping_init,opa_json_parse opa_mapping_lookup,opa_lookup -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,operator\20delete\28void*\29 
-node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,escape\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,operator\20new\28unsigned\20long\29 -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcpy -node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,abort -escape\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,operator\20new\28unsigned\20long\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,lexer::lexer\28char\20const*\2c\20unsigned\20long\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,glob_parse\28lexer*\2c\20node**\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const 
-glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,lexer::~lexer\28\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,operator\20delete\28void*\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,opa_unicode_decode_utf8 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,escape\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 
-glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,node::~node\28\29 -lexer::~lexer\28\29,operator\20delete\28void*\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29 
+node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,operator\20new\28unsigned\20long\29 +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,memmove +node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,operator\20new\28unsigned\20long\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,lexer::lexer\28char\20const*\2c\20unsigned\20long\29 
+glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,glob_parse\28lexer*\2c\20node**\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,lexer::~lexer\28\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,opa_unicode_decode_utf8 
+glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29 +glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,node::re2\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 
+glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,node::~node\28\29 +std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::__libcpp_verbose_abort\28char\20const*\2c\20...\29 +lexer::~lexer\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 lexer::next\28token*\29,strlen lexer::next\28token*\29,operator\20new\28unsigned\20long\29 -lexer::next\28token*\29,memcpy -lexer::next\28token*\29,operator\20delete\28void*\29 -lexer::next\28token*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -lexer::next\28token*\29,abort +lexer::next\28token*\29,memmove +lexer::next\28token*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +lexer::next\28token*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 +lexer::next\28token*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 lexer::next\28token*\29,lexer::fetch_item\28\29 lexer::fetch_item\28\29,opa_unicode_decode_utf8 lexer::fetch_item\28\29,operator\20new\28unsigned\20long\29 -lexer::fetch_item\28\29,memcpy -lexer::fetch_item\28\29,operator\20delete\28void*\29 +lexer::fetch_item\28\29,memmove +lexer::fetch_item\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29 lexer::fetch_item\28\29,lexer::fetch_range\28\29 lexer::fetch_item\28\29,lexer::fetch_text\28int\20const*\29 -lexer::fetch_item\28\29,abort 
+lexer::fetch_item\28\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 lexer::fetch_range\28\29,opa_unicode_decode_utf8 lexer::fetch_range\28\29,operator\20new\28unsigned\20long\29 -lexer::fetch_range\28\29,memcpy -lexer::fetch_range\28\29,operator\20delete\28void*\29 +lexer::fetch_range\28\29,memmove +lexer::fetch_range\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29 lexer::fetch_range\28\29,lexer::fetch_text\28int\20const*\29 -lexer::fetch_range\28\29,abort +lexer::fetch_range\28\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 lexer::fetch_text\28int\20const*\29,opa_unicode_decode_utf8 lexer::fetch_text\28int\20const*\29,operator\20new\28unsigned\20long\29 lexer::fetch_text\28int\20const*\29,memcpy -lexer::fetch_text\28int\20const*\29,operator\20delete\28void*\29 +lexer::fetch_text\28int\20const*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +lexer::fetch_text\28int\20const*\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 +lexer::fetch_text\28int\20const*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 
lexer::fetch_text\28int\20const*\29,opa_malloc +lexer::fetch_text\28int\20const*\29,memmove +lexer::fetch_text\28int\20const*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28token*&&\29 lexer::fetch_text\28int\20const*\29,opa_free -lexer::fetch_text\28int\20const*\29,abort +lexer::fetch_text\28int\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__libcpp_verbose_abort\28char\20const*\2c\20...\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 node::~node\28\29,node::~node\28\29 -node::~node\28\29,operator\20delete\28void*\29 -node::insert\28node*\29,operator\20new\28unsigned\20long\29 -node::insert\28node*\29,memcpy -node::insert\28node*\29,operator\20delete\28void*\29 -node::insert\28node*\29,abort +node::~node\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 
+std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 glob_parse\28lexer*\2c\20node**\29,operator\20new\28unsigned\20long\29 -glob_parse\28lexer*\2c\20node**\29,std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const +glob_parse\28lexer*\2c\20node**\29,operator\20delete\28void*\2c\20unsigned\20long\29 glob_parse\28lexer*\2c\20node**\29,node::~node\28\29 -glob_parse\28lexer*\2c\20node**\29,operator\20delete\28void*\29 -glob_parse\28lexer*\2c\20node**\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 +glob_parse\28lexer*\2c\20node**\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 parser_main\28state*\2c\20lexer*\29,lexer::next\28token*\29 -parser_main\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 +parser_main\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 parser_main\28state*\2c\20lexer*\29,operator\20new\28unsigned\20long\29 -parser_main\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -parser_main\28state*\2c\20lexer*\29,node::insert\28node*\29 -parser_main\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 -parser_main\28state*\2c\20lexer*\29,operator\20delete\28void*\29 
+parser_main\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 +parser_main\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29 +parser_main\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29 +parser_main\28state*\2c\20lexer*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 parser_range\28state*\2c\20lexer*\29,lexer::next\28token*\29 -parser_range\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 -parser_range\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 +parser_range\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29 +parser_range\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 parser_range\28state*\2c\20lexer*\29,opa_unicode_decode_utf8 
parser_range\28state*\2c\20lexer*\29,memcmp -parser_range\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const parser_range\28state*\2c\20lexer*\29,operator\20new\28unsigned\20long\29 -parser_range\28state*\2c\20lexer*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -parser_range\28state*\2c\20lexer*\29,node::insert\28node*\29 -parser_range\28state*\2c\20lexer*\29,operator\20delete\28void*\29 +parser_range\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 +parser_range\28state*\2c\20lexer*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28node*\20const&\29 +parser_range\28state*\2c\20lexer*\29,operator\20delete\28void*\2c\20unsigned\20long\29 opa_glob_match,opa_value_type opa_glob_match,opa_value_iter opa_glob_match,opa_value_get opa_glob_match,operator\20new\28unsigned\20long\29 -opa_glob_match,memcpy -opa_glob_match,operator\20delete\28void*\29 -opa_glob_match,void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29 +opa_glob_match,memmove +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29 +opa_glob_match,operator\20delete\28void*\2c\20unsigned\20long\29 opa_glob_match,opa_builtin_cache_get opa_glob_match,opa_builtin_cache_set 
-opa_glob_match,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -opa_glob_match,std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29 -opa_glob_match,std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -opa_glob_match,glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20const&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29 -opa_glob_match,std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29 -opa_glob_match,std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29 
-opa_glob_match,std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::find\28cache_key\20const&\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 
+opa_glob_match,glob_translate\28char\20const*\2c\20unsigned\20long\2c\20std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::unordered_map\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::erase\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::__hash_map_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>>\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::pair\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\2c\200>\28cache_key&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\29 +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29 opa_glob_match,opa_string opa_glob_match,opa_regex_match -opa_glob_match,abort 
-void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29,operator\20new\28unsigned\20long\29 -void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29,operator\20delete\28void*\29 -void\20std::__1::vector\2c\20std::__1::allocator\20>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>::__push_back_slow_path\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>&&\29,abort -std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29,std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>::operator\28\29\28cache_key\20const&\29\20const -std::__1::__hash_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::find\28cache_key\20const&\29,std::__1::equal_to::operator\28\29\28cache_key\20const&\2c\20cache_key\20const&\29\20const 
-std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::remove\28std::__1::__hash_const_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\29 -std::__1::unordered_map\2c\20std::__1::allocator\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::erase\28std::__1::__hash_map_iterator\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\20>\29,std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,operator\20new\28unsigned\20long\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>::pair\2c\20std::__1::allocator\20>&\2c\20false>\28cache_key&\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>&\29,abort 
-std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>::operator\28\29\28cache_key\20const&\29\20const -std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::equal_to::operator\28\29\28cache_key\20const&\2c\20cache_key\20const&\29\20const 
-std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,operator\20new\28unsigned\20long\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::__next_prime\28unsigned\20long\29 
-std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29 -std::__1::pair\2c\20std::__1::allocator\20>\20>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\20>\20>\28cache_key\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\20>&&\29,std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29 -std::__1::unique_ptr\2c\20std::__1::allocator\20>\20>\2c\20void*>\2c\20std::__1::__hash_node_destructor\2c\20std::__1::allocator\20>\20>\2c\20void*>\20>\20>\20>::~unique_ptr\28\29,operator\20delete\28void*\29 
-std::__1::equal_to::operator\28\29\28cache_key\20const&\2c\20cache_key\20const&\29\20const,memcmp -std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 -std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,memcmp -std::__1::__hash_table\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::__rehash\28unsigned\20long\29,abort +opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 
+opa_glob_match,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29 
+std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::push_back\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::find\28cache_key\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::find\28cache_key\20const&\29,cache_key::operator==\28cache_key\20const&\29\20const 
+std::_LIBCPP_ABI_NAMESPACE::unordered_map\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::erase\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::__hash_map_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>>\29,std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::remove\28std::_LIBCPP_ABI_NAMESPACE::__hash_const_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\29 +std::_LIBCPP_ABI_NAMESPACE::unordered_map\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::erase\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::__hash_map_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>>\29,void\20std::_LIBCPP_ABI_NAMESPACE::__destroy_at\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\200>\28std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>*\29 
+std::_LIBCPP_ABI_NAMESPACE::unordered_map\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::erase\5babi:nn210108\5d\28std::_LIBCPP_ABI_NAMESPACE::__hash_map_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>>\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::pair\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\2c\200>\28cache_key&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::pair\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\2c\200>\28cache_key&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::pair\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\2c\200>\28cache_key&\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,cache_key::operator==\28cache_key\20const&\29\20const 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>\28cache_key\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>&&\29,void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__destroy_at\5babi:nn210108\5d\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\200>\28std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +cache_key::operator==\28cache_key\20const&\29\20const,memcmp +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29,memset +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::__do_rehash\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 opa_regex_is_valid,opa_value_type opa_regex_is_valid,opa_boolean opa_regex_is_valid,operator\20new\28unsigned\20long\29 -opa_regex_is_valid,memcpy +opa_regex_is_valid,memmove opa_regex_is_valid,re2::RE2::RE2\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29 opa_regex_is_valid,re2::RE2::~RE2\28\29 -opa_regex_is_valid,operator\20delete\28void*\29 -opa_regex_is_valid,abort +opa_regex_is_valid,operator\20delete\28void*\2c\20unsigned\20long\29 +opa_regex_is_valid,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 opa_regex_match,opa_value_type opa_regex_match,operator\20new\28unsigned\20long\29 -opa_regex_match,memcpy 
+opa_regex_match,memmove opa_regex_match,compile\28char\20const*\29 opa_regex_match,re2::RE2::PartialMatchN\28re2::StringPiece\20const&\2c\20re2::RE2\20const&\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29 opa_regex_match,reuse\28re2::RE2*\29 opa_regex_match,opa_boolean -opa_regex_match,operator\20delete\28void*\29 -opa_regex_match,abort +opa_regex_match,operator\20delete\28void*\2c\20unsigned\20long\29 +opa_regex_match,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 compile\28char\20const*\29,opa_builtin_cache_get compile\28char\20const*\29,operator\20new\28unsigned\20long\29 compile\28char\20const*\29,opa_builtin_cache_set compile\28char\20const*\29,strlen compile\28char\20const*\29,memcpy -compile\28char\20const*\29,std::__1::__hash_iterator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::find\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -compile\28char\20const*\29,operator\20delete\28void*\29 
+compile\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::find\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29 +compile\28char\20const*\29,operator\20delete\28void*\2c\20unsigned\20long\29 compile\28char\20const*\29,re2::RE2::RE2\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29 compile\28char\20const*\29,re2::RE2::~RE2\28\29 -compile\28char\20const*\29,abort +compile\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 reuse\28re2::RE2*\29,opa_builtin_cache_get reuse\28re2::RE2*\29,operator\20new\28unsigned\20long\29 reuse\28re2::RE2*\29,opa_builtin_cache_set reuse\28re2::RE2*\29,re2::RE2::~RE2\28\29 -reuse\28re2::RE2*\29,operator\20delete\28void*\29 
-reuse\28re2::RE2*\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::remove\28std::__1::__hash_const_iterator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\29 -reuse\28re2::RE2*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29 -reuse\28re2::RE2*\29,std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29
 -std::__1::__hash_iterator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::find\2c\20std::__1::allocator\20>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcmp -std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,memcmp 
-std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,operator\20new\28unsigned\20long\29 
-std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,std::__1::__next_prime\28unsigned\20long\29 
-std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__emplace_unique_key_args\2c\20std::__1::allocator\20>\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\2c\20std::__1::pair\2c\20std::__1::allocator\20>\2c\20re2::RE2*>&&\29,std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29 +reuse\28re2::RE2*\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+reuse\28re2::RE2*\29,std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::remove\28std::_LIBCPP_ABI_NAMESPACE::__hash_const_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\29 +reuse\28re2::RE2*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__init_copy_ctor_external\28char\20const*\2c\20unsigned\20long\29 
+reuse\28re2::RE2*\29,std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29 
+std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::find\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::find\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,memcmp 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,memcmp 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,operator\20new\28unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>>\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>&&\29,void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESP
ACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29 opa_regex_find_all_string_submatch,opa_value_type opa_regex_find_all_string_submatch,opa_number_try_int opa_regex_find_all_string_submatch,operator\20new\28unsigned\20long\29 -opa_regex_find_all_string_submatch,memcpy +opa_regex_find_all_string_submatch,memmove opa_regex_find_all_string_submatch,compile\28char\20const*\29 opa_regex_find_all_string_submatch,opa_array opa_regex_find_all_string_submatch,memset @@ -1079,16 +1122,19 @@ opa_regex_find_all_string_submatch,re2::RE2::Match\28re2::StringPiece\20const&\2 opa_regex_find_all_string_submatch,fullrune opa_regex_find_all_string_submatch,chartorune opa_regex_find_all_string_submatch,opa_array_with_cap +opa_regex_find_all_string_submatch,reuse\28re2::RE2*\29 opa_regex_find_all_string_submatch,opa_malloc +opa_regex_find_all_string_submatch,memcpy opa_regex_find_all_string_submatch,opa_string_allocated opa_regex_find_all_string_submatch,opa_array_append -opa_regex_find_all_string_submatch,reuse\28re2::RE2*\29 -opa_regex_find_all_string_submatch,operator\20delete\28void*\29 -opa_regex_find_all_string_submatch,abort 
-std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 
-std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29,memcmp -std::__1::__hash_table\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::allocator\20>\2c\20std::__1::__hash_value_type\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\2c\20std::__1::equal_to\2c\20std::__1::allocator\20>\20>\2c\20std::__1::hash\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20re2::RE2*>\20>\20>::__rehash\28unsigned\20long\29,abort +opa_regex_find_all_string_submatch,operator\20delete\28void*\2c\20unsigned\20long\29 +opa_regex_find_all_string_submatch,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +opa_regex_find_all_string_submatch,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29,memset 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20re2::RE2*>>>::__do_rehash\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 snprintf_,_vsnprintf _vsnprintf,_ntoa_format _vsnprintf,_ftoa @@ -1311,14 +1357,14 @@ mpd_qplus,opa_abort mpd_qplus,mpd_qfinalize mpd_qadd,mpd_qcopy mpd_qadd,_mpd_fix_nan -mpd_qadd,opa_abort -mpd_qadd,mpd_realloc +mpd_qadd,_mpd_qaddsub_inf mpd_qadd,_mpd_qaddsub mpd_qadd,mpd_qfinalize +_mpd_qaddsub_inf,opa_abort +_mpd_qaddsub_inf,mpd_realloc mpd_qsub,mpd_qcopy mpd_qsub,_mpd_fix_nan -mpd_qsub,opa_abort -mpd_qsub,mpd_realloc +mpd_qsub,_mpd_qaddsub_inf mpd_qsub,_mpd_qaddsub mpd_qsub,mpd_qfinalize mpd_qdiv,_mpd_qdiv @@ -1349,15 +1395,15 @@ _mpd_base_ndivmod,_mpd_qmul_exact _mpd_base_ndivmod,mpd_qshiftr _mpd_base_ndivmod,_mpd_qmul _mpd_base_ndivmod,mpd_qfinalize -_mpd_base_ndivmod,mpd_qsub -_mpd_base_ndivmod,mpd_realloc +_mpd_base_ndivmod,_mpd_qsub_exact _mpd_base_ndivmod,_mpd_qround_to_integral +_mpd_base_ndivmod,_mpd_cmp +_mpd_base_ndivmod,_mpd_qadd_exact _mpd_base_ndivmod,fprintf 
_mpd_base_ndivmod,fwrite _mpd_base_ndivmod,fputc -_mpd_base_ndivmod,_mpd_cmp -_mpd_base_ndivmod,mpd_qadd _mpd_base_ndivmod,mpd_qcopy +_mpd_base_ndivmod,mpd_realloc _mpd_qdivmod,opa_abort _mpd_qdivmod,mpd_qcopy _mpd_qdivmod,_settriple @@ -1373,43 +1419,37 @@ _mpd_qmul,mpd_qcopy _mpd_qmul,_mpd_fix_nan _mpd_qmul,opa_abort _mpd_qmul,mpd_realloc -_mpd_qmul,_mpd_mul_2_le2 _mpd_qmul,memset _mpd_qmul,_mpd_shortmul _mpd_qmul,_mpd_basemul -_mpd_qmul,mpd_switch_to_dyn -_mpd_qmul,mpd_realloc_dyn _mpd_qmul,mpd_calloc _mpd_qmul,_mpd_kmul _mpd_qmul,_mpd_fntmul _mpd_qmul,_mpd_kmul_fnt _mpd_qmul,mpd_seterror +_mpd_qmul,_mpd_mul_2_le2 +_mpd_qmul,mpd_switch_to_dyn +_mpd_qmul,mpd_realloc_dyn _mpd_kmul,opa_abort +_mpd_kmul,_kmul_resultsize _mpd_kmul,mpd_calloc _mpd_kmul,_kmul_worksize _mpd_kmul,_karatsuba_rec -_mpd_kmul,fprintf -_mpd_kmul,fwrite -_mpd_kmul,fputc -_mpd_kmul,abort _mpd_fntmul,opa_abort -_mpd_fntmul,fprintf -_mpd_fntmul,fwrite -_mpd_fntmul,fputc -_mpd_fntmul,abort _mpd_fntmul,mpd_calloc _mpd_fntmul,memcpy _mpd_fntmul,fnt_autoconvolute _mpd_fntmul,fnt_convolute _mpd_fntmul,memset _mpd_fntmul,crt3 +_mpd_fntmul,fprintf +_mpd_fntmul,fwrite +_mpd_fntmul,fputc +_mpd_fntmul,abort _mpd_kmul_fnt,opa_abort +_mpd_kmul_fnt,_kmul_resultsize _mpd_kmul_fnt,mpd_calloc _mpd_kmul_fnt,_kmul_worksize -_mpd_kmul_fnt,fprintf -_mpd_kmul_fnt,fwrite -_mpd_kmul_fnt,fputc -_mpd_kmul_fnt,abort _mpd_kmul_fnt,_karatsuba_rec_fnt mpd_qmul,_mpd_qmul mpd_qmul,mpd_qfinalize @@ -1435,6 +1475,9 @@ mpd_qround_to_intx,_mpd_qround_to_integral mpd_qtrunc,_mpd_qround_to_integral mpd_qfloor,_mpd_qround_to_integral mpd_qceil,_mpd_qround_to_integral +_mpd_qadd_exact,mpd_qadd +_mpd_qadd_exact,opa_abort +_mpd_qadd_exact,mpd_realloc mpd_sizeinbase,opa_abort mpd_sizeinbase,log10 mpd_qexport_u16,opa_abort @@ -1454,6 +1497,13 @@ mpd_qimport_u16,_mpd_shortmul_c mpd_qimport_u16,_mpd_shortadd mpd_qimport_u16,mpd_qfinalize _mpd_basecmp,opa_abort +_mpd_qsub_exact,mpd_qsub +_mpd_qsub_exact,opa_abort +_mpd_qsub_exact,mpd_realloc 
+_kmul_resultsize,fprintf +_kmul_resultsize,fwrite +_kmul_resultsize,fputc +_kmul_resultsize,abort _kmul_worksize,_kmul_worksize _kmul_worksize,fprintf _kmul_worksize,fwrite @@ -1502,78 +1552,79 @@ swap_halfrows_pow2,fprintf swap_halfrows_pow2,fwrite swap_halfrows_pow2,fputc swap_halfrows_pow2,abort -std::__1::__next_prime\28unsigned\20long\29,abort +std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::__throw_overflow_error\5babi:nn210108\5d\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::__throw_overflow_error\5babi:nn210108\5d\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::__libcpp_verbose_abort\28char\20const*\2c\20...\29 operator\20new\28unsigned\20long\29,opa_malloc -operator\20delete\28void*\29,opa_free +operator\20delete\28void*\2c\20unsigned\20long\29,opa_free operator\20new\5b\5d\28unsigned\20long\29,opa_malloc operator\20delete\5b\5d\28void*\29,opa_free __cxa_pure_virtual,opa_abort -std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::basic_string\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::operator=\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,abort 
-std::__1::basic_string\2c\20std::__1::allocator\20>::resize\28unsigned\20long\2c\20char\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29 -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,memset -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28unsigned\20long\2c\20char\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29,strlen -std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29,opa_malloc -std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::push_back\28char\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,memmove -std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,opa_malloc 
-std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,memcpy -std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,opa_free -std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29,abort -std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\2c\20unsigned\20long\29,std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29 -std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29,strlen -std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::__assign_external\28char\20const*\2c\20unsigned\20long\29 -std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const,memcmp -std::__1::basic_string\2c\20std::__1::allocator\20>::compare\28unsigned\20long\2c\20unsigned\20long\2c\20char\20const*\2c\20unsigned\20long\29\20const,abort -void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29,unsigned\20int\20std::__1::__sort5&\2c\20int*>\28int*\2c\20int*\2c\20int*\2c\20int*\2c\20int*\2c\20std::__1::__less&\29 -void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29,bool\20std::__1::__insertion_sort_incomplete&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29 -void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29,void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29 -bool\20std::__1::__insertion_sort_incomplete&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29,unsigned\20int\20std::__1::__sort5&\2c\20int*>\28int*\2c\20int*\2c\20int*\2c\20int*\2c\20int*\2c\20std::__1::__less&\29 
+std::_LIBCPP_ABI_NAMESPACE::__libcpp_verbose_abort\28char\20const*\2c\20...\29,opa_abort +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::operator=\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,opa_free 
+std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>&\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_no_alias\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::resize\28unsigned\20long\2c\20char\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,memmove 
+std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,opa_free +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,memset +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28unsigned\20long\2c\20char\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,opa_free +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29,strlen +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29,memmove 
+std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::push_back\28char\29,opa_free +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,memmove +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,opa_malloc +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,opa_free +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\2c\20unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\29,strlen +std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__assign_external\28char\20const*\2c\20unsigned\20long\29 
re2::BitState::Push\28int\2c\20char\20const*\29,operator\20new\28unsigned\20long\29 re2::BitState::Push\28int\2c\20char\20const*\29,memmove -re2::BitState::Push\28int\2c\20char\20const*\29,operator\20delete\28void*\29 -re2::BitState::Push\28int\2c\20char\20const*\29,abort +re2::BitState::Push\28int\2c\20char\20const*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::BitState::Push\28int\2c\20char\20const*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::BitState::TrySearch\28int\2c\20char\20const*\29,re2::BitState::Push\28int\2c\20char\20const*\29 re2::BitState::TrySearch\28int\2c\20char\20const*\29,opa_abort re2::BitState::TrySearch\28int\2c\20char\20const*\29,re2::Prog::EmptyFlags\28re2::StringPiece\20const&\2c\20char\20const*\29 re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,operator\20new\28unsigned\20long\29 -re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,operator\20delete\28void*\29 +re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memset re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::BitState::TrySearch\28int\2c\20char\20const*\29 re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memchr re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 
-re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,abort +re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Prog::SearchBitState\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::BitState::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29 -re2::Prog::SearchBitState\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,operator\20delete\28void*\29 +re2::Prog::SearchBitState\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::AllocInst\28int\29,operator\20new\28unsigned\20long\29 re2::Compiler::AllocInst\28int\29,memset re2::Compiler::AllocInst\28int\29,memmove -re2::Compiler::AllocInst\28int\29,operator\20delete\28void*\29 -re2::Compiler::AllocInst\28int\29,abort +re2::Compiler::AllocInst\28int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Compiler::AllocInst\28int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Compiler::~Compiler\28\29,re2::Prog::~Prog\28\29 -re2::Compiler::~Compiler\28\29,operator\20delete\28void*\29 +re2::Compiler::~Compiler\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::~Compiler\28\29,re2::Regexp::Walker::~Walker\28\29 re2::Regexp::Walker::~Walker\28\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\29 
-re2::Regexp::Walker::~Walker\28\29,std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29 -re2::Compiler::~Compiler\28\29.1,re2::Compiler::~Compiler\28\29 -re2::Compiler::~Compiler\28\29.1,operator\20delete\28void*\29 +re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::~Walker\28\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29 +re2::Compiler::~Compiler\28\29_479,re2::Compiler::~Compiler\28\29 +re2::Compiler::~Compiler\28\29_479,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::Cat\28re2::Frag\2c\20re2::Frag\29,opa_abort re2::Compiler::Star\28re2::Frag\2c\20bool\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::Star\28re2::Frag\2c\20bool\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 @@ -1583,45 +1634,40 @@ re2::Compiler::Quest\28re2::Frag\2c\20bool\29,re2::Prog::Inst::InitNop\28unsigne re2::Compiler::Quest\28re2::Frag\2c\20bool\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 re2::Compiler::Nop\28\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::Nop\28\29,re2::Prog::Inst::InitNop\28unsigned\20int\29 -re2::Compiler::ByteRange\28int\2c\20int\2c\20bool\29,re2::Compiler::AllocInst\28int\29 -re2::Compiler::ByteRange\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 -re2::Compiler::Match\28int\29,re2::Compiler::AllocInst\28int\29 -re2::Compiler::Match\28int\29,re2::Prog::Inst::InitMatch\28int\29 re2::Compiler::EmptyWidth\28re2::EmptyOp\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::EmptyWidth\28re2::EmptyOp\29,re2::Prog::Inst::InitEmptyWidth\28re2::EmptyOp\2c\20unsigned\20int\29 re2::Compiler::Capture\28re2::Frag\2c\20int\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::Capture\28re2::Frag\2c\20int\29,re2::Prog::Inst::InitCapture\28int\2c\20unsigned\20int\29 
re2::Compiler::Capture\28re2::Frag\2c\20int\29,opa_abort -re2::Compiler::BeginRange\28\29,operator\20delete\28void*\29 +re2::Compiler::BeginRange\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Compiler::BeginRange\28\29,memset re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29,opa_abort -std::__1::pair\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__emplace_unique_key_args\2c\20std::__1::tuple<>\20>\28unsigned\20long\20long\20const&\2c\20std::__1::piecewise_construct_t\20const&\2c\20std::__1::tuple&&\2c\20std::__1::tuple<>&&\29,operator\20new\28unsigned\20long\29 -std::__1::pair\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__emplace_unique_key_args\2c\20std::__1::tuple<>\20>\28unsigned\20long\20long\20const&\2c\20std::__1::piecewise_construct_t\20const&\2c\20std::__1::tuple&&\2c\20std::__1::tuple<>&&\29,std::__1::__next_prime\28unsigned\20long\29 
-std::__1::pair\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__emplace_unique_key_args\2c\20std::__1::tuple<>\20>\28unsigned\20long\20long\20const&\2c\20std::__1::piecewise_construct_t\20const&\2c\20std::__1::tuple&&\2c\20std::__1::tuple<>&&\29,std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__rehash\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::find\28unsigned\20long\20long\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29,std::_LIBCPP_ABI_NAMESPACE::__hash_memory\28void\20const*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29,operator\20new\28unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29,std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29,void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,opa_abort 
-re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Compiler::FindByteRange\28int\2c\20int\29 +re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Compiler::ByteRangeEqual\28int\2c\20int\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 -re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,std::__1::__hash_iterator\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::find\28unsigned\20long\20long\20const&\29 +re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::find\28unsigned\20long\20long\20const&\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::AddSuffixRecursive\28int\2c\20int\29,re2::Compiler::AddSuffixRecursive\28int\2c\20int\29 -re2::Compiler::FindByteRange\28int\2c\20int\29,opa_abort -re2::Compiler::FindByteRange\28int\2c\20int\29,re2::Compiler::ByteRangeEqual\28int\2c\20int\29 re2::Compiler::ByteRangeEqual\28int\2c\20int\29,opa_abort -re2::Compiler::AddRuneRange\28int\2c\20int\2c\20bool\29,re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29 -re2::Compiler::AddRuneRange\28int\2c\20int\2c\20bool\29,re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29 
-re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::Add_80_10ffff\28\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,runetochar re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,opa_abort -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,std::__1::__hash_iterator\2c\20void*>*>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::find\28unsigned\20long\20long\20const&\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,std::__1::pair\2c\20void*>*>\2c\20bool>\20std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__emplace_unique_key_args\2c\20std::__1::tuple<>\20>\28unsigned\20long\20long\20const&\2c\20std::__1::piecewise_construct_t\20const&\2c\20std::__1::tuple&&\2c\20std::__1::tuple<>&&\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::AddSuffixRecursive\28int\2c\20int\29 re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::AllocInst\28int\29 -re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 
+re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::AddSuffixRecursive\28int\2c\20int\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,runetochar +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,opa_abort +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator\2c\20void*>*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::find\28unsigned\20long\20long\20const&\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::UncachedRuneByteSuffix\28unsigned\20char\2c\20unsigned\20char\2c\20bool\2c\20int\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::pair\2c\20void*>*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__emplace_unique_key_args\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>>\28unsigned\20long\20long\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::piecewise_construct_t\20const&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple&&\2c\20std::_LIBCPP_ABI_NAMESPACE::tuple<>&&\29 +re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29,re2::Compiler::Add_80_10ffff\28\29 
re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29,re2::Compiler::AddSuffixRecursive\28int\2c\20int\29 @@ -1635,22 +1681,25 @@ re2::Compiler::Literal\28int\2c\20bool\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::Literal\28int\2c\20bool\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::Literal\28int\2c\20bool\29,runetochar re2::Compiler::Literal\28int\2c\20bool\29,re2::Compiler::Cat\28re2::Frag\2c\20re2::Frag\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Nop\28\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Match\28int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AllocInst\28int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitNop\28unsigned\20int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitMatch\28int\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::EmptyWidth\28re2::EmptyOp\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Cat\28re2::Frag\2c\20re2::Frag\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 
re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Star\28re2::Frag\2c\20bool\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Quest\28re2::Frag\2c\20bool\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Literal\28int\2c\20bool\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Nop\28\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,opa_abort -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::BeginRange\28\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AddRuneRange\28int\2c\20int\2c\20bool\29 -re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::ByteRange\28int\2c\20int\2c\20bool\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,memset re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AddRuneRangeUTF8\28int\2c\20int\2c\20bool\29 re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::AddRuneRangeLatin1\28int\2c\20int\2c\20bool\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::BeginRange\28\29 
re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Compiler::Capture\28re2::Frag\2c\20int\29 +re2::Compiler::PostVisit\28re2::Regexp*\2c\20re2::Frag\2c\20re2::Frag\2c\20re2::Frag*\2c\20int\29,re2::Prog::Inst::InitEmptyWidth\28re2::EmptyOp\2c\20unsigned\20int\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,operator\20new\28unsigned\20long\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Prog::Prog\28\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Prog::Inst::InitFail\28\29 @@ -1661,19 +1710,19 @@ re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Regexp: re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Regexp::Decref\28\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,memset re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,memmove -re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,operator\20delete\28void*\29 +re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Prog::Inst::InitMatch\28int\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Compiler::Cat\28re2::Frag\2c\20re2::Frag\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Compiler::DotStar\28\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Compiler::Finish\28re2::Regexp*\29 re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,re2::Compiler::~Compiler\28\29 -re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,abort +re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::Incref\28\29 
re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::IsAnchorStart\28re2::Regexp**\2c\20int\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,operator\20new\28unsigned\20long\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::Decref\28\29 -re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,operator\20delete\28void*\29 +re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,opa_abort re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::Capture\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\2c\20int\29 re2::IsAnchorStart\28re2::Regexp**\2c\20int\29,re2::Regexp::LiteralString\28int*\2c\20int\2c\20re2::Regexp::ParseFlags\29 @@ -1682,73 +1731,74 @@ re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::IsAnchorEnd\28re2::Regexp**\2c re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,operator\20new\28unsigned\20long\29 re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::Regexp::Decref\28\29 -re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,operator\20delete\28void*\29 +re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,opa_abort re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::Regexp::Capture\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\2c\20int\29 re2::IsAnchorEnd\28re2::Regexp**\2c\20int\29,re2::Regexp::LiteralString\28int*\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,operator\20delete\28void*\29 
-re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Frag\2c\20bool\29,operator\20new\5b\5d\28unsigned\20long\29 re2::Compiler::DotStar\28\29,re2::Compiler::AllocInst\28int\29 re2::Compiler::DotStar\28\29,re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29 re2::Compiler::DotStar\28\29,re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29 re2::Compiler::DotStar\28\29,opa_abort -re2::Compiler::Finish\28re2::Regexp*\29,operator\20delete\28void*\29 +re2::Compiler::Finish\28re2::Regexp*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Compiler::Finish\28re2::Regexp*\29,re2::Prog::Optimize\28\29 re2::Compiler::Finish\28re2::Regexp*\29,re2::Prog::Flatten\28\29 re2::Compiler::Finish\28re2::Regexp*\29,re2::Prog::ComputeByteMap\28\29 -re2::Compiler::Finish\28re2::Regexp*\29,re2::Regexp::RequiredPrefixForAccel\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\29 +re2::Compiler::Finish\28re2::Regexp*\29,re2::Regexp::RequiredPrefixForAccel\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\29 re2::Regexp::CompileToProg\28long\20long\29,re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29 re2::Regexp::CompileToReverseProg\28long\20long\29,re2::Compiler::Compile\28re2::Regexp*\2c\20bool\2c\20long\20long\29 -std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29,operator\20delete\28void*\29 
-std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 -std::__1::__hash_table\2c\20std::__1::__unordered_map_hasher\2c\20std::__1::hash\2c\20std::__1::equal_to\2c\20true>\2c\20std::__1::__unordered_map_equal\2c\20std::__1::equal_to\2c\20std::__1::hash\2c\20true>\2c\20std::__1::allocator\20>\20>::__rehash\28unsigned\20long\29,abort -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,memmove -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,abort 
-std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,abort +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29,memset +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_hasher\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::__unordered_map_equal\2c\20std::_LIBCPP_ABI_NAMESPACE::equal_to\2c\20std::_LIBCPP_ABI_NAMESPACE::hash\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__do_rehash\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 
+std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29,operator\20new\28unsigned\20long\29 -re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29,abort +re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::DFA::~DFA\28\29,opa_abort -re2::DFA::~DFA\28\29,operator\20delete\28void*\29 +re2::DFA::~DFA\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::DFA::~DFA\28\29,memset re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,operator\20new\28unsigned\20long\29 +re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,opa_abort -re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,void\20std::__1::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::__1::__less&\29 -re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,abort +re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__sort&\2c\20int*>\28int*\2c\20int*\2c\20std::_LIBCPP_ABI_NAMESPACE::__less&\29 re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29 
-re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,operator\20delete\28void*\29 -re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,std::__1::__hash_iterator*>\20std::__1::__hash_table\20>::find\28re2::DFA::State*\20const&\29 +re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,std::_LIBCPP_ABI_NAMESPACE::__hash_iterator*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::find\28re2::DFA::State*\20const&\29 re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,operator\20new\28unsigned\20long\29 re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,memset -re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,memmove -re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29 -std::__1::__hash_iterator*>\20std::__1::__hash_table\20>::find\28re2::DFA::State*\20const&\29,opa_abort -std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,opa_abort -std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,std::__1::__next_prime\28unsigned\20long\29 -std::__1::pair*>\2c\20bool>\20std::__1::__hash_table\20>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,std::__1::__hash_table\20>::__rehash\28unsigned\20long\29 +re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,memcpy 
+re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29,std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29 +std::_LIBCPP_ABI_NAMESPACE::__hash_iterator*>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::find\28re2::DFA::State*\20const&\29,opa_abort +std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,opa_abort +std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__next_prime\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::pair*>\2c\20bool>\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__emplace_unique_key_args\28re2::DFA::State*\20const&\2c\20re2::DFA::State*\20const&\29,void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29 re2::SparseSetT::InsertInternal\28bool\2c\20int\29,opa_abort re2::SparseSetT::InsertInternal\28bool\2c\20int\29,re2::SparseSetT::create_index\28int\29 re2::DFA::AddToQueue\28re2::DFA::Workq*\2c\20int\2c\20unsigned\20int\29,opa_abort @@ -1761,7 +1811,8 @@ re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29,re2::DFA::AddToQueue\28r re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29,re2::DFA::RunWorkqOnByte\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20int\2c\20unsigned\20int\2c\20bool*\29 re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29,re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29 
re2::DFA::ResetCache\28re2::DFA::RWLocker*\29,re2::hooks::GetDFAStateCacheResetHook\28\29 -re2::DFA::ResetCache\28re2::DFA::RWLocker*\29,operator\20delete\28void*\29 +re2::DFA::ResetCache\28re2::DFA::RWLocker*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::DFA::ResetCache\28re2::DFA::RWLocker*\29,memset re2::DFA::SearchFFF\28re2::DFA::SearchParams*\29,bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29 @@ -1821,9 +1872,9 @@ bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::Search bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,memchr bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29 +bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,operator\20new\5b\5d\28unsigned\20long\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,memmove -bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,operator\20delete\5b\5d\28void*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 @@ -1832,61 +1883,60 @@ bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchP bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,memchr 
bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::RunStateOnByte\28re2::DFA::State*\2c\20int\29 +bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,operator\20new\5b\5d\28unsigned\20long\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,memmove -bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::DFA::CachedState\28int*\2c\20int\2c\20unsigned\20int\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,operator\20delete\5b\5d\28void*\29 bool\20re2::DFA::InlinedSearchLoop\28re2::DFA::SearchParams*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29,re2::DFA::AddToQueue\28re2::DFA::Workq*\2c\20int\2c\20unsigned\20int\29 re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29,re2::DFA::WorkqToCachedState\28re2::DFA::Workq*\2c\20re2::DFA::Workq*\2c\20unsigned\20int\29 re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29,re2::DFA::ResetCache\28re2::DFA::RWLocker*\29 -re2::DFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20bool\2c\20bool*\2c\20char\20const**\2c\20re2::SparseSetT*\29,re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29 -re2::Prog::GetDFA\28re2::Prog::MatchKind\29,std::__1::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 
-void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 +re2::Prog::GetDFA\28re2::Prog::MatchKind\29,std::_LIBCPP_ABI_NAMESPACE::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,re2::DFA::DFA\28re2::Prog*\2c\20re2::Prog::MatchKind\2c\20long\20long\29 re2::Prog::DeleteDFA\28re2::DFA*\29,re2::DFA::~DFA\28\29 -re2::Prog::DeleteDFA\28re2::DFA*\29,operator\20delete\28void*\29 +re2::Prog::DeleteDFA\28re2::DFA*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Prog::SearchDFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20bool*\2c\20re2::SparseSetT*\29,re2::Prog::GetDFA\28re2::Prog::MatchKind\29 
-re2::Prog::SearchDFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20bool*\2c\20re2::SparseSetT*\29,re2::DFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20bool\2c\20bool*\2c\20char\20const**\2c\20re2::SparseSetT*\29 +re2::Prog::SearchDFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20bool*\2c\20re2::SparseSetT*\29,re2::DFA::AnalyzeSearch\28re2::DFA::SearchParams*\29 re2::Prog::SearchDFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20bool*\2c\20re2::SparseSetT*\29,re2::hooks::GetDFASearchFailureHook\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::SparseSetT::create_index\28int\29,opa_abort -std::__1::__hash_table\20>::__rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::__hash_table\20>::__rehash\28unsigned\20long\29,operator\20delete\28void*\29 -std::__1::__hash_table\20>::__rehash\28unsigned\20long\29,opa_abort -std::__1::__hash_table\20>::__rehash\28unsigned\20long\29,abort +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29,memset +void\20std::_LIBCPP_ABI_NAMESPACE::__hash_table>::__do_rehash\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::NFA::NFA\28re2::Prog*\29,memset re2::NFA::NFA\28re2::Prog*\29,re2::SparseArray::resize\28int\29 
re2::NFA::NFA\28re2::Prog*\29,operator\20new\28unsigned\20long\29 -re2::NFA::NFA\28re2::Prog*\29,operator\20delete\28void*\29 -re2::NFA::NFA\28re2::Prog*\29,abort +re2::NFA::NFA\28re2::Prog*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::NFA::NFA\28re2::Prog*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::SparseArray::resize\28int\29,opa_abort re2::SparseArray::resize\28int\29,operator\20new\28unsigned\20long\29 re2::SparseArray::resize\28int\29,memmove -re2::SparseArray::resize\28int\29,operator\20delete\28void*\29 -re2::SparseArray::resize\28int\29,abort +re2::SparseArray::resize\28int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::SparseArray::resize\28int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::NFA::~NFA\28\29,operator\20delete\5b\5d\28void*\29 -re2::NFA::~NFA\28\29,std::__1::__deque_base\20>::~__deque_base\28\29 -re2::NFA::~NFA\28\29,operator\20delete\28void*\29 +re2::NFA::~NFA\28\29,std::_LIBCPP_ABI_NAMESPACE::deque>::~deque\5babi:nn210108\5d\28\29 +re2::NFA::~NFA\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::NFA::~NFA\28\29,opa_abort -std::__1::__deque_base\20>::~__deque_base\28\29,operator\20delete\28void*\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::~deque\5babi:nn210108\5d\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,opa_abort -re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,std::__1::deque\20>::__add_back_capacity\28\29 +re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29 
re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,operator\20new\5b\5d\28unsigned\20long\29 re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,memmove re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29,re2::Prog::EmptyFlags\28re2::StringPiece\20const&\2c\20char\20const*\29 -std::__1::deque\20>::__add_back_capacity\28\29,memmove -std::__1::deque\20>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 -std::__1::deque\20>::__add_back_capacity\28\29,operator\20delete\28void*\29 -std::__1::deque\20>::__add_back_capacity\28\29,std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29 -std::__1::deque\20>::__add_back_capacity\28\29,std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29 -std::__1::deque\20>::__add_back_capacity\28\29,std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29 -std::__1::deque\20>::__add_back_capacity\28\29,std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29 -std::__1::deque\20>::__add_back_capacity\28\29,abort +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29 
+std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::NFA::Step\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\29,memmove re2::NFA::Step\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\29,opa_abort re2::NFA::Step\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\29,re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29 @@ -1894,13 +1944,11 @@ re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\2 re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memset re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::Step\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\29 re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,opa_abort -re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::Prog::PrefixAccel\28void\20const*\2c\20unsigned\20long\29 -re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,std::__1::deque\20>::__add_back_capacity\28\29 +re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memchr 
+re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 +re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,std::_LIBCPP_ABI_NAMESPACE::deque>::__add_back_capacity\28\29 re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,memmove re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::AddToThreadq\28re2::SparseArray*\2c\20int\2c\20int\2c\20re2::StringPiece\20const&\2c\20char\20const*\2c\20re2::NFA::Thread*\29 -re2::Prog::PrefixAccel\28void\20const*\2c\20unsigned\20long\29,opa_abort -re2::Prog::PrefixAccel\28void\20const*\2c\20unsigned\20long\29,memchr -re2::Prog::PrefixAccel\28void\20const*\2c\20unsigned\20long\29,re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29 re2::Prog::SearchNFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::NFA\28re2::Prog*\29 re2::Prog::SearchNFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::Search\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20bool\2c\20bool\2c\20re2::StringPiece*\2c\20int\29 re2::Prog::SearchNFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::NFA::~NFA\28\29 @@ -1909,22 +1957,22 @@ re2::SparseArray::SetInternal\28bool\2c\20int\2c\20int\20const&\29,re2::Spa re2::SparseArray::SetInternal\28bool\2c\20int\2c\20int\20const&\29,re2::SparseArray::SetExistingInternal\28int\2c\20int\20const&\29 
re2::SparseArray::create_index\28int\29,opa_abort re2::SparseArray::SetExistingInternal\28int\2c\20int\20const&\29,opa_abort -std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29,memmove -std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer\20>::push_back\28re2::NFA::Thread*&&\29,abort -std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29,memmove -std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer\20>::push_front\28re2::NFA::Thread*&&\29,abort -std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29,memmove -std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer&>::push_back\28re2::NFA::Thread*&&\29,abort -std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29,memmove -std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29,operator\20delete\28void*\29 -std::__1::__split_buffer&>::push_front\28re2::NFA::Thread*\20const&\29,abort +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_back\28re2::NFA::Thread*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer&>::emplace_front\28re2::NFA::Thread*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer>::emplace_front\28re2::NFA::Thread*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Prog::SearchOnePass\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,memset 
re2::Prog::SearchOnePass\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,re2::Prog::EmptyFlags\28re2::StringPiece\20const&\2c\20char\20const*\29 re2::Prog::SearchOnePass\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29,memcpy @@ -1932,17 +1980,19 @@ re2::Prog::IsOnePass\28\29,operator\20new\28unsigned\20long\29 re2::Prog::IsOnePass\28\29,memset re2::Prog::IsOnePass\28\29,opa_abort re2::Prog::IsOnePass\28\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 -re2::Prog::IsOnePass\28\29,std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29 -re2::Prog::IsOnePass\28\29,operator\20delete\28void*\29 +re2::Prog::IsOnePass\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29 +re2::Prog::IsOnePass\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Prog::IsOnePass\28\29,memmove -re2::Prog::IsOnePass\28\29,abort -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memmove -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memcpy -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,operator\20delete\28void*\29 -std::__1::vector\20>::insert\28std::__1::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,abort -std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29,operator\20delete\28void*\29 
+re2::Prog::IsOnePass\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memset +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memmove +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::insert\28std::_LIBCPP_ABI_NAMESPACE::__wrap_iter\2c\20unsigned\20long\2c\20unsigned\20char\20const&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29,re2::Regexp::ParseState::MaybeConcatString\28int\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29,re2::CharClassBuilder::RemoveAbove\28int\29 re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29,re2::Regexp::Decref\28\29 @@ -1965,36 +2015,36 @@ 
re2::Regexp::ParseState::PushSimpleOp\28re2::RegexpOp\29,re2::Regexp::Regexp\28r re2::Regexp::ParseState::PushSimpleOp\28re2::RegexpOp\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::ParseState::PushDot\28\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::PushDot\28\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::Regexp::ParseState::PushDot\28\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::ParseState::PushDot\28\29,re2::CharClassBuilder::CharClassBuilder\28\29 re2::Regexp::ParseState::PushDot\28\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 +re2::Regexp::ParseState::PushDot\28\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20delete\28void*\29 +re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::PushRepeatOp\28re2::RegexpOp\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::ComputeSimple\28\29 
re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20delete\28void*\29 +re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::ComputeSimple\28\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29 re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29,re2::Regexp::Walker::~Walker\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,operator\20delete\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29,operator\20new\5b\5d\28unsigned\20long\29 re2::Regexp::Walker::~Walker\28\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\29 -re2::Regexp::Walker::~Walker\28\29,std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29 -std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29,operator\20delete\28void*\29 +re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::~Walker\28\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,memcpy +re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,memmove re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 -re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,abort +re2::Regexp::ParseState::DoLeftParen\28re2::StringPiece\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 re2::Regexp::ParseState::DoVerticalBar\28\29,re2::Regexp::ParseState::MaybeConcatString\28int\2c\20re2::Regexp::ParseFlags\29 
re2::Regexp::ParseState::DoVerticalBar\28\29,operator\20new\28unsigned\20long\29 re2::Regexp::ParseState::DoVerticalBar\28\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 @@ -2005,65 +2055,70 @@ re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,operator\20new\28unsigned re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::Regexp::Incref\28\29 re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::Regexp::Decref\28\29 re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,operator\20delete\28void*\29 +re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,re2::Regexp::ComputeSimple\28\29 -re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,abort +re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::ParseState::DoVerticalBar\28\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::Decref\28\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::DoRightParen\28\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::DoRightParen\28\29,operator\20delete\28void*\29 
+re2::Regexp::ParseState::DoRightParen\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::DoRightParen\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::ComputeSimple\28\29 re2::Regexp::ParseState::DoRightParen\28\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::ParseState::DoFinish\28\29,re2::Regexp::ParseState::DoVerticalBar\28\29 re2::Regexp::ParseState::DoFinish\28\29,re2::Regexp::Decref\28\29 re2::Regexp::ParseState::DoFinish\28\29,re2::Regexp::ParseState::DoCollapse\28re2::RegexpOp\29 re2::Regexp::ParseState::DoFinish\28\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::Regexp::ParseState::DoFinish\28\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::ParseState::DoFinish\28\29,operator\20delete\28void*\29 +re2::Regexp::ParseState::DoFinish\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::ParseState::DoFinish\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29,operator\20delete\5b\5d\28void*\29 re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29,memmove re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29,re2::Regexp::Decref\28\29 re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29,re2::Regexp::Swap\28re2::Regexp*\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,void\20std::__1::vector\20>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29 +re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29 
re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::AlternateNoFactor\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20delete\28void*\29 -re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29 -void\20std::__1::vector\20>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,operator\20new\28unsigned\20long\29 -void\20std::__1::vector\20>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,operator\20delete\28void*\29 -void\20std::__1::vector\20>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,abort -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Incref\28\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Decref\28\29 
-re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,memmove -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,memcpy -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,abort -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::CharClassBuilder::CharClassBuilder\28\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::Decref\28\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::CharClassBuilder::GetCharClass\28\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::NewCharClass\28re2::CharClass*\2c\20re2::Regexp::ParseFlags\29 
-re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,memcpy -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,abort -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::LiteralString\28int*\2c\20int\2c\20re2::Regexp::ParseFlags\29 -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29 -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,memcpy -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::__1::vector\20>*\29,abort +re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 
+re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 +re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::FactorAlternation\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 +re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,operator\20new\28unsigned\20long\29 +re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Frame*\20std::_LIBCPP_ABI_NAMESPACE::vector>::__emplace_back_slow_path\28re2::Regexp**&\2c\20int&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Incref\28\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Decref\28\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memmove 
+re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round2\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::CharClassBuilder::CharClassBuilder\28\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::Decref\28\29 
+re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::CharClassBuilder::GetCharClass\28\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::NewCharClass\28re2::CharClass*\2c\20re2::Regexp::ParseFlags\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round3\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::LiteralString\28int*\2c\20int\2c\20re2::Regexp::ParseFlags\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::Regexp::RemoveLeadingString\28re2::Regexp*\2c\20int\29 
+re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::FactorAlternationImpl::Round1\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29,re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29 re2::CharClassBuilder::AddRangeFlags\28int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::AddFoldedRange\28re2::CharClassBuilder*\2c\20int\2c\20int\2c\20int\29 @@ -2082,7 +2137,7 @@ re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20 re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::CharClassBuilder::Negate\28\29 
re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::CharClassBuilder::AddCharClass\28re2::CharClassBuilder*\29 -re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 +re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 re2::StringPieceToRune\28int*\2c\20re2::StringPiece*\2c\20re2::RegexpStatus*\29,fullrune re2::StringPieceToRune\28int*\2c\20re2::StringPiece*\2c\20re2::RegexpStatus*\29,chartorune re2::Regexp::ParseState::ParseCCCharacter\28re2::StringPiece*\2c\20int*\2c\20re2::StringPiece\20const&\2c\20re2::RegexpStatus*\29,re2::ParseEscape\28re2::StringPiece*\2c\20int*\2c\20re2::RegexpStatus*\2c\20int\29 @@ -2117,8 +2172,8 @@ re2::Regexp::ParseState::ParsePerlFlags\28re2::StringPiece*\29,re2::Regexp::Rege re2::Regexp::ParseState::ParsePerlFlags\28re2::StringPiece*\29,re2::Regexp::ParseState::PushRegexp\28re2::Regexp*\29 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,operator\20new\28unsigned\20long\29 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,runetochar -re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,operator\20delete\28void*\29 +re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 
+re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,fullrune re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,chartorune re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,re2::Regexp::ParseState::PushLiteral\28int\29 @@ -2142,31 +2197,31 @@ re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\2 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,re2::AddUGroup\28re2::CharClassBuilder*\2c\20re2::UGroup\20const*\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29,re2::Regexp::ParseState::PushRepetition\28int\2c\20int\2c\20re2::StringPiece\20const&\2c\20bool\29 re2::RepetitionWalker::~RepetitionWalker\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::RepetitionWalker::~RepetitionWalker\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,memmove -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29 
-std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20new\28unsigned\20long\29 
-std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,abort +re2::RepetitionWalker::~RepetitionWalker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,memmove 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::Prog::Inst::InitAlt\28unsigned\20int\2c\20unsigned\20int\29,opa_abort re2::Prog::Inst::InitByteRange\28int\2c\20int\2c\20int\2c\20unsigned\20int\29,opa_abort re2::Prog::Inst::InitCapture\28int\2c\20unsigned\20int\29,opa_abort @@ -2175,26 +2230,30 @@ re2::Prog::Inst::InitMatch\28int\29,opa_abort re2::Prog::Inst::InitNop\28unsigned\20int\29,opa_abort re2::Prog::Inst::InitFail\28\29,opa_abort re2::Prog::~Prog\28\29,re2::Prog::DeleteDFA\28re2::DFA*\29 -re2::Prog::~Prog\28\29,operator\20delete\28void*\29 +re2::Prog::~Prog\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Prog::Optimize\28\29,operator\20new\28unsigned\20long\29 re2::Prog::Optimize\28\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 +re2::Prog::Optimize\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Prog::Optimize\28\29,opa_abort -re2::Prog::Optimize\28\29,abort -re2::Prog::Optimize\28\29,operator\20delete\28void*\29 +re2::Prog::Optimize\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::ByteMapBuilder::Mark\28int\2c\20int\29,opa_abort 
re2::ByteMapBuilder::Mark\28int\2c\20int\29,operator\20new\28unsigned\20long\29 re2::ByteMapBuilder::Mark\28int\2c\20int\29,memcpy -re2::ByteMapBuilder::Mark\28int\2c\20int\29,operator\20delete\28void*\29 -re2::ByteMapBuilder::Mark\28int\2c\20int\29,abort +re2::ByteMapBuilder::Mark\28int\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::ByteMapBuilder::Mark\28int\2c\20int\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::ByteMapBuilder::Mark\28int\2c\20int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::ByteMapBuilder::Merge\28\29,opa_abort re2::ByteMapBuilder::Merge\28\29,operator\20new\28unsigned\20long\29 re2::ByteMapBuilder::Merge\28\29,memcpy -re2::ByteMapBuilder::Merge\28\29,operator\20delete\28void*\29 -re2::ByteMapBuilder::Merge\28\29,abort +re2::ByteMapBuilder::Merge\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::ByteMapBuilder::Merge\28\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::ByteMapBuilder::Merge\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::ByteMapBuilder::Recolor\28int\29,operator\20new\28unsigned\20long\29 re2::ByteMapBuilder::Recolor\28int\29,memcpy -re2::ByteMapBuilder::Recolor\28int\29,operator\20delete\28void*\29 -re2::ByteMapBuilder::Recolor\28int\29,abort +re2::ByteMapBuilder::Recolor\28int\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::ByteMapBuilder::Recolor\28int\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 
+re2::ByteMapBuilder::Recolor\28int\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::ByteMapBuilder::Build\28unsigned\20char*\2c\20int*\29,opa_abort re2::ByteMapBuilder::Build\28unsigned\20char*\2c\20int*\29,re2::ByteMapBuilder::Recolor\28int\29 re2::ByteMapBuilder::Build\28unsigned\20char*\2c\20int*\29,memset @@ -2202,78 +2261,99 @@ re2::Prog::ComputeByteMap\28\29,re2::ByteMapBuilder::Mark\28int\2c\20int\29 re2::Prog::ComputeByteMap\28\29,opa_abort re2::Prog::ComputeByteMap\28\29,operator\20new\28unsigned\20long\29 re2::Prog::ComputeByteMap\28\29,memcpy -re2::Prog::ComputeByteMap\28\29,operator\20delete\28void*\29 +re2::Prog::ComputeByteMap\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Prog::ComputeByteMap\28\29,re2::ByteMapBuilder::Merge\28\29 -re2::Prog::ComputeByteMap\28\29,abort +re2::Prog::ComputeByteMap\28\29,std::_LIBCPP_ABI_NAMESPACE::vector\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::ComputeByteMap\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::Prog::ComputeByteMap\28\29,re2::ByteMapBuilder::Build\28unsigned\20char*\2c\20int*\29 re2::Prog::Flatten\28\29,operator\20new\28unsigned\20long\29 -re2::Prog::Flatten\28\29,re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29 +re2::Prog::Flatten\28\29,re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 re2::Prog::Flatten\28\29,memmove 
-re2::Prog::Flatten\28\29,void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -re2::Prog::Flatten\28\29,re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29 +re2::Prog::Flatten\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29 +re2::Prog::Flatten\28\29,re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 re2::Prog::Flatten\28\29,memset -re2::Prog::Flatten\28\29,re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29 -re2::Prog::Flatten\28\29,re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29 +re2::Prog::Flatten\28\29,re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29 +re2::Prog::Flatten\28\29,re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29 +re2::Prog::Flatten\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 
+re2::Prog::Flatten\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 re2::Prog::Flatten\28\29,opa_abort -re2::Prog::Flatten\28\29,operator\20delete\28void*\29 -re2::Prog::Flatten\28\29,abort -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseArray::SetInternal\28bool\2c\20int\2c\20int\20const&\29 -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,opa_abort -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,memcpy -re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,abort 
-void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,unsigned\20int\20std::__1::__sort4::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,void\20std::__1::__insertion_sort_3::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,bool\20std::__1::__insertion_sort_incomplete::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 
-void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,void\20std::__1::__sort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,opa_abort -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,memcpy -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseArray::create_index\28int\29 
-re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseArray::SetExistingInternal\28int\2c\20int\20const&\29 -re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::__1::vector\20>\2c\20std::__1::allocator\20>\20>\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,abort -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20new\28unsigned\20long\29 -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,operator\20delete\28void*\29 -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,opa_abort -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,memcpy -re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::__1::vector\20>*\2c\20re2::SparseSetT*\2c\20std::__1::vector\20>*\29,abort -re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29,opa_abort -re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29,re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29::$_1::operator\28\29\28int\2c\20int\29\20const -re2::Prog::ComputeHints\28std::__1::vector\20>*\2c\20int\2c\20int\29::$_1::operator\28\29\28int\2c\20int\29\20const,opa_abort +re2::Prog::Flatten\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Prog::Flatten\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 
+re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::SparseArray::SetInternal\28bool\2c\20int\2c\20int\20const&\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,opa_abort +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::MarkSuccessors\28re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29,re2::SparseArray::IndexValue*\20std::_LIBCPP_ABI_NAMESPACE::__partial_sort_impl\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29,void\20std::_LIBCPP_ABI_NAMESPACE::__sort5\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\200>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29,bool\20std::_LIBCPP_ABI_NAMESPACE::__insertion_sort_incomplete\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29,void\20std::_LIBCPP_ABI_NAMESPACE::__introsort::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\20false>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20std::_LIBCPP_ABI_NAMESPACE::iterator_traits::IndexValue*>::difference_type\2c\20bool\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,opa_abort +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 
+re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::MarkDominator\28int\2c\20re2::SparseArray*\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,opa_abort +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,re2::SparseSetT::InsertInternal\28bool\2c\20int\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20new\28unsigned\20long\29 
+re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,memcpy +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::Prog::EmitList\28int\2c\20re2::SparseArray*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20re2::SparseSetT*\2c\20std::_LIBCPP_ABI_NAMESPACE::vector>*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29,opa_abort +re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29,re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29::$_0::operator\28\29\28int\2c\20int\29\20const +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::push_back\5babi:nn210108\5d\28int\20const&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::Prog::ComputeHints\28std::_LIBCPP_ABI_NAMESPACE::vector>*\2c\20int\2c\20int\29::$_0::operator\28\29\28int\2c\20int\29\20const,opa_abort re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29,opa_abort re2::Prog::PrefixAccel_FrontAndBack\28void\20const*\2c\20unsigned\20long\29,memchr 
-bool\20std::__1::__insertion_sort_incomplete::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,unsigned\20int\20std::__1::__sort4::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::__1::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\2c\20unsigned\20long\29 +bool\20std::_LIBCPP_ABI_NAMESPACE::__insertion_sort_incomplete\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29,void\20std::_LIBCPP_ABI_NAMESPACE::__sort5\5babi:nn210108\5d::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\2c\20re2::SparseArray::IndexValue*\2c\200>\28re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20re2::SparseArray::IndexValue*\2c\20bool\20\28*&\29\28re2::SparseArray::IndexValue\20const&\2c\20re2::SparseArray::IndexValue\20const&\29\29 
+re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::_LIBCPP_ABI_NAMESPACE::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::assign\28char\20const*\2c\20unsigned\20long\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::Parse\28re2::StringPiece\20const&\2c\20re2::Regexp::ParseFlags\2c\20re2::RegexpStatus*\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,operator\20new\28unsigned\20long\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::RegexpStatus::Text\28\29\20const -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,memcpy -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,operator\20delete\28void*\29 -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29 +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,memmove +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::Incref\28\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::CompileToProg\28long\20long\29 re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Regexp::NumCaptures\28\29 
re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::Prog::IsOnePass\28\29 -re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,abort -void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 +re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 re2::RE2::RE2\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29,re2::RE2::Init\28re2::StringPiece\20const&\2c\20re2::RE2::Options\20const&\29 -re2::RE2::ReverseProg\28\29\20const,std::__1::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,re2::Regexp::CompileToReverseProg\28long\20long\29 +re2::RE2::ReverseProg\28\29\20const,std::_LIBCPP_ABI_NAMESPACE::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,re2::Regexp::CompileToReverseProg\28long\20long\29 re2::RE2::~RE2\28\29,re2::Regexp::Decref\28\29 re2::RE2::~RE2\28\29,re2::Prog::~Prog\28\29 -re2::RE2::~RE2\28\29,operator\20delete\28void*\29 -re2::RE2::~RE2\28\29,std::__1::__tree\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\2c\20std::__1::__value_type\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::less\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20int>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\2c\20int>\2c\20void*>*\29 
-re2::RE2::~RE2\28\29,std::__1::__tree\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\20>\2c\20std::__1::less\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\20>\2c\20void*>*\29 -std::__1::__tree\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\2c\20std::__1::__value_type\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::less\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20int>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\2c\20int>\2c\20void*>*\29,std::__1::__tree\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\2c\20std::__1::__value_type\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::less\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20int>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\2c\20int>\2c\20void*>*\29 -std::__1::__tree\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\2c\20std::__1::__value_type\2c\20std::__1::allocator\20>\2c\20int>\2c\20std::__1::less\2c\20std::__1::allocator\20>\20>\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\2c\20int>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\2c\20int>\2c\20void*>*\29,operator\20delete\28void*\29 
-std::__1::__tree\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\20>\2c\20std::__1::less\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\20>\2c\20void*>*\29,std::__1::__tree\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\20>\2c\20std::__1::less\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\20>\2c\20void*>*\29 -std::__1::__tree\2c\20std::__1::allocator\20>\20>\2c\20std::__1::__map_value_compare\2c\20std::__1::allocator\20>\20>\2c\20std::__1::less\2c\20true>\2c\20std::__1::allocator\2c\20std::__1::allocator\20>\20>\20>\20>::destroy\28std::__1::__tree_node\2c\20std::__1::allocator\20>\20>\2c\20void*>*\29,operator\20delete\28void*\29 +re2::RE2::~RE2\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::RE2::~RE2\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20void*>*\29 
+re2::RE2::~RE2\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*\29 +std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20void*>*\29,std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20void*>*\29 
+std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20std::_LIBCPP_ABI_NAMESPACE::pair\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\20const\2c\20int>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>\2c\20int>\2c\20void*>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*\29,std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*\29 +std::_LIBCPP_ABI_NAMESPACE::__tree\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::__map_value_compare\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20std::_LIBCPP_ABI_NAMESPACE::less\2c\20true>\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>>>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>\2c\20void*>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 
re2::RE2::DoMatch\28re2::StringPiece\20const&\2c\20re2::RE2::Anchor\2c\20unsigned\20long*\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29\20const,memset re2::RE2::DoMatch\28re2::StringPiece\20const&\2c\20re2::RE2::Anchor\2c\20unsigned\20long*\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29\20const,operator\20new\5b\5d\28unsigned\20long\29 re2::RE2::DoMatch\28re2::StringPiece\20const&\2c\20re2::RE2::Anchor\2c\20unsigned\20long*\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29\20const,re2::RE2::Match\28re2::StringPiece\20const&\2c\20unsigned\20long\2c\20unsigned\20long\2c\20re2::RE2::Anchor\2c\20re2::StringPiece*\2c\20int\29\20const @@ -2286,20 +2366,20 @@ re2::RE2::Match\28re2::StringPiece\20const&\2c\20unsigned\20long\2c\20unsigned\2 re2::RE2::Match\28re2::StringPiece\20const&\2c\20unsigned\20long\2c\20unsigned\20long\2c\20re2::RE2::Anchor\2c\20re2::StringPiece*\2c\20int\29\20const,re2::Prog::SearchNFA\28re2::StringPiece\20const&\2c\20re2::StringPiece\20const&\2c\20re2::Prog::Anchor\2c\20re2::Prog::MatchKind\2c\20re2::StringPiece*\2c\20int\29 re2::RE2::Match\28re2::StringPiece\20const&\2c\20unsigned\20long\2c\20unsigned\20long\2c\20re2::RE2::Anchor\2c\20re2::StringPiece*\2c\20int\29\20const,memset re2::RE2::PartialMatchN\28re2::StringPiece\20const&\2c\20re2::RE2\20const&\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29,re2::RE2::DoMatch\28re2::StringPiece\20const&\2c\20re2::RE2::Anchor\2c\20unsigned\20long*\2c\20re2::RE2::Arg\20const*\20const*\2c\20int\29\20const -re2::Regexp::~Regexp\28\29,operator\20delete\28void*\29 +re2::Regexp::~Regexp\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::~Regexp\28\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::~Regexp\28\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::Regexp::Incref\28\29,std::__1::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 
+re2::Regexp::~Regexp\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::Regexp::Incref\28\29,std::_LIBCPP_ABI_NAMESPACE::__call_once\28unsigned\20long\20volatile&\2c\20void*\2c\20void\20\28*\29\28void*\29\29 re2::Regexp::Incref\28\29,operator\20new\28unsigned\20long\29 -re2::Regexp::Incref\28\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -void\20std::__1::__call_once_proxy\20>\28void*\29,operator\20new\28unsigned\20long\29 +re2::Regexp::Incref\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__call_once_proxy\5babi:nn210108\5d>\28void*\29,operator\20new\28unsigned\20long\29 re2::Regexp::Decref\28\29,operator\20new\28unsigned\20long\29 -re2::Regexp::Decref\28\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::Regexp::Decref\28\29,void\20std::__1::__tree_remove*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::Regexp::Decref\28\29,operator\20delete\28void*\29 +re2::Regexp::Decref\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 re2::Regexp::Decref\28\29,re2::Regexp::Destroy\28\29 +re2::Regexp::Decref\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_remove\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +re2::Regexp::Decref\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::Destroy\28\29,re2::Regexp::~Regexp\28\29 -re2::Regexp::Destroy\28\29,operator\20delete\28void*\29 +re2::Regexp::Destroy\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 
re2::Regexp::Destroy\28\29,re2::Regexp::Decref\28\29 re2::Regexp::Destroy\28\29,operator\20delete\5b\5d\28void*\29 re2::Regexp::AddRuneToString\28int\29,opa_abort @@ -2317,8 +2397,8 @@ re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20 re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,operator\20new\5b\5d\28unsigned\20long\29 re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,opa_abort -re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,operator\20delete\28void*\29 -re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,abort +re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 re2::Regexp::AlternateNoFactor\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 re2::Regexp::Capture\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\2c\20int\29,operator\20new\28unsigned\20long\29 @@ -2329,74 +2409,80 @@ 
re2::Regexp::NewCharClass\28re2::CharClass*\2c\20re2::Regexp::ParseFlags\29,oper re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,re2::TopEqual\28re2::Regexp*\2c\20re2::Regexp*\29 re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,operator\20new\28unsigned\20long\29 re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,memcpy -re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,operator\20delete\28void*\29 +re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,opa_abort -re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,std::__1::vector\20>::__append\28unsigned\20long\29 -re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,abort +re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29 +re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 re2::TopEqual\28re2::Regexp*\2c\20re2::Regexp*\29,memcmp -std::__1::vector\20>::__append\28unsigned\20long\29,memset -std::__1::vector\20>::__append\28unsigned\20long\29,operator\20new\28unsigned\20long\29 -std::__1::vector\20>::__append\28unsigned\20long\29,memcpy -std::__1::vector\20>::__append\28unsigned\20long\29,operator\20delete\28void*\29 -std::__1::vector\20>::__append\28unsigned\20long\29,abort +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,memset +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,memcpy +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,operator\20delete\28void*\2c\20unsigned\20long\29 
+std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__append\28unsigned\20long\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::RegexpStatus::Text\28\29\20const,strlen re2::RegexpStatus::Text\28\29\20const,operator\20new\28unsigned\20long\29 -re2::RegexpStatus::Text\28\29\20const,memcpy -re2::RegexpStatus::Text\28\29\20const,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\2c\20unsigned\20long\29 -re2::RegexpStatus::Text\28\29\20const,operator\20delete\28void*\29 -re2::RegexpStatus::Text\28\29\20const,std::__1::basic_string\2c\20std::__1::allocator\20>::append\28char\20const*\29 -re2::RegexpStatus::Text\28\29\20const,abort +re2::RegexpStatus::Text\28\29\20const,memmove +re2::RegexpStatus::Text\28\29\20const,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\2c\20unsigned\20long\29 +re2::RegexpStatus::Text\28\29\20const,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::RegexpStatus::Text\28\29\20const,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::append\28char\20const*\29 +re2::RegexpStatus::Text\28\29\20const,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::__throw_length_error\5babi:nn210108\5d\28\29 re2::Regexp::NumCaptures\28\29,re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20int\2c\20bool\29 re2::Regexp::NumCaptures\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,std::__1::basic_string\2c\20std::__1::allocator\20>::resize\28unsigned\20long\2c\20char\29 
-re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,runetochar -re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,operator\20new\28unsigned\20long\29 -re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,memcpy -re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29,operator\20delete\28void*\29 -re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29,re2::Regexp::Incref\28\29 -re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 -re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29,operator\20new\28unsigned\20long\29 -re2::Regexp::RequiredPrefix\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\2c\20re2::Regexp**\29,re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29 -re2::Regexp::RequiredPrefixForAccel\28std::__1::basic_string\2c\20std::__1::allocator\20>*\2c\20bool*\29,re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::__1::basic_string\2c\20std::__1::allocator\20>*\29 -re2::CharClassBuilder::AddRange\28int\2c\20int\29,void\20std::__1::__tree_remove*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::CharClassBuilder::AddRange\28int\2c\20int\29,operator\20delete\28void*\29 
+re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>::resize\28unsigned\20long\2c\20char\29 +re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,runetochar +re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,memmove +re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29,operator\20new\28unsigned\20long\29 +re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29,re2::Regexp::Incref\28\29 +re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29,re2::Regexp::ConcatOrAlternate\28re2::RegexpOp\2c\20re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\2c\20bool\29 +re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29,operator\20new\28unsigned\20long\29 +re2::Regexp::RequiredPrefix\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\2c\20re2::Regexp**\29,re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29 
+re2::Regexp::RequiredPrefixForAccel\28std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\2c\20bool*\29,re2::ConvertRunesToBytes\28bool\2c\20int*\2c\20int\2c\20std::_LIBCPP_ABI_NAMESPACE::basic_string\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>*\29 +re2::CharClassBuilder::AddRange\28int\2c\20int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_remove\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +re2::CharClassBuilder::AddRange\28int\2c\20int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::CharClassBuilder::AddRange\28int\2c\20int\29,operator\20new\28unsigned\20long\29 -re2::CharClassBuilder::AddRange\28int\2c\20int\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 +re2::CharClassBuilder::AddRange\28int\2c\20int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 re2::CharClassBuilder::AddCharClass\28re2::CharClassBuilder*\29,re2::CharClassBuilder::AddRange\28int\2c\20int\29 -re2::CharClassBuilder::RemoveAbove\28int\29,void\20std::__1::__tree_remove*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::CharClassBuilder::RemoveAbove\28int\29,operator\20delete\28void*\29 +re2::CharClassBuilder::RemoveAbove\28int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_remove\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +re2::CharClassBuilder::RemoveAbove\28int\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::CharClassBuilder::RemoveAbove\28int\29,operator\20new\28unsigned\20long\29 -re2::CharClassBuilder::RemoveAbove\28int\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 
+re2::CharClassBuilder::RemoveAbove\28int\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 re2::CharClassBuilder::Negate\28\29,operator\20new\28unsigned\20long\29 re2::CharClassBuilder::Negate\28\29,memcpy -re2::CharClassBuilder::Negate\28\29,operator\20delete\28void*\29 -re2::CharClassBuilder::Negate\28\29,std::__1::__tree\20>::destroy\28std::__1::__tree_node*\29 -re2::CharClassBuilder::Negate\28\29,void\20std::__1::__tree_balance_after_insert*>\28std::__1::__tree_node_base*\2c\20std::__1::__tree_node_base*\29 -re2::CharClassBuilder::Negate\28\29,abort +re2::CharClassBuilder::Negate\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::CharClassBuilder::Negate\28\29,std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29 +re2::CharClassBuilder::Negate\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::CharClassBuilder::Negate\28\29,std::_LIBCPP_ABI_NAMESPACE::__tree>::destroy\28std::_LIBCPP_ABI_NAMESPACE::__tree_node*\29 +re2::CharClassBuilder::Negate\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__tree_balance_after_insert\5babi:nn210108\5d*>\28std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\2c\20std::_LIBCPP_ABI_NAMESPACE::__tree_node_base*\29 +std::_LIBCPP_ABI_NAMESPACE::vector>::__throw_length_error\5babi:nn210108\5d\28\29,std::_LIBCPP_ABI_NAMESPACE::__throw_length_error\5babi:nn210108\5d\28char\20const*\29 re2::CharClassBuilder::GetCharClass\28\29,operator\20new\5b\5d\28unsigned\20long\29 re2::CharClassBuilder::GetCharClass\28\29,opa_abort re2::NumCapturesWalker::~NumCapturesWalker\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::NumCapturesWalker::~NumCapturesWalker\28\29,operator\20delete\28void*\29 +re2::NumCapturesWalker::~NumCapturesWalker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 
re2::Regexp::Simplify\28\29,re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29 re2::Regexp::Simplify\28\29,re2::Regexp::Decref\28\29 re2::Regexp::Simplify\28\29,re2::Regexp::Walker::~Walker\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,operator\20delete\28void*\29 -re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29 re2::Regexp::Walker::WalkInternal\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool\29,operator\20new\5b\5d\28unsigned\20long\29 re2::Regexp::Walker::~Walker\28\29,operator\20delete\5b\5d\28void*\29 -re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\29 -re2::Regexp::Walker::~Walker\28\29,std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29 +re2::Regexp::Walker::~Walker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +re2::Regexp::Walker::~Walker\28\29,std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::~deque\5babi:nn210108\5d\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::CoalesceWalker::Copy\28re2::Regexp*\29,re2::Regexp::Incref\28\29 re2::CoalesceWalker::ShortVisit\28re2::Regexp*\2c\20re2::Regexp*\29,re2::Regexp::Incref\28\29 re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Incref\28\29 
re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::CoalesceWalker::CanCoalesce\28re2::Regexp*\2c\20re2::Regexp*\29 re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Decref\28\29 -re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::CoalesceWalker::DoCoalesce\28re2::Regexp**\2c\20re2::Regexp**\29 re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,operator\20new\28unsigned\20long\29 re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,opa_abort re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,operator\20new\5b\5d\28unsigned\20long\29 +re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::CoalesceWalker::DoCoalesce\28re2::Regexp**\2c\20re2::Regexp**\29 +re2::CoalesceWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,opa_abort re2::CoalesceWalker::CanCoalesce\28re2::Regexp*\2c\20re2::Regexp*\29,re2::Regexp::Equal\28re2::Regexp*\2c\20re2::Regexp*\29 re2::CoalesceWalker::DoCoalesce\28re2::Regexp**\2c\20re2::Regexp**\29,re2::Regexp::Incref\28\29 re2::CoalesceWalker::DoCoalesce\28re2::Regexp**\2c\20re2::Regexp**\29,re2::Regexp::Repeat\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\2c\20int\2c\20int\29 @@ -2410,52 +2496,51 @@ re2::SimplifyWalker::ShortVisit\28re2::Regexp*\2c\20re2::Regexp*\29,re2::Regexp: 
re2::SimplifyWalker::PreVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20bool*\29,re2::Regexp::Incref\28\29 re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Decref\28\29 re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Incref\28\29 -re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29 -re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,operator\20new\28unsigned\20long\29 re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,opa_abort re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,operator\20new\5b\5d\28unsigned\20long\29 -re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,opa_abort -re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,operator\20new\28unsigned\20long\29 -re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 -re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,re2::Regexp::Incref\28\29 +re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,opa_abort 
+re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29 +re2::SimplifyWalker::PostVisit\28re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp*\2c\20re2::Regexp**\2c\20int\29,re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Incref\28\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Star\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Plus\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20new\28unsigned\20long\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Concat\28re2::Regexp**\2c\20int\2c\20re2::Regexp::ParseFlags\29 -re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20delete\28void*\29 +re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,re2::Regexp::Quest\28re2::Regexp*\2c\20re2::Regexp::ParseFlags\29 re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,operator\20new\5b\5d\28unsigned\20long\29 
-re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,abort +re2::SimplifyWalker::SimplifyRepeat\28re2::Regexp*\2c\20int\2c\20int\2c\20re2::Regexp::ParseFlags\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,opa_abort +re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,operator\20new\28unsigned\20long\29 +re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,re2::Regexp::Regexp\28re2::RegexpOp\2c\20re2::Regexp::ParseFlags\29 +re2::SimplifyWalker::SimplifyCharClass\28re2::Regexp*\29,re2::Regexp::Incref\28\29 re2::CoalesceWalker::~CoalesceWalker\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::CoalesceWalker::~CoalesceWalker\28\29,operator\20delete\28void*\29 +re2::CoalesceWalker::~CoalesceWalker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 re2::SimplifyWalker::~SimplifyWalker\28\29,re2::Regexp::Walker::~Walker\28\29 -re2::SimplifyWalker::~SimplifyWalker\28\29,operator\20delete\28void*\29 -std::__1::__deque_base\2c\20std::__1::allocator\20>\20>::~__deque_base\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,memmove -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,operator\20delete\28void*\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29 
-std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29 -std::__1::deque\2c\20std::__1::allocator\20>\20>::__add_back_capacity\28\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>\20>::push_front\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,operator\20delete\28void*\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_back\28re2::WalkState*&&\29,abort -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,memmove -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20new\28unsigned\20long\29 -std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,operator\20delete\28void*\29 
-std::__1::__split_buffer*\2c\20std::__1::allocator*>&>::push_front\28re2::WalkState*\20const&\29,abort +re2::SimplifyWalker::~SimplifyWalker\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20new\28unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,operator\20delete\28void*\2c\20unsigned\20long\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29 +std::_LIBCPP_ABI_NAMESPACE::deque\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator>>::__add_back_capacity\28\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_back*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,memmove +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>&>::emplace_front*&>\28re2::WalkState*&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,memmove 
+void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20new\28unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,operator\20delete\28void*\2c\20unsigned\20long\29 +void\20std::_LIBCPP_ABI_NAMESPACE::__split_buffer*\2c\20std::_LIBCPP_ABI_NAMESPACE::allocator*>>::emplace_front*>\28re2::WalkState*&&\29,std::__throw_bad_array_new_length\5babi:nn210108\5d\28\29 +re2::StringPiece::find\28char\2c\20unsigned\20long\29\20const,memchr diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm index 667b9cdd49..a294543e88 100644 Binary files a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm and b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm differ diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go index ecdd8d82c1..92ef107ee5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/optimizations.go @@ -133,7 +133,7 @@ func (c *Compiler) removeUnusedCode() error { } caller, ok := c.funcs[callerName] if !ok { - return fmt.Errorf("caller not found: %s (%s)", cg[i][0], callerName) + continue // without a caller, it should get removed anyways (right?) 
} callee, ok := c.funcs[calleeName] if !ok { diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go index 9a5cebec54..b7f1a27812 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go +++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go @@ -12,7 +12,6 @@ import ( "fmt" "io" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/compiler/wasm/opa" "github.com/open-policy-agent/opa/internal/debug" "github.com/open-policy-agent/opa/internal/wasm/encoding" @@ -20,8 +19,9 @@ import ( "github.com/open-policy-agent/opa/internal/wasm/module" "github.com/open-policy-agent/opa/internal/wasm/types" "github.com/open-policy-agent/opa/internal/wasm/util" - "github.com/open-policy-agent/opa/ir" - opatypes "github.com/open-policy-agent/opa/types" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" + opatypes "github.com/open-policy-agent/opa/v1/types" ) // Record Wasm ABI version in exported global variable @@ -32,7 +32,6 @@ const ( opaWasmABIMinorVersionVar = "opa_wasm_abi_minor_version" ) -// nolint: deadcode,varcheck const ( opaTypeNull int32 = iota + 1 opaTypeBoolean @@ -90,6 +89,7 @@ var builtinsFunctions = map[string]string{ ast.Floor.Name: "opa_arith_floor", ast.Rem.Name: "opa_arith_rem", ast.ArrayConcat.Name: "opa_array_concat", + ast.ArrayFlatten.Name: "opa_array_flatten", ast.ArrayReverse.Name: "opa_array_reverse", ast.ArraySlice.Name: "opa_array_slice", ast.SetDiff.Name: "opa_set_diff", @@ -162,6 +162,7 @@ var builtinsFunctions = map[string]string{ ast.TrimRight.Name: "opa_strings_trim_right", ast.TrimSuffix.Name: "opa_strings_trim_suffix", ast.TrimSpace.Name: "opa_strings_trim_space", + ast.InternalTemplateString.Name: "opa_template_string", ast.NumbersRange.Name: "opa_numbers_range", ast.ToNumber.Name: "opa_to_number", ast.WalkBuiltin.Name: 
"opa_value_transitive_closure", @@ -340,7 +341,7 @@ func (c *Compiler) initModule() error { // two times. But let's deal with that when it happens. if _, ok := c.funcs[name]; ok { // already seen c.debug.Printf("function name duplicate: %s (%d)", name, fn.Index) - name = name + ".1" + name += ".1" } c.funcs[name] = fn.Index } @@ -348,7 +349,7 @@ func (c *Compiler) initModule() error { for _, fn := range c.policy.Funcs.Funcs { params := make([]types.ValueType, len(fn.Params)) - for i := 0; i < len(params); i++ { + for i := range params { params[i] = types.I32 } @@ -414,7 +415,7 @@ func (c *Compiler) initModule() error { }, }, }, - Init: bytes.Repeat([]byte{0}, int(heapBase-offset)), + Init: make([]byte, int(heapBase-offset)), }) return nil @@ -827,7 +828,7 @@ func (c *Compiler) compileFunc(fn *ir.Func) error { memoize := len(fn.Params) == 2 if len(fn.Params) == 0 { - return fmt.Errorf("illegal function: zero args") + return errors.New("illegal function: zero args") } c.nextLocal = 0 @@ -996,12 +997,16 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err for _, stmt := range block.Stmts { switch stmt := stmt.(type) { case *ir.ResultSetAddStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.lrs}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.lrs}, + instruction.GetLocal{Index: c.local(stmt.Value)}, + instruction.Call{Index: c.function(opaSetAdd)}, + ) case *ir.ReturnLocalStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)}) - instrs = append(instrs, instruction.Return{}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Source)}, + instruction.Return{}, + ) case *ir.BlockStmt: for i := range stmt.Blocks { block, err := c.compileBlock(stmt.Blocks[i]) @@ -1029,8 +1034,10 @@ func (c *Compiler) compileBlock(block *ir.Block) 
([]instruction.Instruction, err return instrs, err } case *ir.AssignVarStmt: - instrs = append(instrs, c.instrRead(stmt.Source)) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + c.instrRead(stmt.Source), + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.AssignVarOnceStmt: instrs = append(instrs, instruction.Block{ Instrs: []instruction.Instruction{ @@ -1052,9 +1059,11 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err }, }) case *ir.AssignIntStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Target)}) - instrs = append(instrs, instruction.I64Const{Value: stmt.Value}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueNumberSetInt)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Target)}, + instruction.I64Const{Value: stmt.Value}, + instruction.Call{Index: c.function(opaValueNumberSetInt)}, + ) case *ir.ScanStmt: if err := c.compileScan(stmt, &instrs); err != nil { return nil, err @@ -1067,12 +1076,14 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err } case *ir.DotStmt: if loc, ok := stmt.Source.Value.(ir.Local); ok { - instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) - instrs = append(instrs, c.instrRead(stmt.Key)) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueGet)}) - instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Target)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(loc)}, + c.instrRead(stmt.Key), + instruction.Call{Index: c.function(opaValueGet)}, + instruction.TeeLocal{Index: c.local(stmt.Target)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) } else { // Booleans and string sources would lead to the BrIf (since opa_value_get // on them returns 0), so let's skip trying 
that. @@ -1080,97 +1091,131 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err break } case *ir.LenStmt: - instrs = append(instrs, c.instrRead(stmt.Source)) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueLength)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaNumberSize)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + c.instrRead(stmt.Source), + instruction.Call{Index: c.function(opaValueLength)}, + instruction.Call{Index: c.function(opaNumberSize)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.EqualStmt: - instrs = append(instrs, c.instrRead(stmt.A)) - instrs = append(instrs, c.instrRead(stmt.B)) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + c.instrRead(stmt.A), + c.instrRead(stmt.B), + instruction.Call{Index: c.function(opaValueCompare)}, + instruction.BrIf{Index: 0}, + ) case *ir.NotEqualStmt: - instrs = append(instrs, c.instrRead(stmt.A)) - instrs = append(instrs, c.instrRead(stmt.B)) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + c.instrRead(stmt.A), + c.instrRead(stmt.B), + instruction.Call{Index: c.function(opaValueCompare)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) case *ir.MakeNullStmt: - instrs = append(instrs, instruction.Call{Index: c.function(opaNull)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.Call{Index: c.function(opaNull)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeNumberIntStmt: - instrs = append(instrs, instruction.I64Const{Value: stmt.Value}) - instrs = append(instrs, instruction.Call{Index: 
c.function(opaNumberInt)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.I64Const{Value: stmt.Value}, + instruction.Call{Index: c.function(opaNumberInt)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeNumberRefStmt: - instrs = append(instrs, instruction.I32Const{Value: c.stringAddr(stmt.Index)}) - instrs = append(instrs, instruction.I32Const{Value: int32(len(c.policy.Static.Strings[stmt.Index].Value))}) - instrs = append(instrs, instruction.Call{Index: c.function(opaNumberRef)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.I32Const{Value: c.stringAddr(stmt.Index)}, + instruction.I32Const{Value: int32(len(c.policy.Static.Strings[stmt.Index].Value))}, + instruction.Call{Index: c.function(opaNumberRef)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeArrayStmt: - instrs = append(instrs, instruction.I32Const{Value: stmt.Capacity}) - instrs = append(instrs, instruction.Call{Index: c.function(opaArrayWithCap)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.I32Const{Value: stmt.Capacity}, + instruction.Call{Index: c.function(opaArrayWithCap)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeObjectStmt: - instrs = append(instrs, instruction.Call{Index: c.function(opaObject)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.Call{Index: c.function(opaObject)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.MakeSetStmt: - instrs = append(instrs, instruction.Call{Index: c.function(opaSet)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.Call{Index: c.function(opaSet)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case 
*ir.IsArrayStmt: if loc, ok := stmt.Source.Value.(ir.Local); ok { - instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)}) - instrs = append(instrs, instruction.I32Const{Value: opaTypeArray}) - instrs = append(instrs, instruction.I32Ne{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(loc)}, + instruction.Call{Index: c.function(opaValueType)}, + instruction.I32Const{Value: opaTypeArray}, + instruction.I32Ne{}, + instruction.BrIf{Index: 0}, + ) } else { instrs = append(instrs, instruction.Br{Index: 0}) break } case *ir.IsObjectStmt: if loc, ok := stmt.Source.Value.(ir.Local); ok { - instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)}) - instrs = append(instrs, instruction.I32Const{Value: opaTypeObject}) - instrs = append(instrs, instruction.I32Ne{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(loc)}, + instruction.Call{Index: c.function(opaValueType)}, + instruction.I32Const{Value: opaTypeObject}, + instruction.I32Ne{}, + instruction.BrIf{Index: 0}, + ) } else { instrs = append(instrs, instruction.Br{Index: 0}) break } case *ir.IsSetStmt: if loc, ok := stmt.Source.Value.(ir.Local); ok { - instrs = append(instrs, instruction.GetLocal{Index: c.local(loc)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)}) - instrs = append(instrs, instruction.I32Const{Value: opaTypeSet}) - instrs = append(instrs, instruction.I32Ne{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(loc)}, + instruction.Call{Index: c.function(opaValueType)}, + instruction.I32Const{Value: opaTypeSet}, + instruction.I32Ne{}, + instruction.BrIf{Index: 0}, + ) } else { instrs = append(instrs, 
instruction.Br{Index: 0}) break } case *ir.IsUndefinedStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)}) - instrs = append(instrs, instruction.I32Const{Value: 0}) - instrs = append(instrs, instruction.I32Ne{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Source)}, + instruction.I32Const{Value: 0}, + instruction.I32Ne{}, + instruction.BrIf{Index: 0}, + ) case *ir.ResetLocalStmt: - instrs = append(instrs, instruction.I32Const{Value: 0}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.I32Const{Value: 0}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.IsDefinedStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Source)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) case *ir.ArrayAppendStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Array)}) - instrs = append(instrs, c.instrRead(stmt.Value)) - instrs = append(instrs, instruction.Call{Index: c.function(opaArrayAppend)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Array)}, + c.instrRead(stmt.Value), + instruction.Call{Index: c.function(opaArrayAppend)}, + ) case *ir.ObjectInsertStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Object)}) - instrs = append(instrs, c.instrRead(stmt.Key)) - instrs = append(instrs, c.instrRead(stmt.Value)) - instrs = append(instrs, instruction.Call{Index: c.function(opaObjectInsert)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Object)}, + c.instrRead(stmt.Key), + c.instrRead(stmt.Value), + instruction.Call{Index: c.function(opaObjectInsert)}, + ) case *ir.ObjectInsertOnceStmt: tmp := 
c.genLocal() instrs = append(instrs, instruction.Block{ @@ -1197,14 +1242,18 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err }, }) case *ir.ObjectMergeStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueMerge)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.A)}, + instruction.GetLocal{Index: c.local(stmt.B)}, + instruction.Call{Index: c.function(opaValueMerge)}, + instruction.SetLocal{Index: c.local(stmt.Target)}, + ) case *ir.SetAddStmt: - instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Set)}) - instrs = append(instrs, c.instrRead(stmt.Value)) - instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)}) + instrs = append(instrs, + instruction.GetLocal{Index: c.local(stmt.Set)}, + c.instrRead(stmt.Value), + instruction.Call{Index: c.function(opaSetAdd)}, + ) default: var buf bytes.Buffer err := ir.Pretty(&buf, stmt) @@ -1220,8 +1269,7 @@ func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, err func (c *Compiler) compileScan(scan *ir.ScanStmt, result *[]instruction.Instruction) error { var instrs = *result - instrs = append(instrs, instruction.I32Const{Value: 0}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(scan.Key)}) + instrs = append(instrs, instruction.I32Const{Value: 0}, instruction.SetLocal{Index: c.local(scan.Key)}) body, err := c.compileScanBlock(scan) if err != nil { return err @@ -1236,23 +1284,22 @@ func (c *Compiler) compileScan(scan *ir.ScanStmt, result *[]instruction.Instruct } func (c *Compiler) compileScanBlock(scan *ir.ScanStmt) ([]instruction.Instruction, error) { - var instrs []instruction.Instruction - - // Execute iterator. 
- instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Source)}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Key)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueIter)}) - - // Check for emptiness. - instrs = append(instrs, instruction.TeeLocal{Index: c.local(scan.Key)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 1}) - - // Load value. - instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Source)}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Key)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaValueGet)}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(scan.Value)}) + //nolint:prealloc // instruction list is known and fixed, clearer as slice literal + instrs := []instruction.Instruction{ + // Execute iterator. + instruction.GetLocal{Index: c.local(scan.Source)}, + instruction.GetLocal{Index: c.local(scan.Key)}, + instruction.Call{Index: c.function(opaValueIter)}, + // Check for emptiness. + instruction.TeeLocal{Index: c.local(scan.Key)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 1}, + // Load value. + instruction.GetLocal{Index: c.local(scan.Source)}, + instruction.GetLocal{Index: c.local(scan.Key)}, + instruction.Call{Index: c.function(opaValueGet)}, + instruction.SetLocal{Index: c.local(scan.Value)}, + } // Loop body. 
nested, err := c.compileBlock(scan.Block) @@ -1272,8 +1319,7 @@ func (c *Compiler) compileNot(not *ir.NotStmt, result *[]instruction.Instruction // generate and initialize condition variable cond := c.genLocal() - instrs = append(instrs, instruction.I32Const{Value: 1}) - instrs = append(instrs, instruction.SetLocal{Index: cond}) + instrs = append(instrs, instruction.I32Const{Value: 1}, instruction.SetLocal{Index: cond}) nested, err := c.compileBlock(not.Block) if err != nil { @@ -1281,14 +1327,15 @@ func (c *Compiler) compileNot(not *ir.NotStmt, result *[]instruction.Instruction } // unset condition variable if end of block is reached - nested = append(nested, instruction.I32Const{Value: 0}) - nested = append(nested, instruction.SetLocal{Index: cond}) - instrs = append(instrs, instruction.Block{Instrs: nested}) - - // break out of block if condition variable was unset - instrs = append(instrs, instruction.GetLocal{Index: cond}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, instruction.Block{Instrs: append(nested, + instruction.I32Const{Value: 0}, + instruction.SetLocal{Index: cond}, + )}, + // break out of block if condition variable was unset + instruction.GetLocal{Index: cond}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) *result = instrs return nil @@ -1298,34 +1345,36 @@ func (c *Compiler) compileWithStmt(with *ir.WithStmt, result *[]instruction.Inst var instrs = *result save := c.genLocal() - instrs = append(instrs, instruction.Call{Index: c.function(opaMemoizePush)}) - instrs = append(instrs, instruction.GetLocal{Index: c.local(with.Local)}) - instrs = append(instrs, instruction.SetLocal{Index: save}) + instrs = append(instrs, + instruction.Call{Index: c.function(opaMemoizePush)}, + instruction.GetLocal{Index: c.local(with.Local)}, + instruction.SetLocal{Index: save}, + ) if len(with.Path) == 0 { - instrs = append(instrs, c.instrRead(with.Value)) - instrs = 
append(instrs, instruction.SetLocal{Index: c.local(with.Local)}) + instrs = append(instrs, c.instrRead(with.Value), instruction.SetLocal{Index: c.local(with.Local)}) } else { instrs = c.compileUpsert(with.Local, with.Path, with.Value, with.Location, instrs) } undefined := c.genLocal() - instrs = append(instrs, instruction.I32Const{Value: 1}) - instrs = append(instrs, instruction.SetLocal{Index: undefined}) + instrs = append(instrs, instruction.I32Const{Value: 1}, instruction.SetLocal{Index: undefined}) nested, err := c.compileBlock(with.Block) if err != nil { return err } - nested = append(nested, instruction.I32Const{Value: 0}) - nested = append(nested, instruction.SetLocal{Index: undefined}) - instrs = append(instrs, instruction.Block{Instrs: nested}) - instrs = append(instrs, instruction.GetLocal{Index: save}) - instrs = append(instrs, instruction.SetLocal{Index: c.local(with.Local)}) - instrs = append(instrs, instruction.Call{Index: c.function(opaMemoizePop)}) - instrs = append(instrs, instruction.GetLocal{Index: undefined}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + nested = append(nested, instruction.I32Const{Value: 0}, instruction.SetLocal{Index: undefined}) + + instrs = append(instrs, + instruction.Block{Instrs: nested}, + instruction.GetLocal{Index: save}, + instruction.SetLocal{Index: c.local(with.Local)}, + instruction.Call{Index: c.function(opaMemoizePop)}, + instruction.GetLocal{Index: undefined}, + instruction.BrIf{Index: 0}, + ) *result = instrs @@ -1333,110 +1382,111 @@ func (c *Compiler) compileWithStmt(with *ir.WithStmt, result *[]instruction.Inst } func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Operand, _ ir.Location, instrs []instruction.Instruction) []instruction.Instruction { - lcopy := c.genLocal() // holds copy of local - instrs = append(instrs, instruction.GetLocal{Index: c.local(local)}) - instrs = append(instrs, instruction.SetLocal{Index: lcopy}) - - // Shallow copy the local if defined otherwise 
initialize to an empty object. - instrs = append(instrs, instruction.Block{ - Instrs: []instruction.Instruction{ - instruction.Block{Instrs: []instruction.Instruction{ - instruction.GetLocal{Index: lcopy}, - instruction.I32Eqz{}, - instruction.BrIf{Index: 0}, - instruction.GetLocal{Index: lcopy}, - instruction.Call{Index: c.function(opaValueShallowCopy)}, + instrs = append(instrs, + instruction.GetLocal{Index: c.local(local)}, + instruction.SetLocal{Index: lcopy}, + // Shallow copy the local if defined otherwise initialize to an empty object. + instruction.Block{ + Instrs: []instruction.Instruction{ + instruction.Block{Instrs: []instruction.Instruction{ + instruction.GetLocal{Index: lcopy}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + instruction.GetLocal{Index: lcopy}, + instruction.Call{Index: c.function(opaValueShallowCopy)}, + instruction.TeeLocal{Index: lcopy}, + instruction.SetLocal{Index: c.local(local)}, + instruction.Br{Index: 1}, + }}, + instruction.Call{Index: c.function(opaObject)}, instruction.TeeLocal{Index: lcopy}, instruction.SetLocal{Index: c.local(local)}, - instruction.Br{Index: 1}, - }}, - instruction.Call{Index: c.function(opaObject)}, - instruction.TeeLocal{Index: lcopy}, - instruction.SetLocal{Index: c.local(local)}, - }, - }) + }, + }) // Initialize the locals that specify the path of the upsert operation. lpath := make(map[int]uint32, len(path)) - for i := 0; i < len(path); i++ { + for i := range path { lpath[i] = c.genLocal() - instrs = append(instrs, instruction.I32Const{Value: c.opaStringAddr(path[i])}) - instrs = append(instrs, instruction.SetLocal{Index: lpath[i]}) + instrs = append(instrs, + instruction.I32Const{Value: c.opaStringAddr(path[i])}, + instruction.SetLocal{Index: lpath[i]}, + ) } // Generate a block that traverses the path of the upsert operation, // shallowing copying values at each step as needed. Stop before the final // segment that will only be inserted. 
- var inner []instruction.Instruction + inner := make([]instruction.Instruction, 0, len(path)*21+1) ltemp := c.genLocal() - for i := 0; i < len(path)-1; i++ { - - // Lookup the next part of the path. - inner = append(inner, instruction.GetLocal{Index: lcopy}) - inner = append(inner, instruction.GetLocal{Index: lpath[i]}) - inner = append(inner, instruction.Call{Index: c.function(opaValueGet)}) - inner = append(inner, instruction.SetLocal{Index: ltemp}) - - // If the next node is missing, break. - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.I32Eqz{}) - inner = append(inner, instruction.BrIf{Index: uint32(i)}) - - // If the next node is not an object, break. - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.Call{Index: c.function(opaValueType)}) - inner = append(inner, instruction.I32Const{Value: opaTypeObject}) - inner = append(inner, instruction.I32Ne{}) - inner = append(inner, instruction.BrIf{Index: uint32(i)}) - - // Otherwise, shallow copy the next node node and insert into the copy - // before continuing. - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.Call{Index: c.function(opaValueShallowCopy)}) - inner = append(inner, instruction.SetLocal{Index: ltemp}) - inner = append(inner, instruction.GetLocal{Index: lcopy}) - inner = append(inner, instruction.GetLocal{Index: lpath[i]}) - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.Call{Index: c.function(opaObjectInsert)}) - inner = append(inner, instruction.GetLocal{Index: ltemp}) - inner = append(inner, instruction.SetLocal{Index: lcopy}) + for i := range len(path) - 1 { + inner = append(inner, + // Lookup the next part of the path. 
+ instruction.GetLocal{Index: lcopy}, + instruction.GetLocal{Index: lpath[i]}, + instruction.Call{Index: c.function(opaValueGet)}, + instruction.SetLocal{Index: ltemp}, + // If the next node is missing, break. + instruction.GetLocal{Index: ltemp}, + instruction.I32Eqz{}, + instruction.BrIf{Index: uint32(i)}, + // If the next node is not an object, break. + instruction.GetLocal{Index: ltemp}, + instruction.Call{Index: c.function(opaValueType)}, + instruction.I32Const{Value: opaTypeObject}, + instruction.I32Ne{}, + instruction.BrIf{Index: uint32(i)}, + // Otherwise, shallow copy the next node node and insert into the copy + // before continuing. + instruction.GetLocal{Index: ltemp}, + instruction.Call{Index: c.function(opaValueShallowCopy)}, + instruction.SetLocal{Index: ltemp}, + instruction.GetLocal{Index: lcopy}, + instruction.GetLocal{Index: lpath[i]}, + instruction.GetLocal{Index: ltemp}, + instruction.Call{Index: c.function(opaObjectInsert)}, + instruction.GetLocal{Index: ltemp}, + instruction.SetLocal{Index: lcopy}, + ) } inner = append(inner, instruction.Br{Index: uint32(len(path) - 1)}) // Generate blocks that handle missing nodes during traversal. 
- var block []instruction.Instruction + block := make([]instruction.Instruction, 0, len(path)*10) lval := c.genLocal() - for i := 0; i < len(path)-1; i++ { - block = append(block, instruction.Block{Instrs: inner}) - block = append(block, instruction.Call{Index: c.function(opaObject)}) - block = append(block, instruction.SetLocal{Index: lval}) - block = append(block, instruction.GetLocal{Index: lcopy}) - block = append(block, instruction.GetLocal{Index: lpath[i]}) - block = append(block, instruction.GetLocal{Index: lval}) - block = append(block, instruction.Call{Index: c.function(opaObjectInsert)}) - block = append(block, instruction.GetLocal{Index: lval}) - block = append(block, instruction.SetLocal{Index: lcopy}) + for i := range len(path) - 1 { + block = append(block, + instruction.Block{Instrs: inner}, + instruction.Call{Index: c.function(opaObject)}, + instruction.SetLocal{Index: lval}, + instruction.GetLocal{Index: lcopy}, + instruction.GetLocal{Index: lpath[i]}, + instruction.GetLocal{Index: lval}, + instruction.Call{Index: c.function(opaObjectInsert)}, + instruction.GetLocal{Index: lval}, + instruction.SetLocal{Index: lcopy}, + ) inner = block block = nil } // Finish by inserting the statement's value into the shallow copied node. 
- instrs = append(instrs, instruction.Block{Instrs: inner}) - instrs = append(instrs, instruction.GetLocal{Index: lcopy}) - instrs = append(instrs, instruction.GetLocal{Index: lpath[len(path)-1]}) - instrs = append(instrs, c.instrRead(value)) - instrs = append(instrs, instruction.Call{Index: c.function(opaObjectInsert)}) - - return instrs + return append(instrs, + instruction.Block{Instrs: inner}, + instruction.GetLocal{Index: lcopy}, + instruction.GetLocal{Index: lpath[len(path)-1]}, + c.instrRead(value), + instruction.Call{Index: c.function(opaObjectInsert)}, + ) } func (c *Compiler) compileCallDynamicStmt(stmt *ir.CallDynamicStmt, result *[]instruction.Instruction) error { - instrs := []instruction.Instruction{} + instrs := make([]instruction.Instruction, 0, 3+3*len(stmt.Path)+len(stmt.Args)+10) larray := c.genLocal() lidx := c.genLocal() @@ -1509,7 +1559,7 @@ func (c *Compiler) compileCallStmt(stmt *ir.CallStmt, result *[]instruction.Inst func (c *Compiler) compileInternalCall(stmt *ir.CallStmt, index uint32, result *[]instruction.Instruction) error { - instrs := []instruction.Instruction{} + instrs := make([]instruction.Instruction, 0, len(stmt.Args)+4) // Prepare function args and call. 
for _, arg := range stmt.Args { @@ -1535,8 +1585,7 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul } instrs := *result - instrs = append(instrs, instruction.I32Const{Value: ef.ID}) - instrs = append(instrs, instruction.I32Const{Value: 0}) // unused context parameter + instrs = append(instrs, instruction.I32Const{Value: ef.ID}, instruction.I32Const{Value: 0}) // unused context parameter for _, arg := range stmt.Args { instrs = append(instrs, c.instrRead(arg)) @@ -1545,9 +1594,11 @@ func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, ef externalFunc, resul instrs = append(instrs, instruction.Call{Index: c.function(builtinDispatchers[len(stmt.Args)])}) if ef.Decl.Result() != nil { - instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Result)}) - instrs = append(instrs, instruction.I32Eqz{}) - instrs = append(instrs, instruction.BrIf{Index: 0}) + instrs = append(instrs, + instruction.TeeLocal{Index: c.local(stmt.Result)}, + instruction.I32Eqz{}, + instruction.BrIf{Index: 0}, + ) } else { instrs = append(instrs, instruction.Drop{}) } @@ -1678,7 +1729,7 @@ func (c *Compiler) genLocal() uint32 { func (c *Compiler) function(name string) uint32 { fidx, ok := c.funcs[name] if !ok { - panic(fmt.Sprintf("function not found: %s", name)) + panic("function not found: " + name) } return fidx } diff --git a/vendor/github.com/open-policy-agent/opa/internal/config/config.go b/vendor/github.com/open-policy-agent/opa/internal/config/config.go deleted file mode 100644 index b1a9731f65..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/config/config.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package config implements helper functions to parse OPA's configuration. 
-package config - -import ( - "encoding/json" - "fmt" - "os" - "regexp" - "strings" - - "sigs.k8s.io/yaml" - - "github.com/open-policy-agent/opa/internal/strvals" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" -) - -// ServiceOptions stores the options passed to ParseServicesConfig -type ServiceOptions struct { - Raw json.RawMessage - AuthPlugin rest.AuthPluginLookupFunc - Keys map[string]*keys.Config - Logger logging.Logger - DistributedTacingOpts tracing.Options -} - -// ParseServicesConfig returns a set of named service clients. The service -// clients can be specified either as an array or as a map. Some systems (e.g., -// Helm) do not have proper support for configuration values nested under -// arrays, so just support both here. -func ParseServicesConfig(opts ServiceOptions) (map[string]rest.Client, error) { - - services := map[string]rest.Client{} - - var arr []json.RawMessage - var obj map[string]json.RawMessage - - if err := util.Unmarshal(opts.Raw, &arr); err == nil { - for _, s := range arr { - client, err := rest.New(s, opts.Keys, rest.AuthPluginLookup(opts.AuthPlugin), rest.Logger(opts.Logger), rest.DistributedTracingOpts(opts.DistributedTacingOpts)) - if err != nil { - return nil, err - } - services[client.Service()] = client - } - } else if util.Unmarshal(opts.Raw, &obj) == nil { - for k := range obj { - client, err := rest.New(obj[k], opts.Keys, rest.Name(k), rest.AuthPluginLookup(opts.AuthPlugin), rest.Logger(opts.Logger), rest.DistributedTracingOpts(opts.DistributedTacingOpts)) - if err != nil { - return nil, err - } - services[client.Service()] = client - } - } else { - // Return error from array decode as that is the default format. - return nil, err - } - - return services, nil -} - -// Load implements configuration file loading. 
The supplied config file will be -// read from disk (if specified) and overrides will be applied. If no config file is -// specified, the overrides can still be applied to an empty config. -func Load(configFile string, overrides []string, overrideFiles []string) ([]byte, error) { - baseConf := map[string]interface{}{} - - // User specified config file - if configFile != "" { - var bytes []byte - var err error - bytes, err = os.ReadFile(configFile) - if err != nil { - return nil, err - } - - processedConf := subEnvVars(string(bytes)) - - if err := yaml.Unmarshal([]byte(processedConf), &baseConf); err != nil { - return nil, fmt.Errorf("failed to parse %s: %s", configFile, err) - } - } - - overrideConf := map[string]interface{}{} - - // User specified a config override via --set - for _, override := range overrides { - processedOverride := subEnvVars(override) - if err := strvals.ParseInto(processedOverride, overrideConf); err != nil { - return nil, fmt.Errorf("failed parsing --set data: %s", err) - } - } - - // User specified a config override value via --set-file - for _, override := range overrideFiles { - reader := func(rs []rune) (interface{}, error) { - bytes, err := os.ReadFile(string(rs)) - value := strings.TrimSpace(string(bytes)) - return value, err - } - if err := strvals.ParseIntoFile(override, overrideConf, reader); err != nil { - return nil, fmt.Errorf("failed parsing --set-file data: %s", err) - } - } - - // Merge together base config file and overrides, prefer the overrides - conf := mergeValues(baseConf, overrideConf) - - // Take the patched config and marshal back to YAML - return yaml.Marshal(conf) -} - -// regex looking for ${...} notation strings -var envRegex = regexp.MustCompile(`(?U:\${.*})`) - -// subEnvVars will look for any environment variables in the passed in string -// with the syntax of ${VAR_NAME} and replace that string with ENV[VAR_NAME] -func subEnvVars(s string) string { - updatedConfig := envRegex.ReplaceAllStringFunc(s, func(s 
string) string { - // Trim off the '${' and '}' - if len(s) <= 3 { - // This should never happen.. - return "" - } - varName := s[2 : len(s)-1] - - // Lookup the variable in the environment. We play by - // bash rules.. if its undefined we'll treat it as an - // empty string instead of raising an error. - return os.Getenv(varName) - }) - - return updatedConfig -} - -// mergeValues will merge source and destination map, preferring values from the source map -func mergeValues(dest map[string]interface{}, src map[string]interface{}) map[string]interface{} { - for k, v := range src { - // If the key doesn't exist already, then just set the key to that value - if _, exists := dest[k]; !exists { - dest[k] = v - continue - } - nextMap, ok := v.(map[string]interface{}) - // If it isn't another map, overwrite the value - if !ok { - dest[k] = v - continue - } - // Edge case: If the key exists in the destination, but isn't a map - destMap, isMap := dest[k].(map[string]interface{}) - // If the source map has a map for this key, prefer it - if !isMap { - dest[k] = v - continue - } - // If we got to this point, it is a map in both, so merge them - dest[k] = mergeValues(destMap, nextMap) - } - return dest -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/debug/debug.go b/vendor/github.com/open-policy-agent/opa/internal/debug/debug.go index 7b90bd1bb0..9448aeb288 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/debug/debug.go +++ b/vendor/github.com/open-policy-agent/opa/internal/debug/debug.go @@ -8,7 +8,7 @@ import ( // Debug allows printing debug messages. type Debug interface { // Printf prints, with a short file:line-number prefix - Printf(format string, args ...interface{}) + Printf(format string, args ...any) // Writer returns the writer being written to, which may be // `io.Discard` if no debug output is requested. 
Writer() io.Writer diff --git a/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go b/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go index 00e8df6f88..dc3a231bc1 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go +++ b/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go @@ -5,25 +5,25 @@ package deepcopy // DeepCopy performs a recursive deep copy for nested slices/maps and -// returns the copied object. Supports []interface{} -// and map[string]interface{} only -func DeepCopy(val interface{}) interface{} { +// returns the copied object. Supports []any +// and map[string]any only +func DeepCopy(val any) any { switch val := val.(type) { - case []interface{}: - cpy := make([]interface{}, len(val)) + case []any: + cpy := make([]any, len(val)) for i := range cpy { cpy[i] = DeepCopy(val[i]) } return cpy - case map[string]interface{}: + case map[string]any: return Map(val) default: return val } } -func Map(val map[string]interface{}) map[string]interface{} { - cpy := make(map[string]interface{}, len(val)) +func Map(val map[string]any) map[string]any { + cpy := make(map[string]any, len(val)) for k := range val { cpy[k] = DeepCopy(val[k]) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go b/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go index 89e7e137b7..791753528f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go +++ b/vendor/github.com/open-policy-agent/opa/internal/edittree/bitvector/bitvector.go @@ -2,10 +2,12 @@ // which supports lookups, sets, appends, insertions, and deletions. package bitvector +import "slices" + // A BitVector is a variable sized vector of bits. It supports // lookups, sets, appends, insertions, and deletions. // -// This class is not thread safe. +// Operations are not thread safe. 
type BitVector struct { data []byte length int @@ -14,10 +16,25 @@ type BitVector struct { // NewBitVector creates and initializes a new bit vector with length // elements, using data as its initial contents. func NewBitVector(data []byte, length int) *BitVector { - return &BitVector{ - data: data, - length: length, + return &BitVector{data: data, length: length} +} + +func (vector *BitVector) Clear() *BitVector { + if vector == nil { + return nil } + clear(vector.data) + vector.length = 0 + + return vector +} + +func (vector *BitVector) Reset(size, length int) *BitVector { + clear(vector.data) + vector.data = slices.Grow(vector.data, size)[:size] + vector.length = length + + return vector } // Bytes returns a slice of the contents of the bit vector. If the caller changes the returned slice, @@ -36,7 +53,7 @@ func (vector *BitVector) Length() int { // position of the last byte in the slice. // This returns the bit that was shifted off of the last byte. func shiftLower(bit byte, b []byte) byte { - bit = bit << 7 + bit <<= 7 for i := len(b) - 1; i >= 0; i-- { newByte := b[i] >> 1 newByte |= bit @@ -51,7 +68,7 @@ func shiftLower(bit byte, b []byte) byte { // position of the first byte in the slice. // This returns the bit that was shifted off the last byte. 
func shiftHigher(bit byte, b []byte) byte { - for i := 0; i < len(b); i++ { + for i := range b { newByte := b[i] << 1 newByte |= bit bit = (b[i] & 0x80) >> 7 diff --git a/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go b/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go index 9cfaee8baf..64511e5eb5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go +++ b/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go @@ -146,16 +146,19 @@ package edittree import ( - "encoding/json" + "errors" "fmt" - "math/big" - "sort" "strings" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/edittree/bitvector" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) +var refPool = util.NewSlicePool[*ast.Term](1) + +var editTreePool = util.NewSyncPool[EditTree]() + // Deletions are encoded with a nil value pointer. type EditTree struct { value *ast.Term @@ -172,120 +175,79 @@ func NewEditTree(term *ast.Term) *EditTree { return nil } - var tree EditTree + return initForTerm(&EditTree{}, term) +} + +func EditTreeFromPool(term *ast.Term) *EditTree { + return initForTerm(editTreePool.Get(), term) +} + +func Dispose(e *EditTree) { + if e != nil { + editTreePool.Put(e.Reset()) + } +} + +func (e *EditTree) Reset() *EditTree { + e.value = nil + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) + + e.eliminated = e.eliminated.Clear() + e.insertions = e.insertions.Clear() + + return e +} + +func initForTerm(tree *EditTree, term *ast.Term) *EditTree { + tree.value = term + switch x := term.Value.(type) { case ast.Object, ast.Set: - tree = EditTree{ - value: term, - childKeys: map[int]*ast.Term{}, - childScalarValues: map[int]*ast.Term{}, - childCompositeValues: map[int]*EditTree{}, + if tree.childKeys == nil { + tree.childKeys = map[int]*ast.Term{} + } + if tree.childScalarValues == nil { + tree.childScalarValues 
= map[int]*ast.Term{} + } + if tree.childCompositeValues == nil { + tree.childCompositeValues = map[int]*EditTree{} } case *ast.Array: - tree = EditTree{ - value: term, - childScalarValues: map[int]*ast.Term{}, - childCompositeValues: map[int]*EditTree{}, + if tree.childScalarValues == nil { + tree.childScalarValues = map[int]*ast.Term{} + } + if tree.childCompositeValues == nil { + tree.childCompositeValues = map[int]*EditTree{} } bytesLength := ((x.Len() - 1) / 8) + 1 // How many bytes to use for the bit-vectors. - tree.eliminated = bitvector.NewBitVector(make([]byte, bytesLength), x.Len()) - tree.insertions = bitvector.NewBitVector(make([]byte, bytesLength), x.Len()) - default: - tree = EditTree{ - value: term, + if tree.eliminated == nil { + tree.eliminated = bitvector.NewBitVector(make([]byte, bytesLength), x.Len()) + } else { + tree.eliminated = tree.eliminated.Reset(bytesLength, x.Len()) + } + if tree.insertions == nil { + tree.insertions = bitvector.NewBitVector(make([]byte, bytesLength), x.Len()) + } else { + tree.insertions = tree.insertions.Reset(bytesLength, x.Len()) } } - return &tree + return tree } // Returns correct (collision-resolved) hash for this term + whether or not // it was found in the table already. func (e *EditTree) getKeyHash(key *ast.Term) (int, bool) { hash := key.Hash() - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. 
- var equal func(v ast.Value) bool - - switch x := key.Value.(type) { - case ast.Null, ast.Boolean, ast.String, ast.Var: - equal = func(y ast.Value) bool { return x == y } - case ast.Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y ast.Value) bool { - if y, ok := y.(ast.Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b ast.Value) bool { - if bNum, ok := b.(ast.Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - return false - } - - default: - equal = func(y ast.Value) bool { return ast.Compare(x, y) == 0 } - } // Look through childKeys, looking up the original hash // value first, and then use linear-probing to iter // through the keys until we either find the Term we're // after, or run out of candidates. 
for curr, ok := e.childKeys[hash]; ok; { - if equal(curr.Value) { + if ast.KeyHashEqual(curr.Value, key.Value) { return hash, true } @@ -308,17 +270,14 @@ func isComposite(t *ast.Term) bool { } } -//gcassert:inline func (e *EditTree) setChildKey(hash int, key *ast.Term) { e.childKeys[hash] = key } -//gcassert:inline func (e *EditTree) setChildScalarValue(hash int, value *ast.Term) { e.childScalarValues[hash] = value } -//gcassert:inline func (e *EditTree) setChildCompositeValue(hash int, child *EditTree) { e.childCompositeValues[hash] = child } @@ -336,13 +295,13 @@ func (e *EditTree) deleteChildValue(hash int) { // Insert creates a new child of e, and returns the new child EditTree node. func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) { if e.value == nil { - return nil, fmt.Errorf("deleted node encountered during insert operation") + return nil, errors.New("deleted node encountered during insert operation") } if key == nil { - return nil, fmt.Errorf("nil key provided for insert operation") + return nil, errors.New("nil key provided for insert operation") } if value == nil { - return nil, fmt.Errorf("nil value provided for insert operation") + return nil, errors.New("nil value provided for insert operation") } switch x := e.value.Value.(type) { @@ -355,11 +314,10 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) { // We only collapse this Set-typed node if a composite type is involved. if isComposite(key) { // TODO: Investigate re-rendering *only* the immediate composite children. 
- collapsed := e.Render() - e.value = collapsed - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} + e.value = e.Render() + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) } return e.unsafeInsertSet(key, value), nil case *ast.Array: @@ -368,7 +326,7 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) { return nil, err } if idx < 0 || idx > e.insertions.Length() { - return nil, fmt.Errorf("index for array insertion out of bounds") + return nil, errors.New("index for array insertion out of bounds") } return e.unsafeInsertArray(idx, value), nil default: @@ -378,12 +336,13 @@ func (e *EditTree) Insert(key, value *ast.Term) (*EditTree, error) { } func (e *EditTree) unsafeInsertObject(key, value *ast.Term) *EditTree { - child := NewEditTree(value) keyHash, found := e.getKeyHash(key) if found { e.deleteChildValue(keyHash) } e.setChildKey(keyHash, key) + + child := NewEditTree(value) if isComposite(value) { e.setChildCompositeValue(keyHash, child) } else { @@ -408,10 +367,9 @@ func (e *EditTree) unsafeInsertSet(key, value *ast.Term) *EditTree { } func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree { - child := NewEditTree(value) // Collect insertion indexes above the insertion site for rewriting. - rewritesScalars := []int{} - rewritesComposites := []int{} + var rewritesScalars, rewritesComposites []int + for i := idx; i < e.insertions.Length(); i++ { if e.insertions.Element(i) == 1 { if _, ok := e.childScalarValues[i]; ok { @@ -446,6 +404,8 @@ func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree { } else { e.insertions.Insert(1, idx) } + + child := NewEditTree(value) if isComposite(value) { e.setChildCompositeValue(idx, child) } else { @@ -458,10 +418,10 @@ func (e *EditTree) unsafeInsertArray(idx int, value *ast.Term) *EditTree { // already present in e. It then returns the deleted child EditTree node. 
func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { if e.value == nil { - return nil, fmt.Errorf("deleted node encountered during delete operation") + return nil, errors.New("deleted node encountered during delete operation") } if key == nil { - return nil, fmt.Errorf("nil key provided for delete operation") + return nil, errors.New("nil key provided for delete operation") } switch e.value.Value.(type) { @@ -504,9 +464,9 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { // TODO: Investigate re-rendering *only* the immediate composite children. collapsed := e.Render() e.value = collapsed - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) } else { keyHash, found := e.getKeyHash(key) // If child found, replace with delete node. If delete node already existed, error. @@ -532,7 +492,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { return nil, err } if idx < 0 || idx > e.insertions.Length()-1 { - return nil, fmt.Errorf("index for array delete out of bounds") + return nil, errors.New("index for array delete out of bounds") } // Collect insertion indexes above the delete site for rewriting. @@ -553,14 +513,14 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { } // Do rewrites to clear out the newly-removed element. 
e.deleteChildValue(idx) - for i := 0; i < len(rewritesScalars); i++ { + for i := range rewritesScalars { originalIdx := rewritesScalars[i] rewriteIdx := rewritesScalars[i] - 1 v := e.childScalarValues[originalIdx] e.deleteChildValue(originalIdx) e.setChildScalarValue(rewriteIdx, v) } - for i := 0; i < len(rewritesComposites); i++ { + for i := range rewritesComposites { originalIdx := rewritesComposites[i] rewriteIdx := rewritesComposites[i] - 1 v := e.childCompositeValues[originalIdx] @@ -592,7 +552,7 @@ func (e *EditTree) Delete(key *ast.Term) (*EditTree, error) { //gcassert:inline func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int { zeroesSeen := 0 - for i := 0; i < index; i++ { + for i := range index { if bv.Element(i) == 0 { zeroesSeen++ } @@ -602,7 +562,7 @@ func sumZeroesBelowIndex(index int, bv *bitvector.BitVector) int { func findIndexOfNthZero(n int, bv *bitvector.BitVector) (int, bool) { zeroesSeen := 0 - for i := 0; i < bv.Length(); i++ { + for i := range bv.Length() { if bv.Element(i) == 0 { zeroesSeen++ } @@ -616,7 +576,14 @@ func findIndexOfNthZero(n int, bv *bitvector.BitVector) (int, bool) { // Helper function for sets/objects when the key isn't present in either // child map. func (e *EditTree) fallbackDelete(key *ast.Term) (*EditTree, error) { - value, err := e.value.Value.Find(ast.Ref{key}) + // get ref from pool + rptr := refPool.Get(1) + defer refPool.Put(rptr) + + ref := *rptr + ref[0] = key + + value, err := e.value.Value.Find(ref) if err != nil { return nil, fmt.Errorf("cannot delete child key %v that does not exist", key) } @@ -638,7 +605,7 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { } // 1+ path segment case. if e.value == nil { - return nil, fmt.Errorf("nil value encountered where composite value was expected") + return nil, errors.New("nil value encountered where composite value was expected") } // Switch behavior based on types. 
@@ -672,14 +639,14 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { } // Fall back to looking up the key in e.value. // Extend the tree if key is present. Error otherwise. - if v, err := x.Find(ast.Ref{path[0]}); err == nil { + if v, err := x.Find(path[:1]); err == nil { child, err := e.Insert(path[0], ast.NewTerm(v)) if err != nil { return nil, err } return child.Unfold(path[1:]) } - return nil, fmt.Errorf("path %v does not exist in object term %v", ast.Ref{path[0]}, e.value.Value) + return nil, fmt.Errorf("path %v does not exist in object term %v", path[0], e.value.Value) case ast.Set: // Sets' keys *are* their values, so in order to allow accurate // traversal, we have to collapse the tree beneath this node, @@ -688,12 +655,11 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { if isComposite(key) { collapsed := e.Render() e.value = collapsed - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) } else { - keyHash, found := e.getKeyHash(key) - if found { + if keyHash, found := e.getKeyHash(key); found { if term, ok := e.childScalarValues[keyHash]; ok { child := NewEditTree(term) return child.Unfold(path[1:]) @@ -702,14 +668,14 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { } // Fall back to looking up the key in e.value. // Extend the tree if key is present. Error otherwise. 
- if v, err := e.value.Value.Find(ast.Ref{path[0]}); err == nil { + if v, err := e.value.Value.Find(path[:1]); err == nil { child, err := e.Insert(path[0], ast.NewTerm(v)) if err != nil { return nil, err } return child.Unfold(path[1:]) } - return nil, fmt.Errorf("path %v does not exist in set term %v", ast.Ref{path[0]}, e.value.Value) + return nil, fmt.Errorf("path %v does not exist in set term %v", path[:1], e.value.Value) case *ast.Array: idx, err := toIndex(e.insertions.Length(), path[0]) if err != nil { @@ -723,24 +689,30 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { return child.Unfold(path[1:]) } + idxt := ast.InternedTerm(idx) + rptr := refPool.Get(1) + defer refPool.Put(rptr) + + ref := *rptr + ref[0] = idxt + // Fall back to looking up the key in e.value. // Extend the tree if key is present. Error otherwise. - if v, err := x.Find(ast.Ref{ast.IntNumberTerm(idx)}); err == nil { + if v, err := x.Find(ref); err == nil { // TODO: Consider a more efficient "Replace" function that special-cases this for arrays instead? - _, err := e.Delete(ast.IntNumberTerm(idx)) - if err != nil { + if _, err := e.Delete(idxt); err != nil { return nil, err } - child, err := e.Insert(ast.IntNumberTerm(idx), ast.NewTerm(v)) + child, err := e.Insert(idxt, ast.NewTerm(v)) if err != nil { return nil, err } return child.Unfold(path[1:]) } - return nil, fmt.Errorf("path %v does not exist in array term %v", ast.Ref{ast.IntNumberTerm(idx)}, e.value.Value) + return nil, fmt.Errorf("path %v does not exist in array term %v", ast.InternedTerm(idx), e.value.Value) default: // Catch all primitive types. - return nil, fmt.Errorf("expected composite type for path %v, found value: %v (type: %T)", ast.Ref{path[0]}, x, x) + return nil, fmt.Errorf("expected composite type for path %v, found value: %v (type: %T)", path[0], x, x) } } @@ -832,7 +804,7 @@ func (e *EditTree) Render() *ast.Term { // original array. We build a new Array with modified/deleted keys. 
out := make([]*ast.Term, 0, e.insertions.Length()) eIdx := 0 - for i := 0; i < e.insertions.Length(); i++ { + for i := range e.insertions.Length() { // If the index == 0, that indicates we should look up the next // surviving original element. // If the index == 1, that indicates we should look up that @@ -862,8 +834,7 @@ func (e *EditTree) Render() *ast.Term { if t, ok := e.childScalarValues[i]; ok { out = append(out, t) } else if child, ok := e.childCompositeValues[i]; ok { - t := child.Render() - out = append(out, t) + out = append(out, child.Render()) } else { panic(fmt.Errorf("invalid index %d does not exist in array", i)) } @@ -880,14 +851,15 @@ func (e *EditTree) Render() *ast.Term { // Returns the inserted EditTree node. func (e *EditTree) InsertAtPath(path ast.Ref, value *ast.Term) (*EditTree, error) { if value == nil { - return nil, fmt.Errorf("cannot insert nil value into EditTree") + return nil, errors.New("cannot insert nil value into EditTree") } if len(path) == 0 { e.value = value - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) + if v, ok := value.Value.(*ast.Array); ok { bytesLength := ((v.Len() - 1) / 8) + 1 // How many bytes to use for the bit-vectors. e.eliminated = bitvector.NewBitVector(make([]byte, bytesLength), v.Len()) @@ -911,7 +883,7 @@ func (e *EditTree) DeleteAtPath(path ast.Ref) (*EditTree, error) { // Root document case: if len(path) == 0 { if e.value == nil { - return nil, fmt.Errorf("deleted node encountered during delete operation") + return nil, errors.New("deleted node encountered during delete operation") } e.value = nil e.childKeys = nil @@ -996,24 +968,19 @@ func (e *EditTree) Exists(path ast.Ref) bool { // so that we can accurately unfold it again for an update, // once we know that the key we care about is present. 
if isComposite(key) { - collapsed := e.Render() - e.value = collapsed - e.childKeys = map[int]*ast.Term{} - e.childScalarValues = map[int]*ast.Term{} - e.childCompositeValues = map[int]*EditTree{} - } else { - keyHash, found := e.getKeyHash(key) - if found { - if _, ok := e.childScalarValues[keyHash]; ok { - return len(path) == 1 - } + e.value = e.Render() + clear(e.childKeys) + clear(e.childScalarValues) + clear(e.childCompositeValues) + } else if keyHash, found := e.getKeyHash(key); found { + if _, ok := e.childScalarValues[keyHash]; ok { + return len(path) == 1 } } // Fallback if child lookup failed. _, err := e.value.Value.Find(path) return err == nil case *ast.Array: - var idx int idx, err := toIndex(e.insertions.Length(), path[0]) if err != nil { return false @@ -1026,8 +993,16 @@ func (e *EditTree) Exists(path ast.Ref) bool { } // Fallback if child lookup failed. // We have to ensure that the lookup term is a number here, or Find will fail. - k := ast.Ref{ast.IntNumberTerm(idx)}.Concat(path[1:]) - _, err = x.Find(k) + rptr := refPool.Get(len(path)) + + ref := *rptr + ref[0] = ast.InternedTerm(idx) + copy(ref[1:], path[1:]) + + _, err = x.Find(ref) + + refPool.Put(rptr) + return err == nil default: // Catch all primitive types. 
@@ -1048,21 +1023,20 @@ func toIndex(arrayLength int, term *ast.Term) (int, error) { switch v := term.Value.(type) { case ast.Number: if i, ok = v.Int(); !ok { - return 0, fmt.Errorf("invalid number type for indexing") + return 0, errors.New("invalid number type for indexing") } case ast.String: if v == "-" { return arrayLength, nil } - num := ast.Number(v) - if i, ok = num.Int(); !ok { - return 0, fmt.Errorf("invalid string for indexing") + if i, ok = ast.Number(v).Int(); !ok { + return 0, errors.New("invalid string for indexing") } if v != "0" && strings.HasPrefix(string(v), "0") { - return 0, fmt.Errorf("leading zeros are not allowed in JSON paths") + return 0, errors.New("leading zeros are not allowed in JSON paths") } default: - return 0, fmt.Errorf("invalid type for indexing") + return 0, errors.New("invalid type for indexing") } return i, nil @@ -1080,6 +1054,14 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { return nil } + // term pointer and ref pointer for reuse in lookups and iteration below. + tptr, rptr := ast.TermPtrPool.Get(), refPool.Get(1) + defer func() { + tptr.Value = nil + ast.TermPtrPool.Put(tptr) + refPool.Put(rptr) + }() + // Separate out keys for this level. // In the event of paths like "a", "a/b", "a/b/c", the "a" path will win out. // Nil keys, such as "" or [], are not permitted. (legacy behavior) @@ -1102,7 +1084,8 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { renderNow := ast.NewSet(renderNowList...) // Clear everything out of the pathMap that has a renderNow candidate. for k := range pathMap { - if renderNow.Contains(ast.NewTerm(k)) { + tptr.Value = k + if renderNow.Contains(tptr) { delete(pathMap, k) } } @@ -1111,37 +1094,46 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { switch e.value.Value.(type) { case ast.Object: out := make([][2]*ast.Term, 0, renderNow.Len()+len(pathMap)) + ref := *rptr + // Render any finished paths. 
- renderNow.Foreach(func(k *ast.Term) { - if e.Exists(ast.Ref{k}) { - subtreeResult, _ := e.RenderAtPath(ast.Ref{k}) + for _, k := range renderNow.Slice() { + ref[0] = k + if e.Exists(ref) { + subtreeResult, _ := e.RenderAtPath(ref) out = append(out, [2]*ast.Term{k, subtreeResult}) } - }) + } // Recursively descend remaining paths. for k, p := range pathMap { - if e.Exists(ast.Ref{ast.NewTerm(k)}) { - child, _ := e.Unfold(ast.Ref{ast.NewTerm(k)}) + tptr.Value = k + ref[0] = tptr + if e.Exists(ref) { + child, _ := e.Unfold(ref) subtreeResult := child.Filter(p) out = append(out, [2]*ast.Term{ast.NewTerm(k), subtreeResult}) } } + return ast.ObjectTerm(out...) case ast.Set: out := make([]*ast.Term, 0, renderNow.Len()+len(pathMap)) + ref := *rptr // Render any finished paths. - renderNow.Foreach(func(k *ast.Term) { - if e.Exists(ast.Ref{k}) { - subtreeResult, _ := e.RenderAtPath(ast.Ref{k}) + for _, k := range renderNow.Slice() { + ref[0] = k + if e.Exists(ref) { + subtreeResult, _ := e.RenderAtPath(ref) out = append(out, subtreeResult) } - }) + } // Recursively descend remaining paths. for k, p := range pathMap { - if e.Exists(ast.Ref{ast.NewTerm(k)}) { - child, _ := e.Unfold(ast.Ref{ast.NewTerm(k)}) - subtreeResult := child.Filter(p) - out = append(out, subtreeResult) + tptr.Value = k + ref[0] = tptr + if e.Exists(ref) { + child, _ := e.Unfold(ref) + out = append(out, child.Filter(p)) } } return ast.SetTerm(out...) @@ -1149,27 +1141,25 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { // No early exit here, because we might have just deletes on the // original array. We build a new Array with modified/deleted keys. out := make([]*ast.Term, 0, renderNow.Len()+len(pathMap)) - // Sort array indexes before descending. - idxList := make([]*ast.Term, 0, len(pathMap)) - renderNow.Foreach(func(k *ast.Term) { - idxList = append(idxList, k) - }) + idxList := append(make([]*ast.Term, 0, renderNow.Len()+len(pathMap)), renderNow.Slice()...) 
for k := range pathMap { idxList = append(idxList, ast.NewTerm(k)) } - sort.Sort(termSlice(idxList)) - // Render child or recursively descend as needed. - for i := range idxList { + + ref := *rptr + + // Render child or recursively descend sorted indexes as needed. + for i := range util.SortedFunc(idxList, ast.TermValueCompare) { k := idxList[i] - if renderNow.Contains(k) { - if e.Exists(ast.Ref{k}) { - subtreeResult, _ := e.RenderAtPath(ast.Ref{k}) + ref[0] = k + if e.Exists(ref) { + if renderNow.Contains(k) { + subtreeResult, _ := e.RenderAtPath(ref) out = append(out, subtreeResult) + } else { + child, _ := e.Unfold(ref) + out = append(out, child.Filter(pathMap[k.Value])) } - } else if e.Exists(ast.Ref{k}) { - child, _ := e.Unfold(ast.Ref{k}) - subtreeResult := child.Filter(pathMap[k.Value]) - out = append(out, subtreeResult) } } return ast.ArrayTerm(out...) @@ -1177,9 +1167,3 @@ func (e *EditTree) Filter(paths []ast.Ref) *ast.Term { return e.value } } - -type termSlice []*ast.Term - -func (s termSlice) Less(i, j int) bool { return ast.Compare(s[i].Value, s[j].Value) < 0 } -func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s termSlice) Len() int { return len(s) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go b/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go index 6b8ba48d15..93396aa96f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go +++ b/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go @@ -4,39 +4,72 @@ import ( "archive/tar" "bytes" "compress/gzip" + "encoding/json" + "errors" + "io" "strings" ) -// MustWriteTarGz write the list of file names and content -// into a tarball. 
-func MustWriteTarGz(files [][2]string) *bytes.Buffer { - var buf bytes.Buffer - gw := gzip.NewWriter(&buf) - defer gw.Close() +type TarGzWriter struct { + *tar.Writer + + gw *gzip.Writer +} + +func NewTarGzWriter(w io.Writer) *TarGzWriter { + gw := gzip.NewWriter(w) tw := tar.NewWriter(gw) - defer tw.Close() - for _, file := range files { - if err := WriteFile(tw, file[0], []byte(file[1])); err != nil { - panic(err) - } + + return &TarGzWriter{ + Writer: tw, + gw: gw, } - return &buf } -// WriteFile adds a file header with content to the given tar writer -func WriteFile(tw *tar.Writer, path string, bs []byte) error { - +func (tgw *TarGzWriter) WriteFile(path string, bs []byte) (err error) { hdr := &tar.Header{ - Name: "/" + strings.TrimLeft(path, "/"), + Name: path, Mode: 0600, Typeflag: tar.TypeReg, Size: int64(len(bs)), } - if err := tw.WriteHeader(hdr); err != nil { - return err + if err = tgw.WriteHeader(hdr); err == nil { + _, err = tgw.Write(bs) } - _, err := tw.Write(bs) return err } + +func (tgw *TarGzWriter) WriteJSONFile(path string, v any) error { + buf := &bytes.Buffer{} + if err := json.NewEncoder(buf).Encode(v); err != nil { + return err + } + + return tgw.WriteFile(path, buf.Bytes()) +} + +func (tgw *TarGzWriter) Close() error { + return errors.Join(tgw.Writer.Close(), tgw.gw.Close()) +} + +// MustWriteTarGz writes the list of file names and content into a tarball. +// Paths are prefixed with "/". 
+func MustWriteTarGz(files [][2]string) *bytes.Buffer { + buf := &bytes.Buffer{} + tgw := NewTarGzWriter(buf) + defer tgw.Close() + + for _, file := range files { + if !strings.HasPrefix(file[0], "/") { + file[0] = "/" + file[0] + } + + if err := tgw.WriteFile(file[0], []byte(file[1])); err != nil { + panic(err) + } + } + + return buf +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go b/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go index cf5721101a..27ca5559f1 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go +++ b/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go @@ -4,7 +4,7 @@ package future -import "github.com/open-policy-agent/opa/ast" +import "github.com/open-policy-agent/opa/v1/ast" // FilterFutureImports filters OUT any future imports from the passed slice of // `*ast.Import`s. @@ -24,7 +24,7 @@ func IsAllFutureKeywords(imp *ast.Import) bool { path := imp.Path.Value.(ast.Ref) return len(path) == 2 && ast.FutureRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("keywords")) + path[1].Equal(ast.InternedTerm("keywords")) } // IsFutureKeyword returns true if the passed *ast.Import is `future.keywords.{kw}` @@ -32,7 +32,7 @@ func IsFutureKeyword(imp *ast.Import, kw string) bool { path := imp.Path.Value.(ast.Ref) return len(path) == 3 && ast.FutureRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("keywords")) && + path[1].Equal(ast.InternedTerm("keywords")) && path[2].Equal(ast.StringTerm(kw)) } @@ -40,7 +40,7 @@ func WhichFutureKeyword(imp *ast.Import) (string, bool) { path := imp.Path.Value.(ast.Ref) if len(path) == 3 && ast.FutureRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("keywords")) { + path[1].Equal(ast.InternedTerm("keywords")) { if str, ok := path[2].Value.(ast.String); ok { return string(str), true } diff --git a/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go 
b/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go index 804702b945..eaeb87e296 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go +++ b/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go @@ -5,9 +5,10 @@ package future import ( + "errors" "fmt" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // ParserOptionsFromFutureImports transforms a slice of `ast.Import`s into the @@ -33,7 +34,7 @@ func ParserOptionsFromFutureImports(imports []*ast.Import) (ast.ParserOptions, e } if len(path) == 3 { if imp.Alias != "" { - return popts, fmt.Errorf("alias not supported") + return popts, errors.New("alias not supported") } popts.FutureKeywords = append(popts.FutureKeywords, string(path[2].Value.(ast.String))) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go index dac1aafdac..656804acb7 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/draft.go @@ -86,12 +86,12 @@ func (dc draftConfigs) GetSchemaURL(draft Draft) string { return "" } -func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { +func parseSchemaURL(documentNode any) (string, *Draft, error) { if _, ok := documentNode.(bool); ok { return "", nil, nil } - m, ok := documentNode.(map[string]interface{}) + m, ok := documentNode.(map[string]any) if !ok { return "", nil, errors.New("schema is invalid") } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go index f7aaf90306..a937d9b3b9 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/errors.go @@ -212,7 +212,7 @@ type ( ) // newError 
takes a ResultError type and sets the type, context, description, details, value, and field -func newError(err ResultError, context *JSONContext, value interface{}, locale locale, details ErrorDetails) { +func newError(err ResultError, context *JSONContext, value any, locale locale, details ErrorDetails) { var t string var d string switch err.(type) { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go index 1e770464e8..c078e9862f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/format_checkers.go @@ -14,7 +14,7 @@ type ( // FormatChecker is the interface all formatters added to FormatCheckerChain must implement FormatChecker interface { // IsFormat checks if input has the correct format - IsFormat(input interface{}) bool + IsFormat(input any) bool } // FormatCheckerChain holds the formatters @@ -174,7 +174,7 @@ func (c *FormatCheckerChain) Has(name string) bool { // IsFormat will check an input against a FormatChecker with the given name // to see if it is the correct format -func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { +func (c *FormatCheckerChain) IsFormat(name string, input any) bool { lock.RLock() f, ok := c.formatters[name] lock.RUnlock() @@ -188,7 +188,7 @@ func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { } // IsFormat checks if input is a correctly formatted e-mail address -func (f EmailFormatChecker) IsFormat(input interface{}) bool { +func (f EmailFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -199,7 +199,7 @@ func (f EmailFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted IPv4-address -func (f IPV4FormatChecker) IsFormat(input interface{}) bool { +func (f 
IPV4FormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -211,7 +211,7 @@ func (f IPV4FormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted IPv6=address -func (f IPV6FormatChecker) IsFormat(input interface{}) bool { +func (f IPV6FormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -223,7 +223,7 @@ func (f IPV6FormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6 -func (f DateTimeFormatChecker) IsFormat(input interface{}) bool { +func (f DateTimeFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -247,7 +247,7 @@ func (f DateTimeFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted date (YYYY-MM-DD) -func (f DateFormatChecker) IsFormat(input interface{}) bool { +func (f DateFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -257,7 +257,7 @@ func (f DateFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00) -func (f TimeFormatChecker) IsFormat(input interface{}) bool { +func (f TimeFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -272,7 +272,7 @@ func (f TimeFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is correctly formatted URI with a valid Scheme per RFC3986 -func (f URIFormatChecker) IsFormat(input interface{}) bool { +func (f URIFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -288,7 +288,7 @@ func (f URIFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986 -func (f URIReferenceFormatChecker) IsFormat(input 
interface{}) bool { +func (f URIReferenceFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -299,7 +299,7 @@ func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted URI template per RFC6570 -func (f URITemplateFormatChecker) IsFormat(input interface{}) bool { +func (f URITemplateFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -314,7 +314,7 @@ func (f URITemplateFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted hostname -func (f HostnameFormatChecker) IsFormat(input interface{}) bool { +func (f HostnameFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -324,7 +324,7 @@ func (f HostnameFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted UUID -func (f UUIDFormatChecker) IsFormat(input interface{}) bool { +func (f UUIDFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -334,7 +334,7 @@ func (f UUIDFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted regular expression -func (f RegexFormatChecker) IsFormat(input interface{}) bool { +func (f RegexFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -348,7 +348,7 @@ func (f RegexFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901 -func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool { +func (f JSONPointerFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true @@ -358,7 +358,7 @@ func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool { } // IsFormat checks if input is a correctly formatted relative JSON Pointer -func (f 
RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { +func (f RelativeJSONPointerFormatChecker) IsFormat(input any) bool { asString, ok := input.(string) if !ok { return true diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go index 4ef7a8d03e..bab75112eb 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/internalLog.go @@ -32,6 +32,6 @@ import ( const internalLogEnabled = false -func internalLog(format string, v ...interface{}) { +func internalLog(format string, v ...any) { log.Printf(format, v...) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go index 1011552dee..73f25e3b7f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/jsonLoader.go @@ -77,8 +77,8 @@ var osFS = osFileSystem(os.Open) // JSONLoader defines the JSON loader interface type JSONLoader interface { - JSONSource() interface{} - LoadJSON() (interface{}, error) + JSONSource() any + LoadJSON() (any, error) JSONReference() (gojsonreference.JsonReference, error) LoaderFactory() JSONLoaderFactory } @@ -130,7 +130,7 @@ type jsonReferenceLoader struct { source string } -func (l *jsonReferenceLoader) JSONSource() interface{} { +func (l *jsonReferenceLoader) JSONSource() any { return l.source } @@ -160,7 +160,7 @@ func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader } } -func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { +func (l *jsonReferenceLoader) LoadJSON() (any, error) { var err error @@ -207,7 +207,7 @@ func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { return nil, fmt.Errorf("remote reference 
loading disabled: %s", reference.String()) } -func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { +func (l *jsonReferenceLoader) loadFromHTTP(address string) (any, error) { resp, err := http.Get(address) if err != nil { @@ -227,7 +227,7 @@ func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) } -func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { +func (l *jsonReferenceLoader) loadFromFile(path string) (any, error) { f, err := l.fs.Open(path) if err != nil { return nil, err @@ -249,7 +249,7 @@ type jsonStringLoader struct { source string } -func (l *jsonStringLoader) JSONSource() interface{} { +func (l *jsonStringLoader) JSONSource() any { return l.source } @@ -266,7 +266,7 @@ func NewStringLoader(source string) JSONLoader { return &jsonStringLoader{source: source} } -func (l *jsonStringLoader) LoadJSON() (interface{}, error) { +func (l *jsonStringLoader) LoadJSON() (any, error) { return decodeJSONUsingNumber(strings.NewReader(l.JSONSource().(string))) @@ -278,7 +278,7 @@ type jsonBytesLoader struct { source []byte } -func (l *jsonBytesLoader) JSONSource() interface{} { +func (l *jsonBytesLoader) JSONSource() any { return l.source } @@ -295,18 +295,18 @@ func NewBytesLoader(source []byte) JSONLoader { return &jsonBytesLoader{source: source} } -func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { +func (l *jsonBytesLoader) LoadJSON() (any, error) { return decodeJSONUsingNumber(bytes.NewReader(l.JSONSource().([]byte))) } // JSON Go (types) loader -// used to load JSONs from the code as maps, interface{}, structs ... +// used to load JSONs from the code as maps, any, structs ... 
type jsonGoLoader struct { - source interface{} + source any } -func (l *jsonGoLoader) JSONSource() interface{} { +func (l *jsonGoLoader) JSONSource() any { return l.source } @@ -319,11 +319,11 @@ func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { } // NewGoLoader creates a new JSONLoader from a given Go struct -func NewGoLoader(source interface{}) JSONLoader { +func NewGoLoader(source any) JSONLoader { return &jsonGoLoader{source: source} } -func (l *jsonGoLoader) LoadJSON() (interface{}, error) { +func (l *jsonGoLoader) LoadJSON() (any, error) { // convert it to a compliant JSON first to avoid types "mismatches" @@ -352,11 +352,11 @@ func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) } -func (l *jsonIOLoader) JSONSource() interface{} { +func (l *jsonIOLoader) JSONSource() any { return l.buf.String() } -func (l *jsonIOLoader) LoadJSON() (interface{}, error) { +func (l *jsonIOLoader) LoadJSON() (any, error) { return decodeJSONUsingNumber(l.buf) } @@ -369,21 +369,21 @@ func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { } // JSON raw loader -// In case the JSON is already marshalled to interface{} use this loader +// In case the JSON is already marshalled to any use this loader // This is used for testing as otherwise there is no guarantee the JSON is marshalled // "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber type jsonRawLoader struct { - source interface{} + source any } // NewRawLoader creates a new JSON raw loader for the given source -func NewRawLoader(source interface{}) JSONLoader { +func NewRawLoader(source any) JSONLoader { return &jsonRawLoader{source: source} } -func (l *jsonRawLoader) JSONSource() interface{} { +func (l *jsonRawLoader) JSONSource() any { return l.source } -func (l *jsonRawLoader) LoadJSON() (interface{}, error) { +func (l *jsonRawLoader) LoadJSON() (any, error) { return l.source, nil } func (l *jsonRawLoader) 
JSONReference() (gojsonreference.JsonReference, error) { @@ -393,9 +393,9 @@ func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { return &DefaultJSONLoaderFactory{} } -func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { +func decodeJSONUsingNumber(r io.Reader) (any, error) { - var document interface{} + var document any decoder := json.NewDecoder(r) decoder.UseNumber() diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go index 8baff07179..0329721c20 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/result.go @@ -33,7 +33,7 @@ import ( type ( // ErrorDetails is a map of details specific to each error. // While the values will vary, every error will contain a "field" value - ErrorDetails map[string]interface{} + ErrorDetails map[string]any // ResultError is the interface that library errors must implement ResultError interface { @@ -57,9 +57,9 @@ type ( // DescriptionFormat returns the format for the description in the default text/template format DescriptionFormat() string // SetValue sets the value related to the error - SetValue(interface{}) + SetValue(any) // Value returns the value related to the error - Value() interface{} + Value() any // SetDetails sets the details specific to the error SetDetails(ErrorDetails) // Details returns details about the error @@ -76,7 +76,7 @@ type ( context *JSONContext // Tree like notation of the part that failed the validation. ex (root).a.b ... 
description string // A human readable error message descriptionFormat string // A format for human readable error message - value interface{} // Value given by the JSON file that is the source of the error + value any // Value given by the JSON file that is the source of the error details ErrorDetails } @@ -136,12 +136,12 @@ func (v *ResultErrorFields) DescriptionFormat() string { } // SetValue sets the value related to the error -func (v *ResultErrorFields) SetValue(value interface{}) { +func (v *ResultErrorFields) SetValue(value any) { v.value = value } // Value returns the value related to the error -func (v *ResultErrorFields) Value() interface{} { +func (v *ResultErrorFields) Value() any { return v.value } @@ -203,7 +203,7 @@ func (v *Result) AddError(err ResultError, details ErrorDetails) { v.errors = append(v.errors, err) } -func (v *Result) addInternalError(err ResultError, context *JSONContext, value interface{}, details ErrorDetails) { +func (v *Result) addInternalError(err ResultError, context *JSONContext, value any, details ErrorDetails) { newError(err, context, value, Locale, details) v.errors = append(v.errors, err) v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go index 8e035013c2..e8007ee2b6 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schema.go @@ -58,7 +58,7 @@ type Schema struct { ReferencePool *schemaReferencePool } -func (d *Schema) parse(document interface{}, draft Draft) error { +func (d *Schema) parse(document any, draft Draft) error { d.RootSchema = &SubSchema{Property: StringRootSchemaProperty, Draft: &draft} return d.parseSchema(document, d.RootSchema) } @@ -73,7 +73,7 @@ func (d *Schema) SetRootSchemaName(name string) { // 
Pretty long function ( sorry :) )... but pretty straight forward, repetitive and boring // Not much magic involved here, most of the job is to validate the key names and their values, // then the values are copied into SubSchema struct -func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) error { +func (d *Schema) parseSchema(documentNode any, currentSchema *SubSchema) error { if currentSchema.Draft == nil { if currentSchema.Parent == nil { @@ -90,7 +90,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) } } - m, isMap := documentNode.(map[string]interface{}) + m, isMap := documentNode.(map[string]any) if !isMap { return errors.New(formatErrorDescription( Locale.ParseError(), @@ -146,10 +146,10 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) // definitions if v, ok := m[KeyDefinitions]; ok { switch mt := v.(type) { - case map[string]interface{}: + case map[string]any: for _, dv := range mt { switch dv.(type) { - case bool, map[string]interface{}: + case bool, map[string]any: newSchema := &SubSchema{Property: KeyDefinitions, Parent: currentSchema} err := d.parseSchema(dv, newSchema) if err != nil { @@ -203,7 +203,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) if err != nil { return err } - case []interface{}: + case []any: for _, typeInArray := range t { s, isString := typeInArray.(string) if !isString { @@ -231,7 +231,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) switch v := additionalProperties.(type) { case bool: currentSchema.additionalProperties = v - case map[string]interface{}: + case map[string]any: newSchema := &SubSchema{Property: KeyAdditionalProperties, Parent: currentSchema, Ref: currentSchema.Ref} currentSchema.additionalProperties = newSchema err := d.parseSchema(v, newSchema) @@ -270,7 +270,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) 
// propertyNames if propertyNames, found := m[KeyPropertyNames]; found && *currentSchema.Draft >= Draft6 { switch propertyNames.(type) { - case bool, map[string]interface{}: + case bool, map[string]any: newSchema := &SubSchema{Property: KeyPropertyNames, Parent: currentSchema, Ref: currentSchema.Ref} currentSchema.propertyNames = newSchema err := d.parseSchema(propertyNames, newSchema) @@ -299,10 +299,10 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) // items if items, found := m[KeyItems]; found { switch i := items.(type) { - case []interface{}: + case []any: for _, itemElement := range i { switch itemElement.(type) { - case map[string]interface{}, bool: + case map[string]any, bool: newSchema := &SubSchema{Parent: currentSchema, Property: KeyItems} newSchema.Ref = currentSchema.Ref currentSchema.ItemsChildren = append(currentSchema.ItemsChildren, newSchema) @@ -315,7 +315,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) } currentSchema.ItemsChildrenIsSingleSchema = false } - case map[string]interface{}, bool: + case map[string]any, bool: newSchema := &SubSchema{Parent: currentSchema, Property: KeyItems} newSchema.Ref = currentSchema.Ref currentSchema.ItemsChildren = append(currentSchema.ItemsChildren, newSchema) @@ -334,7 +334,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) switch i := additionalItems.(type) { case bool: currentSchema.additionalItems = i - case map[string]interface{}: + case map[string]any: newSchema := &SubSchema{Property: KeyAdditionalItems, Parent: currentSchema, Ref: currentSchema.Ref} currentSchema.additionalItems = newSchema err := d.parseSchema(additionalItems, newSchema) @@ -717,7 +717,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) if vNot, found := m[KeyNot]; found { switch vNot.(type) { - case bool, map[string]interface{}: + case bool, map[string]any: newSchema := &SubSchema{Property: 
KeyNot, Parent: currentSchema, Ref: currentSchema.Ref} currentSchema.not = newSchema err := d.parseSchema(vNot, newSchema) @@ -735,7 +735,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) if *currentSchema.Draft >= Draft7 { if vIf, found := m[KeyIf]; found { switch vIf.(type) { - case bool, map[string]interface{}: + case bool, map[string]any: newSchema := &SubSchema{Property: KeyIf, Parent: currentSchema, Ref: currentSchema.Ref} currentSchema._if = newSchema err := d.parseSchema(vIf, newSchema) @@ -752,7 +752,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) if then, found := m[KeyThen]; found { switch then.(type) { - case bool, map[string]interface{}: + case bool, map[string]any: newSchema := &SubSchema{Property: KeyThen, Parent: currentSchema, Ref: currentSchema.Ref} currentSchema._then = newSchema err := d.parseSchema(then, newSchema) @@ -769,7 +769,7 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) if vElse, found := m[KeyElse]; found { switch vElse.(type) { - case bool, map[string]interface{}: + case bool, map[string]any: newSchema := &SubSchema{Property: KeyElse, Parent: currentSchema, Ref: currentSchema.Ref} currentSchema._else = newSchema err := d.parseSchema(vElse, newSchema) @@ -788,9 +788,9 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *SubSchema) return nil } -func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error { +func (d *Schema) parseReference(_ any, currentSchema *SubSchema) error { var ( - refdDocumentNode interface{} + refdDocumentNode any dsp *schemaPoolDocument err error ) @@ -809,7 +809,7 @@ func (d *Schema) parseReference(_ interface{}, currentSchema *SubSchema) error { newSchema.Draft = dsp.Draft switch refdDocumentNode.(type) { - case bool, map[string]interface{}: + case bool, map[string]any: // expected default: return errors.New(formatErrorDescription( @@ -829,8 +829,8 @@ func (d 
*Schema) parseReference(_ interface{}, currentSchema *SubSchema) error { } -func (d *Schema) parseProperties(documentNode interface{}, currentSchema *SubSchema) error { - m, isMap := documentNode.(map[string]interface{}) +func (d *Schema) parseProperties(documentNode any, currentSchema *SubSchema) error { + m, isMap := documentNode.(map[string]any) if !isMap { return errors.New(formatErrorDescription( Locale.MustBeOfType(), @@ -851,19 +851,19 @@ func (d *Schema) parseProperties(documentNode interface{}, currentSchema *SubSch return nil } -func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *SubSchema) error { - m, isMap := documentNode.(map[string]interface{}) +func (d *Schema) parseDependencies(documentNode any, currentSchema *SubSchema) error { + m, isMap := documentNode.(map[string]any) if !isMap { return errors.New(formatErrorDescription( Locale.MustBeOfType(), ErrorDetails{"key": KeyDependencies, "type": TypeObject}, )) } - currentSchema.dependencies = make(map[string]interface{}) + currentSchema.dependencies = make(map[string]any) for k := range m { switch values := m[k].(type) { - case []interface{}: + case []any: var valuesToRegister []string for _, value := range values { str, isString := value.(string) @@ -880,7 +880,7 @@ func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *SubS currentSchema.dependencies[k] = valuesToRegister } - case bool, map[string]interface{}: + case bool, map[string]any: depSchema := &SubSchema{Property: k, Parent: currentSchema, Ref: currentSchema.Ref} err := d.parseSchema(m[k], depSchema) if err != nil { @@ -913,7 +913,7 @@ func invalidType(expected, given string) error { )) } -func getString(m map[string]interface{}, key string) (*string, error) { +func getString(m map[string]any, key string) (*string, error) { v, found := m[key] if !found { // not found @@ -927,13 +927,13 @@ func getString(m map[string]interface{}, key string) (*string, error) { return &s, nil } -func getMap(m 
map[string]interface{}, key string) (map[string]interface{}, error) { +func getMap(m map[string]any, key string) (map[string]any, error) { v, found := m[key] if !found { // not found return nil, nil } - s, isMap := v.(map[string]interface{}) + s, isMap := v.(map[string]any) if !isMap { // wrong type return nil, invalidType(StringSchema, key) @@ -941,12 +941,12 @@ func getMap(m map[string]interface{}, key string) (map[string]interface{}, error return s, nil } -func getSlice(m map[string]interface{}, key string) ([]interface{}, error) { +func getSlice(m map[string]any, key string) ([]any, error) { v, found := m[key] if !found { return nil, nil } - s, isArray := v.([]interface{}) + s, isArray := v.([]any) if !isArray { return nil, errors.New(formatErrorDescription( Locale.MustBeOfAn(), diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go index 8cc6dc03b8..88caa65de2 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaLoader.go @@ -45,7 +45,7 @@ func NewSchemaLoader() *SchemaLoader { return ps } -func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error { +func (sl *SchemaLoader) validateMetaschema(documentNode any) error { var ( schema string @@ -158,7 +158,7 @@ func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) { d.DocumentReference = ref d.ReferencePool = newSchemaReferencePool() - var doc interface{} + var doc any if ref.String() != "" { // Get document from schema pool spd, err := d.Pool.GetDocument(d.DocumentReference) diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go index ed8ff688b5..513f8df2cc 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go 
+++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaPool.go @@ -34,7 +34,7 @@ import ( ) type schemaPoolDocument struct { - Document interface{} + Document any Draft *Draft } @@ -44,7 +44,7 @@ type schemaPool struct { autoDetect *bool } -func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { +func (p *schemaPool) parseReferences(document any, ref gojsonreference.JsonReference, pooled bool) error { var ( draft *Draft @@ -72,7 +72,7 @@ func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.J return err } -func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { +func (p *schemaPool) parseReferencesRecursive(document any, ref gojsonreference.JsonReference, draft *Draft) error { // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. // For $ref references it takes into account the $id scope it is in and replaces // the reference by the absolute resolved reference @@ -80,14 +80,14 @@ func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonre // When encountering errors it fails silently. Error handling is done when the schema // is syntactically parsed and any error encountered here should also come up there. switch m := document.(type) { - case []interface{}: + case []any: for _, v := range m { err := p.parseReferencesRecursive(v, ref, draft) if err != nil { return err } } - case map[string]interface{}: + case map[string]any: localRef := &ref keyID := KeyIDNew @@ -129,7 +129,7 @@ func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonre // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc // Therefore don't treat it like a schema. 
if k == KeyProperties || k == KeyDependencies || k == KeyPatternProperties { - if child, ok := v.(map[string]interface{}); ok { + if child, ok := v.(map[string]any); ok { for _, v := range child { err := p.parseReferencesRecursive(v, *localRef, draft) if err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go index 876419f56c..515702095b 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go @@ -25,10 +25,6 @@ package gojsonschema -import ( - "fmt" -) - type schemaReferencePool struct { documents map[string]*SubSchema } @@ -44,7 +40,7 @@ func newSchemaReferencePool() *schemaReferencePool { func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) { if internalLogEnabled { - internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + internalLog("Schema Reference ( %s )", ref) } if sch, ok := p.documents[ref]; ok { @@ -60,7 +56,7 @@ func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) { func (p *schemaReferencePool) Add(ref string, sch *SubSchema) { if internalLogEnabled { - internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + internalLog("Add Schema Reference %s to pool", ref) } if _, ok := p.documents[ref]; !ok { p.documents[ref] = sch diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go index 271832d334..4abcc6814e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaType.go @@ -28,6 +28,7 @@ package gojsonschema import ( "errors" "fmt" + "slices" "strings" ) @@ -58,13 +59,7 @@ func (t *jsonSchemaType) Add(etype string) error { func 
(t *jsonSchemaType) Contains(etype string) bool { - for _, v := range t.types { - if v == etype { - return true - } - } - - return false + return slices.Contains(t.types, etype) } func (t *jsonSchemaType) String() string { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go index d8bc0cb568..b7ceb3136e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/subSchema.go @@ -123,8 +123,8 @@ type SubSchema struct { maxProperties *int required []string - dependencies map[string]interface{} - additionalProperties interface{} + dependencies map[string]any + additionalProperties any patternProperties map[string]*SubSchema propertyNames *SubSchema @@ -134,7 +134,7 @@ type SubSchema struct { uniqueItems bool contains *SubSchema - additionalItems interface{} + additionalItems any // validation : all _const *string //const is a golang keyword diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go index fd0f1870f9..a8639d4d9a 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/utils.go @@ -23,24 +23,20 @@ // // created 26-02-2013 -// nolint: deadcode,unused,varcheck // Package in development (2021). +// nolint:unused // Package in development (2021). 
package gojsonschema import ( "encoding/json" "math/big" + "slices" ) func isStringInSlice(s []string, what string) bool { - for i := range s { - if s[i] == what { - return true - } - } - return false + return slices.Contains(s, what) } -func marshalToJSONString(value interface{}) (*string, error) { +func marshalToJSONString(value any) (*string, error) { mBytes, err := json.Marshal(value) if err != nil { @@ -51,7 +47,7 @@ func marshalToJSONString(value interface{}) (*string, error) { return &sBytes, nil } -func marshalWithoutNumber(value interface{}) (*string, error) { +func marshalWithoutNumber(value any) (*string, error) { // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 @@ -63,7 +59,7 @@ func marshalWithoutNumber(value interface{}) (*string, error) { return nil, err } - var document interface{} + var document any err = json.Unmarshal([]byte(*jsonString), &document) if err != nil { @@ -73,7 +69,7 @@ func marshalWithoutNumber(value interface{}) (*string, error) { return marshalToJSONString(document) } -func isJSONNumber(what interface{}) bool { +func isJSONNumber(what any) bool { switch what.(type) { @@ -84,7 +80,7 @@ func isJSONNumber(what interface{}) bool { return false } -func checkJSONInteger(what interface{}) (isInt bool) { +func checkJSONInteger(what any) (isInt bool) { jsonNumber := what.(json.Number) @@ -100,7 +96,7 @@ const ( minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 ) -func mustBeInteger(what interface{}) *int { +func mustBeInteger(what any) *int { number, ok := what.(json.Number) if !ok { return nil @@ -123,7 +119,7 @@ func mustBeInteger(what interface{}) *int { return &int32Value } -func mustBeNumber(what interface{}) *big.Rat { +func mustBeNumber(what any) *big.Rat { number, ok := what.(json.Number) if !ok { return nil @@ -136,11 +132,11 @@ func mustBeNumber(what interface{}) *big.Rat { 
return nil } -func convertDocumentNode(val interface{}) interface{} { +func convertDocumentNode(val any) any { - if lval, ok := val.([]interface{}); ok { + if lval, ok := val.([]any); ok { - res := []interface{}{} + res := []any{} for _, v := range lval { res = append(res, convertDocumentNode(v)) } @@ -149,9 +145,9 @@ func convertDocumentNode(val interface{}) interface{} { } - if mval, ok := val.(map[interface{}]interface{}); ok { + if mval, ok := val.(map[any]any); ok { - res := map[string]interface{}{} + res := map[string]any{} for k, v := range mval { res[k.(string)] = convertDocumentNode(v) diff --git a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go index 7c86e37245..e33a0f3d27 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go +++ b/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go @@ -54,21 +54,21 @@ func (v *Schema) Validate(l JSONLoader) (*Result, error) { return v.validateDocument(root), nil } -func (v *Schema) validateDocument(root interface{}) *Result { +func (v *Schema) validateDocument(root any) *Result { result := &Result{} context := NewJSONContext(StringContextRoot, nil) v.RootSchema.validateRecursive(v.RootSchema, root, result, context) return result } -func (v *SubSchema) subValidateWithContext(document interface{}, context *JSONContext) *Result { +func (v *SubSchema) subValidateWithContext(document any, context *JSONContext) *Result { result := &Result{} v.validateRecursive(v, document, result, context) return result } // Walker function to validate the json recursively against the SubSchema -func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode interface{}, result *Result, context *JSONContext) { +func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode any, result *Result, context *JSONContext) { if internalLogEnabled { 
internalLog("validateRecursive %s", context.String()) @@ -167,7 +167,7 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i return } - castCurrentNode := currentNode.([]interface{}) + castCurrentNode := currentNode.([]any) currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) @@ -190,9 +190,9 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i return } - castCurrentNode, ok := currentNode.(map[string]interface{}) + castCurrentNode, ok := currentNode.(map[string]any) if !ok { - castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) + castCurrentNode = convertDocumentNode(currentNode).(map[string]any) } currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) @@ -264,7 +264,7 @@ func (v *SubSchema) validateRecursive(currentSubSchema *SubSchema, currentNode i } // Different kinds of validation there, SubSchema / common / array / object / string... -func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode interface{}, result *Result, context *JSONContext) { +func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode any, result *Result, context *JSONContext) { if internalLogEnabled { internalLog("validateSchema %s", context.String()) @@ -348,15 +348,15 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte } } - if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { - if currentNodeMap, ok := currentNode.(map[string]interface{}); ok { + if len(currentSubSchema.dependencies) > 0 { + if currentNodeMap, ok := currentNode.(map[string]any); ok { for elementKey := range currentNodeMap { if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { switch dependency := dependency.(type) { case []string: for _, dependOnKey := range dependency { - if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; 
!dependencyResolved { + if _, dependencyResolved := currentNode.(map[string]any)[dependOnKey]; !dependencyResolved { result.addInternalError( new(MissingDependencyError), context, @@ -395,7 +395,7 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte result.incrementScore() } -func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) { +func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) { if internalLogEnabled { internalLog("validateCommon %s", context.String()) @@ -452,7 +452,7 @@ func (v *SubSchema) validateCommon(currentSubSchema *SubSchema, value interface{ result.incrementScore() } -func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface{}, result *Result, context *JSONContext) { +func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []any, result *Result, context *JSONContext) { if internalLogEnabled { internalLog("validateArray %s", context.String()) @@ -469,7 +469,7 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface result.mergeErrors(validationResult) } } else { - if currentSubSchema.ItemsChildren != nil && len(currentSubSchema.ItemsChildren) > 0 { + if len(currentSubSchema.ItemsChildren) > 0 { nbItems := len(currentSubSchema.ItemsChildren) @@ -578,7 +578,7 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface result.incrementScore() } -func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string]interface{}, result *Result, context *JSONContext) { +func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string]any, result *Result, context *JSONContext) { if internalLogEnabled { internalLog("validateObject %s", context.String()) @@ -675,7 +675,7 @@ func (v *SubSchema) validateObject(currentSubSchema *SubSchema, value map[string result.incrementScore() } -func (v 
*SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key string, value interface{}, result *Result, context *JSONContext) bool { +func (v *SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key string, value any, result *Result, context *JSONContext) bool { if internalLogEnabled { internalLog("validatePatternProperty %s", context.String()) @@ -701,7 +701,7 @@ func (v *SubSchema) validatePatternProperty(currentSubSchema *SubSchema, key str return true } -func (v *SubSchema) validateString(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) { +func (v *SubSchema) validateString(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) { // Ignore JSON numbers stringValue, isString := value.(string) @@ -752,7 +752,7 @@ func (v *SubSchema) validateString(currentSubSchema *SubSchema, value interface{ result.incrementScore() } -func (v *SubSchema) validateNumber(currentSubSchema *SubSchema, value interface{}, result *Result, context *JSONContext) { +func (v *SubSchema) validateNumber(currentSubSchema *SubSchema, value any, result *Result, context *JSONContext) { // Ignore non numbers number, isNumber := value.(json.Number) diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/decode.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/decode.go deleted file mode 100644 index d00920554c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/decode.go +++ /dev/null @@ -1,216 +0,0 @@ -package ast - -import ( - "encoding/json" -) - -func UnmarshalSelectionSet(b []byte) (SelectionSet, error) { - var tmp []json.RawMessage - - if err := json.Unmarshal(b, &tmp); err != nil { - return nil, err - } - - var result = make([]Selection, 0) - for _, item := range tmp { - var field Field - if err := json.Unmarshal(item, &field); err == nil { - result = append(result, &field) - continue - } - var fragmentSpread FragmentSpread - if err := 
json.Unmarshal(item, &fragmentSpread); err == nil { - result = append(result, &fragmentSpread) - continue - } - var inlineFragment InlineFragment - if err := json.Unmarshal(item, &inlineFragment); err == nil { - result = append(result, &inlineFragment) - continue - } - } - - return result, nil -} - -func (f *FragmentDefinition) UnmarshalJSON(b []byte) error { - var tmp map[string]json.RawMessage - if err := json.Unmarshal(b, &tmp); err != nil { - return err - } - for k := range tmp { - switch k { - case "Name": - err := json.Unmarshal(tmp[k], &f.Name) - if err != nil { - return err - } - case "VariableDefinition": - err := json.Unmarshal(tmp[k], &f.VariableDefinition) - if err != nil { - return err - } - case "TypeCondition": - err := json.Unmarshal(tmp[k], &f.TypeCondition) - if err != nil { - return err - } - case "Directives": - err := json.Unmarshal(tmp[k], &f.Directives) - if err != nil { - return err - } - case "SelectionSet": - ss, err := UnmarshalSelectionSet(tmp[k]) - if err != nil { - return err - } - f.SelectionSet = ss - case "Definition": - err := json.Unmarshal(tmp[k], &f.Definition) - if err != nil { - return err - } - case "Position": - err := json.Unmarshal(tmp[k], &f.Position) - if err != nil { - return err - } - } - } - return nil -} - -func (f *InlineFragment) UnmarshalJSON(b []byte) error { - var tmp map[string]json.RawMessage - if err := json.Unmarshal(b, &tmp); err != nil { - return err - } - for k := range tmp { - switch k { - case "TypeCondition": - err := json.Unmarshal(tmp[k], &f.TypeCondition) - if err != nil { - return err - } - case "Directives": - err := json.Unmarshal(tmp[k], &f.Directives) - if err != nil { - return err - } - case "SelectionSet": - ss, err := UnmarshalSelectionSet(tmp[k]) - if err != nil { - return err - } - f.SelectionSet = ss - case "ObjectDefinition": - err := json.Unmarshal(tmp[k], &f.ObjectDefinition) - if err != nil { - return err - } - case "Position": - err := json.Unmarshal(tmp[k], &f.Position) - if err != 
nil { - return err - } - } - } - return nil -} - -func (f *OperationDefinition) UnmarshalJSON(b []byte) error { - var tmp map[string]json.RawMessage - if err := json.Unmarshal(b, &tmp); err != nil { - return err - } - for k := range tmp { - switch k { - case "Operation": - err := json.Unmarshal(tmp[k], &f.Operation) - if err != nil { - return err - } - case "Name": - err := json.Unmarshal(tmp[k], &f.Name) - if err != nil { - return err - } - case "VariableDefinitions": - err := json.Unmarshal(tmp[k], &f.VariableDefinitions) - if err != nil { - return err - } - case "Directives": - err := json.Unmarshal(tmp[k], &f.Directives) - if err != nil { - return err - } - case "SelectionSet": - ss, err := UnmarshalSelectionSet(tmp[k]) - if err != nil { - return err - } - f.SelectionSet = ss - case "Position": - err := json.Unmarshal(tmp[k], &f.Position) - if err != nil { - return err - } - } - } - return nil -} - -func (f *Field) UnmarshalJSON(b []byte) error { - var tmp map[string]json.RawMessage - if err := json.Unmarshal(b, &tmp); err != nil { - return err - } - for k := range tmp { - switch k { - case "Alias": - err := json.Unmarshal(tmp[k], &f.Alias) - if err != nil { - return err - } - case "Name": - err := json.Unmarshal(tmp[k], &f.Name) - if err != nil { - return err - } - case "Arguments": - err := json.Unmarshal(tmp[k], &f.Arguments) - if err != nil { - return err - } - case "Directives": - err := json.Unmarshal(tmp[k], &f.Directives) - if err != nil { - return err - } - case "SelectionSet": - ss, err := UnmarshalSelectionSet(tmp[k]) - if err != nil { - return err - } - f.SelectionSet = ss - case "Position": - err := json.Unmarshal(tmp[k], &f.Position) - if err != nil { - return err - } - case "Definition": - err := json.Unmarshal(tmp[k], &f.Definition) - if err != nil { - return err - } - case "ObjectDefinition": - err := json.Unmarshal(tmp[k], &f.ObjectDefinition) - if err != nil { - return err - } - } - } - return nil -} diff --git 
a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go deleted file mode 100644 index 43bfb54ff5..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/document.go +++ /dev/null @@ -1,79 +0,0 @@ -package ast - -type QueryDocument struct { - Operations OperationList - Fragments FragmentDefinitionList - Position *Position `dump:"-"` -} - -type SchemaDocument struct { - Schema SchemaDefinitionList - SchemaExtension SchemaDefinitionList - Directives DirectiveDefinitionList - Definitions DefinitionList - Extensions DefinitionList - Position *Position `dump:"-"` -} - -func (d *SchemaDocument) Merge(other *SchemaDocument) { - d.Schema = append(d.Schema, other.Schema...) - d.SchemaExtension = append(d.SchemaExtension, other.SchemaExtension...) - d.Directives = append(d.Directives, other.Directives...) - d.Definitions = append(d.Definitions, other.Definitions...) - d.Extensions = append(d.Extensions, other.Extensions...) 
-} - -type Schema struct { - Query *Definition - Mutation *Definition - Subscription *Definition - - Types map[string]*Definition - Directives map[string]*DirectiveDefinition - - PossibleTypes map[string][]*Definition - Implements map[string][]*Definition - - Description string -} - -// AddTypes is the helper to add types definition to the schema -func (s *Schema) AddTypes(defs ...*Definition) { - if s.Types == nil { - s.Types = make(map[string]*Definition) - } - for _, def := range defs { - s.Types[def.Name] = def - } -} - -func (s *Schema) AddPossibleType(name string, def *Definition) { - s.PossibleTypes[name] = append(s.PossibleTypes[name], def) -} - -// GetPossibleTypes will enumerate all the definitions for a given interface or union -func (s *Schema) GetPossibleTypes(def *Definition) []*Definition { - return s.PossibleTypes[def.Name] -} - -func (s *Schema) AddImplements(name string, iface *Definition) { - s.Implements[name] = append(s.Implements[name], iface) -} - -// GetImplements returns all the interface and union definitions that the given definition satisfies -func (s *Schema) GetImplements(def *Definition) []*Definition { - return s.Implements[def.Name] -} - -type SchemaDefinition struct { - Description string - Directives DirectiveList - OperationTypes OperationTypeDefinitionList - Position *Position `dump:"-"` -} - -type OperationTypeDefinition struct { - Operation Operation - Type string - Position *Position `dump:"-"` -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/path.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/path.go deleted file mode 100644 index be1a9e4edb..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/path.go +++ /dev/null @@ -1,67 +0,0 @@ -package ast - -import ( - "bytes" - "encoding/json" - "fmt" -) - -var _ json.Unmarshaler = (*Path)(nil) - -type Path []PathElement - -type PathElement interface { - isPathElement() -} - -var _ PathElement = 
PathIndex(0) -var _ PathElement = PathName("") - -func (path Path) String() string { - var str bytes.Buffer - for i, v := range path { - switch v := v.(type) { - case PathIndex: - str.WriteString(fmt.Sprintf("[%d]", v)) - case PathName: - if i != 0 { - str.WriteByte('.') - } - str.WriteString(string(v)) - default: - panic(fmt.Sprintf("unknown type: %T", v)) - } - } - return str.String() -} - -func (path *Path) UnmarshalJSON(b []byte) error { - var vs []interface{} - err := json.Unmarshal(b, &vs) - if err != nil { - return err - } - - *path = make([]PathElement, 0, len(vs)) - for _, v := range vs { - switch v := v.(type) { - case string: - *path = append(*path, PathName(v)) - case int: - *path = append(*path, PathIndex(v)) - case float64: - *path = append(*path, PathIndex(int(v))) - default: - return fmt.Errorf("unknown path element type: %T", v) - } - } - return nil -} - -type PathIndex int - -func (PathIndex) isPathElement() {} - -type PathName string - -func (PathName) isPathElement() {} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go deleted file mode 100644 index 5ef26c6ab3..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/selection.go +++ /dev/null @@ -1,39 +0,0 @@ -package ast - -type SelectionSet []Selection - -type Selection interface { - isSelection() - GetPosition() *Position -} - -func (*Field) isSelection() {} -func (*FragmentSpread) isSelection() {} -func (*InlineFragment) isSelection() {} - -func (s *Field) GetPosition() *Position { return s.Position } -func (s *FragmentSpread) GetPosition() *Position { return s.Position } -func (s *InlineFragment) GetPosition() *Position { return s.Position } - -type Field struct { - Alias string - Name string - Arguments ArgumentList - Directives DirectiveList - SelectionSet SelectionSet - Position *Position `dump:"-"` - - // Require validation - Definition 
*FieldDefinition - ObjectDefinition *Definition -} - -type Argument struct { - Name string - Value *Value - Position *Position `dump:"-"` -} - -func (s *Field) ArgumentMap(vars map[string]interface{}) map[string]interface{} { - return arg2map(s.Definition.Arguments, s.Arguments, vars) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go deleted file mode 100644 index 5f77bc7ce4..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/type.go +++ /dev/null @@ -1,68 +0,0 @@ -package ast - -func NonNullNamedType(named string, pos *Position) *Type { - return &Type{NamedType: named, NonNull: true, Position: pos} -} - -func NamedType(named string, pos *Position) *Type { - return &Type{NamedType: named, NonNull: false, Position: pos} -} - -func NonNullListType(elem *Type, pos *Position) *Type { - return &Type{Elem: elem, NonNull: true, Position: pos} -} - -func ListType(elem *Type, pos *Position) *Type { - return &Type{Elem: elem, NonNull: false, Position: pos} -} - -type Type struct { - NamedType string - Elem *Type - NonNull bool - Position *Position `dump:"-"` -} - -func (t *Type) Name() string { - if t.NamedType != "" { - return t.NamedType - } - - return t.Elem.Name() -} - -func (t *Type) String() string { - nn := "" - if t.NonNull { - nn = "!" 
- } - if t.NamedType != "" { - return t.NamedType + nn - } - - return "[" + t.Elem.String() + "]" + nn -} - -func (t *Type) IsCompatible(other *Type) bool { - if t.NamedType != other.NamedType { - return false - } - - if t.Elem != nil && other.Elem == nil { - return false - } - - if t.Elem != nil && !t.Elem.IsCompatible(other.Elem) { - return false - } - - if other.NonNull { - return t.NonNull - } - - return true -} - -func (t *Type) Dump() string { - return t.String() -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go deleted file mode 100644 index c25ef15059..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/value.go +++ /dev/null @@ -1,120 +0,0 @@ -package ast - -import ( - "fmt" - "strconv" - "strings" -) - -type ValueKind int - -const ( - Variable ValueKind = iota - IntValue - FloatValue - StringValue - BlockValue - BooleanValue - NullValue - EnumValue - ListValue - ObjectValue -) - -type Value struct { - Raw string - Children ChildValueList - Kind ValueKind - Position *Position `dump:"-"` - - // Require validation - Definition *Definition - VariableDefinition *VariableDefinition - ExpectedType *Type -} - -type ChildValue struct { - Name string - Value *Value - Position *Position `dump:"-"` -} - -func (v *Value) Value(vars map[string]interface{}) (interface{}, error) { - if v == nil { - return nil, nil - } - switch v.Kind { - case Variable: - if value, ok := vars[v.Raw]; ok { - return value, nil - } - if v.VariableDefinition != nil && v.VariableDefinition.DefaultValue != nil { - return v.VariableDefinition.DefaultValue.Value(vars) - } - return nil, nil - case IntValue: - return strconv.ParseInt(v.Raw, 10, 64) - case FloatValue: - return strconv.ParseFloat(v.Raw, 64) - case StringValue, BlockValue, EnumValue: - return v.Raw, nil - case BooleanValue: - return strconv.ParseBool(v.Raw) - case NullValue: - return nil, nil - 
case ListValue: - var val []interface{} - for _, elem := range v.Children { - elemVal, err := elem.Value.Value(vars) - if err != nil { - return val, err - } - val = append(val, elemVal) - } - return val, nil - case ObjectValue: - val := map[string]interface{}{} - for _, elem := range v.Children { - elemVal, err := elem.Value.Value(vars) - if err != nil { - return val, err - } - val[elem.Name] = elemVal - } - return val, nil - default: - panic(fmt.Errorf("unknown value kind %d", v.Kind)) - } -} - -func (v *Value) String() string { - if v == nil { - return "" - } - switch v.Kind { - case Variable: - return "$" + v.Raw - case IntValue, FloatValue, EnumValue, BooleanValue, NullValue: - return v.Raw - case StringValue, BlockValue: - return strconv.Quote(v.Raw) - case ListValue: - var val []string - for _, elem := range v.Children { - val = append(val, elem.Value.String()) - } - return "[" + strings.Join(val, ",") + "]" - case ObjectValue: - var val []string - for _, elem := range v.Children { - val = append(val, elem.Name+":"+elem.Value.String()) - } - return "{" + strings.Join(val, ",") + "}" - default: - panic(fmt.Errorf("unknown value kind %d", v.Kind)) - } -} - -func (v *Value) Dump() string { - return v.String() -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/gqlerror/error.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/gqlerror/error.go deleted file mode 100644 index 58d1c1bd6c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/gqlerror/error.go +++ /dev/null @@ -1,147 +0,0 @@ -package gqlerror - -import ( - "bytes" - "errors" - "fmt" - "strconv" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" -) - -// Error is the standard graphql error type described in https://facebook.github.io/graphql/draft/#sec-Errors -type Error struct { - err error `json:"-"` - Message string `json:"message"` - Path ast.Path `json:"path,omitempty"` - Locations []Location `json:"locations,omitempty"` - 
Extensions map[string]interface{} `json:"extensions,omitempty"` - Rule string `json:"-"` -} - -func (err *Error) SetFile(file string) { - if file == "" { - return - } - if err.Extensions == nil { - err.Extensions = map[string]interface{}{} - } - - err.Extensions["file"] = file -} - -type Location struct { - Line int `json:"line,omitempty"` - Column int `json:"column,omitempty"` -} - -type List []*Error - -func (err *Error) Error() string { - var res bytes.Buffer - if err == nil { - return "" - } - filename, _ := err.Extensions["file"].(string) - if filename == "" { - filename = "input" - } - res.WriteString(filename) - - if len(err.Locations) > 0 { - res.WriteByte(':') - res.WriteString(strconv.Itoa(err.Locations[0].Line)) - res.WriteByte(':') - res.WriteString(strconv.Itoa(err.Locations[0].Column)) - } - - res.WriteString(": ") - if ps := err.pathString(); ps != "" { - res.WriteString(ps) - res.WriteByte(' ') - } - - res.WriteString(err.Message) - - return res.String() -} - -func (err Error) pathString() string { - return err.Path.String() -} - -func (err Error) Unwrap() error { - return err.err -} - -func (errs List) Error() string { - var buf bytes.Buffer - for _, err := range errs { - buf.WriteString(err.Error()) - buf.WriteByte('\n') - } - return buf.String() -} - -func (errs List) Is(target error) bool { - for _, err := range errs { - if errors.Is(err, target) { - return true - } - } - return false -} - -func (errs List) As(target interface{}) bool { - for _, err := range errs { - if errors.As(err, target) { - return true - } - } - return false -} - -func WrapPath(path ast.Path, err error) *Error { - return &Error{ - err: err, - Message: err.Error(), - Path: path, - } -} - -func Errorf(message string, args ...interface{}) *Error { - return &Error{ - Message: fmt.Sprintf(message, args...), - } -} - -func ErrorPathf(path ast.Path, message string, args ...interface{}) *Error { - return &Error{ - Message: fmt.Sprintf(message, args...), - Path: path, - } -} - 
-func ErrorPosf(pos *ast.Position, message string, args ...interface{}) *Error { - return ErrorLocf( - pos.Src.Name, - pos.Line, - pos.Column, - message, - args..., - ) -} - -func ErrorLocf(file string, line int, col int, message string, args ...interface{}) *Error { - var extensions map[string]interface{} - if file != "" { - extensions = map[string]interface{}{"file": file} - } - return &Error{ - Message: fmt.Sprintf(message, args...), - Extensions: extensions, - Locations: []Location{ - {Line: line, Column: col}, - }, - } -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/parser.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/parser.go deleted file mode 100644 index c0d2b4a3b7..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/parser.go +++ /dev/null @@ -1,136 +0,0 @@ -package parser - -import ( - "strconv" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror" - "github.com/open-policy-agent/opa/internal/gqlparser/lexer" -) - -type parser struct { - lexer lexer.Lexer - err error - - peeked bool - peekToken lexer.Token - peekError error - - prev lexer.Token -} - -func (p *parser) peekPos() *ast.Position { - if p.err != nil { - return nil - } - - peek := p.peek() - return &peek.Pos -} - -func (p *parser) peek() lexer.Token { - if p.err != nil { - return p.prev - } - - if !p.peeked { - p.peekToken, p.peekError = p.lexer.ReadToken() - p.peeked = true - } - - return p.peekToken -} - -func (p *parser) error(tok lexer.Token, format string, args ...interface{}) { - if p.err != nil { - return - } - p.err = gqlerror.ErrorLocf(tok.Pos.Src.Name, tok.Pos.Line, tok.Pos.Column, format, args...) 
-} - -func (p *parser) next() lexer.Token { - if p.err != nil { - return p.prev - } - if p.peeked { - p.peeked = false - p.prev, p.err = p.peekToken, p.peekError - } else { - p.prev, p.err = p.lexer.ReadToken() - } - return p.prev -} - -func (p *parser) expectKeyword(value string) lexer.Token { - tok := p.peek() - if tok.Kind == lexer.Name && tok.Value == value { - return p.next() - } - - p.error(tok, "Expected %s, found %s", strconv.Quote(value), tok.String()) - return tok -} - -func (p *parser) expect(kind lexer.Type) lexer.Token { - tok := p.peek() - if tok.Kind == kind { - return p.next() - } - - p.error(tok, "Expected %s, found %s", kind, tok.Kind.String()) - return tok -} - -func (p *parser) skip(kind lexer.Type) bool { - if p.err != nil { - return false - } - - tok := p.peek() - - if tok.Kind != kind { - return false - } - p.next() - return true -} - -func (p *parser) unexpectedError() { - p.unexpectedToken(p.peek()) -} - -func (p *parser) unexpectedToken(tok lexer.Token) { - p.error(tok, "Unexpected %s", tok.String()) -} - -func (p *parser) many(start lexer.Type, end lexer.Type, cb func()) { - hasDef := p.skip(start) - if !hasDef { - return - } - - for p.peek().Kind != end && p.err == nil { - cb() - } - p.next() -} - -func (p *parser) some(start lexer.Type, end lexer.Type, cb func()) { - hasDef := p.skip(start) - if !hasDef { - return - } - - called := false - for p.peek().Kind != end && p.err == nil { - called = true - cb() - } - - if !called { - p.error(p.peek(), "expected at least one definition, found %s", p.peek().Kind.String()) - return - } - - p.next() -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query.go deleted file mode 100644 index 319425f587..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query.go +++ /dev/null @@ -1,349 +0,0 @@ -package parser - -import ( - 
"github.com/open-policy-agent/opa/internal/gqlparser/lexer" - - //nolint:revive - . "github.com/open-policy-agent/opa/internal/gqlparser/ast" -) - -func ParseQuery(source *Source) (*QueryDocument, error) { - p := parser{ - lexer: lexer.New(source), - } - return p.parseQueryDocument(), p.err -} - -func (p *parser) parseQueryDocument() *QueryDocument { - var doc QueryDocument - for p.peek().Kind != lexer.EOF { - if p.err != nil { - return &doc - } - doc.Position = p.peekPos() - switch p.peek().Kind { - case lexer.Name: - switch p.peek().Value { - case "query", "mutation", "subscription": - doc.Operations = append(doc.Operations, p.parseOperationDefinition()) - case "fragment": - doc.Fragments = append(doc.Fragments, p.parseFragmentDefinition()) - default: - p.unexpectedError() - } - case lexer.BraceL: - doc.Operations = append(doc.Operations, p.parseOperationDefinition()) - default: - p.unexpectedError() - } - } - - return &doc -} - -func (p *parser) parseOperationDefinition() *OperationDefinition { - if p.peek().Kind == lexer.BraceL { - return &OperationDefinition{ - Position: p.peekPos(), - Operation: Query, - SelectionSet: p.parseRequiredSelectionSet(), - } - } - - var od OperationDefinition - od.Position = p.peekPos() - od.Operation = p.parseOperationType() - - if p.peek().Kind == lexer.Name { - od.Name = p.next().Value - } - - od.VariableDefinitions = p.parseVariableDefinitions() - od.Directives = p.parseDirectives(false) - od.SelectionSet = p.parseRequiredSelectionSet() - - return &od -} - -func (p *parser) parseOperationType() Operation { - tok := p.next() - switch tok.Value { - case "query": - return Query - case "mutation": - return Mutation - case "subscription": - return Subscription - } - p.unexpectedToken(tok) - return "" -} - -func (p *parser) parseVariableDefinitions() VariableDefinitionList { - var defs []*VariableDefinition - p.many(lexer.ParenL, lexer.ParenR, func() { - defs = append(defs, p.parseVariableDefinition()) - }) - - return defs -} - -func 
(p *parser) parseVariableDefinition() *VariableDefinition { - var def VariableDefinition - def.Position = p.peekPos() - def.Variable = p.parseVariable() - - p.expect(lexer.Colon) - - def.Type = p.parseTypeReference() - - if p.skip(lexer.Equals) { - def.DefaultValue = p.parseValueLiteral(true) - } - - def.Directives = p.parseDirectives(false) - - return &def -} - -func (p *parser) parseVariable() string { - p.expect(lexer.Dollar) - return p.parseName() -} - -func (p *parser) parseOptionalSelectionSet() SelectionSet { - var selections []Selection - p.some(lexer.BraceL, lexer.BraceR, func() { - selections = append(selections, p.parseSelection()) - }) - - return SelectionSet(selections) -} - -func (p *parser) parseRequiredSelectionSet() SelectionSet { - if p.peek().Kind != lexer.BraceL { - p.error(p.peek(), "Expected %s, found %s", lexer.BraceL, p.peek().Kind.String()) - return nil - } - - var selections []Selection - p.some(lexer.BraceL, lexer.BraceR, func() { - selections = append(selections, p.parseSelection()) - }) - - return SelectionSet(selections) -} - -func (p *parser) parseSelection() Selection { - if p.peek().Kind == lexer.Spread { - return p.parseFragment() - } - return p.parseField() -} - -func (p *parser) parseField() *Field { - var field Field - field.Position = p.peekPos() - field.Alias = p.parseName() - - if p.skip(lexer.Colon) { - field.Name = p.parseName() - } else { - field.Name = field.Alias - } - - field.Arguments = p.parseArguments(false) - field.Directives = p.parseDirectives(false) - if p.peek().Kind == lexer.BraceL { - field.SelectionSet = p.parseOptionalSelectionSet() - } - - return &field -} - -func (p *parser) parseArguments(isConst bool) ArgumentList { - var arguments ArgumentList - p.many(lexer.ParenL, lexer.ParenR, func() { - arguments = append(arguments, p.parseArgument(isConst)) - }) - - return arguments -} - -func (p *parser) parseArgument(isConst bool) *Argument { - arg := Argument{} - arg.Position = p.peekPos() - arg.Name = 
p.parseName() - p.expect(lexer.Colon) - - arg.Value = p.parseValueLiteral(isConst) - return &arg -} - -func (p *parser) parseFragment() Selection { - p.expect(lexer.Spread) - - if peek := p.peek(); peek.Kind == lexer.Name && peek.Value != "on" { - return &FragmentSpread{ - Position: p.peekPos(), - Name: p.parseFragmentName(), - Directives: p.parseDirectives(false), - } - } - - var def InlineFragment - def.Position = p.peekPos() - if p.peek().Value == "on" { - p.next() // "on" - - def.TypeCondition = p.parseName() - } - - def.Directives = p.parseDirectives(false) - def.SelectionSet = p.parseRequiredSelectionSet() - return &def -} - -func (p *parser) parseFragmentDefinition() *FragmentDefinition { - var def FragmentDefinition - def.Position = p.peekPos() - p.expectKeyword("fragment") - - def.Name = p.parseFragmentName() - def.VariableDefinition = p.parseVariableDefinitions() - - p.expectKeyword("on") - - def.TypeCondition = p.parseName() - def.Directives = p.parseDirectives(false) - def.SelectionSet = p.parseRequiredSelectionSet() - return &def -} - -func (p *parser) parseFragmentName() string { - if p.peek().Value == "on" { - p.unexpectedError() - return "" - } - - return p.parseName() -} - -func (p *parser) parseValueLiteral(isConst bool) *Value { - token := p.peek() - - var kind ValueKind - switch token.Kind { - case lexer.BracketL: - return p.parseList(isConst) - case lexer.BraceL: - return p.parseObject(isConst) - case lexer.Dollar: - if isConst { - p.unexpectedError() - return nil - } - return &Value{Position: &token.Pos, Raw: p.parseVariable(), Kind: Variable} - case lexer.Int: - kind = IntValue - case lexer.Float: - kind = FloatValue - case lexer.String: - kind = StringValue - case lexer.BlockString: - kind = BlockValue - case lexer.Name: - switch token.Value { - case "true", "false": - kind = BooleanValue - case "null": - kind = NullValue - default: - kind = EnumValue - } - default: - p.unexpectedError() - return nil - } - - p.next() - - return 
&Value{Position: &token.Pos, Raw: token.Value, Kind: kind} -} - -func (p *parser) parseList(isConst bool) *Value { - var values ChildValueList - pos := p.peekPos() - p.many(lexer.BracketL, lexer.BracketR, func() { - values = append(values, &ChildValue{Value: p.parseValueLiteral(isConst)}) - }) - - return &Value{Children: values, Kind: ListValue, Position: pos} -} - -func (p *parser) parseObject(isConst bool) *Value { - var fields ChildValueList - pos := p.peekPos() - p.many(lexer.BraceL, lexer.BraceR, func() { - fields = append(fields, p.parseObjectField(isConst)) - }) - - return &Value{Children: fields, Kind: ObjectValue, Position: pos} -} - -func (p *parser) parseObjectField(isConst bool) *ChildValue { - field := ChildValue{} - field.Position = p.peekPos() - field.Name = p.parseName() - - p.expect(lexer.Colon) - - field.Value = p.parseValueLiteral(isConst) - return &field -} - -func (p *parser) parseDirectives(isConst bool) []*Directive { - var directives []*Directive - - for p.peek().Kind == lexer.At { - if p.err != nil { - break - } - directives = append(directives, p.parseDirective(isConst)) - } - return directives -} - -func (p *parser) parseDirective(isConst bool) *Directive { - p.expect(lexer.At) - - return &Directive{ - Position: p.peekPos(), - Name: p.parseName(), - Arguments: p.parseArguments(isConst), - } -} - -func (p *parser) parseTypeReference() *Type { - var typ Type - - if p.skip(lexer.BracketL) { - typ.Position = p.peekPos() - typ.Elem = p.parseTypeReference() - p.expect(lexer.BracketR) - } else { - typ.Position = p.peekPos() - typ.NamedType = p.parseName() - } - - if p.skip(lexer.Bang) { - typ.NonNull = true - } - return &typ -} - -func (p *parser) parseName() string { - token := p.expect(lexer.Name) - - return token.Value -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema.go deleted file mode 100644 index 32c293399b..0000000000 --- 
a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema.go +++ /dev/null @@ -1,535 +0,0 @@ -package parser - -import ( - //nolint:revive - . "github.com/open-policy-agent/opa/internal/gqlparser/ast" - "github.com/open-policy-agent/opa/internal/gqlparser/lexer" -) - -func ParseSchema(source *Source) (*SchemaDocument, error) { - p := parser{ - lexer: lexer.New(source), - } - ast, err := p.parseSchemaDocument(), p.err - if err != nil { - return nil, err - } - - for _, def := range ast.Definitions { - def.BuiltIn = source.BuiltIn - } - for _, def := range ast.Extensions { - def.BuiltIn = source.BuiltIn - } - - return ast, nil -} - -func ParseSchemas(inputs ...*Source) (*SchemaDocument, error) { - ast := &SchemaDocument{} - for _, input := range inputs { - inputAst, err := ParseSchema(input) - if err != nil { - return nil, err - } - ast.Merge(inputAst) - } - return ast, nil -} - -func (p *parser) parseSchemaDocument() *SchemaDocument { - var doc SchemaDocument - doc.Position = p.peekPos() - for p.peek().Kind != lexer.EOF { - if p.err != nil { - return nil - } - - var description string - if p.peek().Kind == lexer.BlockString || p.peek().Kind == lexer.String { - description = p.parseDescription() - } - - if p.peek().Kind != lexer.Name { - p.unexpectedError() - break - } - - switch p.peek().Value { - case "scalar", "type", "interface", "union", "enum", "input": - doc.Definitions = append(doc.Definitions, p.parseTypeSystemDefinition(description)) - case "schema": - doc.Schema = append(doc.Schema, p.parseSchemaDefinition(description)) - case "directive": - doc.Directives = append(doc.Directives, p.parseDirectiveDefinition(description)) - case "extend": - if description != "" { - p.unexpectedToken(p.prev) - } - p.parseTypeSystemExtension(&doc) - default: - p.unexpectedError() - return nil - } - } - - return &doc -} - -func (p *parser) parseDescription() string { - token := p.peek() - - if token.Kind != lexer.BlockString && token.Kind != lexer.String { - 
return "" - } - - return p.next().Value -} - -func (p *parser) parseTypeSystemDefinition(description string) *Definition { - tok := p.peek() - if tok.Kind != lexer.Name { - p.unexpectedError() - return nil - } - - switch tok.Value { - case "scalar": - return p.parseScalarTypeDefinition(description) - case "type": - return p.parseObjectTypeDefinition(description) - case "interface": - return p.parseInterfaceTypeDefinition(description) - case "union": - return p.parseUnionTypeDefinition(description) - case "enum": - return p.parseEnumTypeDefinition(description) - case "input": - return p.parseInputObjectTypeDefinition(description) - default: - p.unexpectedError() - return nil - } -} - -func (p *parser) parseSchemaDefinition(description string) *SchemaDefinition { - p.expectKeyword("schema") - - def := SchemaDefinition{Description: description} - def.Position = p.peekPos() - def.Description = description - def.Directives = p.parseDirectives(true) - - p.some(lexer.BraceL, lexer.BraceR, func() { - def.OperationTypes = append(def.OperationTypes, p.parseOperationTypeDefinition()) - }) - return &def -} - -func (p *parser) parseOperationTypeDefinition() *OperationTypeDefinition { - var op OperationTypeDefinition - op.Position = p.peekPos() - op.Operation = p.parseOperationType() - p.expect(lexer.Colon) - op.Type = p.parseName() - return &op -} - -func (p *parser) parseScalarTypeDefinition(description string) *Definition { - p.expectKeyword("scalar") - - var def Definition - def.Position = p.peekPos() - def.Kind = Scalar - def.Description = description - def.Name = p.parseName() - def.Directives = p.parseDirectives(true) - return &def -} - -func (p *parser) parseObjectTypeDefinition(description string) *Definition { - p.expectKeyword("type") - - var def Definition - def.Position = p.peekPos() - def.Kind = Object - def.Description = description - def.Name = p.parseName() - def.Interfaces = p.parseImplementsInterfaces() - def.Directives = p.parseDirectives(true) - def.Fields = 
p.parseFieldsDefinition() - return &def -} - -func (p *parser) parseImplementsInterfaces() []string { - var types []string - if p.peek().Value == "implements" { - p.next() - // optional leading ampersand - p.skip(lexer.Amp) - - types = append(types, p.parseName()) - for p.skip(lexer.Amp) && p.err == nil { - types = append(types, p.parseName()) - } - } - return types -} - -func (p *parser) parseFieldsDefinition() FieldList { - var defs FieldList - p.some(lexer.BraceL, lexer.BraceR, func() { - defs = append(defs, p.parseFieldDefinition()) - }) - return defs -} - -func (p *parser) parseFieldDefinition() *FieldDefinition { - var def FieldDefinition - def.Position = p.peekPos() - def.Description = p.parseDescription() - def.Name = p.parseName() - def.Arguments = p.parseArgumentDefs() - p.expect(lexer.Colon) - def.Type = p.parseTypeReference() - def.Directives = p.parseDirectives(true) - - return &def -} - -func (p *parser) parseArgumentDefs() ArgumentDefinitionList { - var args ArgumentDefinitionList - p.some(lexer.ParenL, lexer.ParenR, func() { - args = append(args, p.parseArgumentDef()) - }) - return args -} - -func (p *parser) parseArgumentDef() *ArgumentDefinition { - var def ArgumentDefinition - def.Position = p.peekPos() - def.Description = p.parseDescription() - def.Name = p.parseName() - p.expect(lexer.Colon) - def.Type = p.parseTypeReference() - if p.skip(lexer.Equals) { - def.DefaultValue = p.parseValueLiteral(true) - } - def.Directives = p.parseDirectives(true) - return &def -} - -func (p *parser) parseInputValueDef() *FieldDefinition { - var def FieldDefinition - def.Position = p.peekPos() - def.Description = p.parseDescription() - def.Name = p.parseName() - p.expect(lexer.Colon) - def.Type = p.parseTypeReference() - if p.skip(lexer.Equals) { - def.DefaultValue = p.parseValueLiteral(true) - } - def.Directives = p.parseDirectives(true) - return &def -} - -func (p *parser) parseInterfaceTypeDefinition(description string) *Definition { - 
p.expectKeyword("interface") - - var def Definition - def.Position = p.peekPos() - def.Kind = Interface - def.Description = description - def.Name = p.parseName() - def.Interfaces = p.parseImplementsInterfaces() - def.Directives = p.parseDirectives(true) - def.Fields = p.parseFieldsDefinition() - return &def -} - -func (p *parser) parseUnionTypeDefinition(description string) *Definition { - p.expectKeyword("union") - - var def Definition - def.Position = p.peekPos() - def.Kind = Union - def.Description = description - def.Name = p.parseName() - def.Directives = p.parseDirectives(true) - def.Types = p.parseUnionMemberTypes() - return &def -} - -func (p *parser) parseUnionMemberTypes() []string { - var types []string - if p.skip(lexer.Equals) { - // optional leading pipe - p.skip(lexer.Pipe) - - types = append(types, p.parseName()) - for p.skip(lexer.Pipe) && p.err == nil { - types = append(types, p.parseName()) - } - } - return types -} - -func (p *parser) parseEnumTypeDefinition(description string) *Definition { - p.expectKeyword("enum") - - var def Definition - def.Position = p.peekPos() - def.Kind = Enum - def.Description = description - def.Name = p.parseName() - def.Directives = p.parseDirectives(true) - def.EnumValues = p.parseEnumValuesDefinition() - return &def -} - -func (p *parser) parseEnumValuesDefinition() EnumValueList { - var values EnumValueList - p.some(lexer.BraceL, lexer.BraceR, func() { - values = append(values, p.parseEnumValueDefinition()) - }) - return values -} - -func (p *parser) parseEnumValueDefinition() *EnumValueDefinition { - return &EnumValueDefinition{ - Position: p.peekPos(), - Description: p.parseDescription(), - Name: p.parseName(), - Directives: p.parseDirectives(true), - } -} - -func (p *parser) parseInputObjectTypeDefinition(description string) *Definition { - p.expectKeyword("input") - - var def Definition - def.Position = p.peekPos() - def.Kind = InputObject - def.Description = description - def.Name = p.parseName() - 
def.Directives = p.parseDirectives(true) - def.Fields = p.parseInputFieldsDefinition() - return &def -} - -func (p *parser) parseInputFieldsDefinition() FieldList { - var values FieldList - p.some(lexer.BraceL, lexer.BraceR, func() { - values = append(values, p.parseInputValueDef()) - }) - return values -} - -func (p *parser) parseTypeSystemExtension(doc *SchemaDocument) { - p.expectKeyword("extend") - - switch p.peek().Value { - case "schema": - doc.SchemaExtension = append(doc.SchemaExtension, p.parseSchemaExtension()) - case "scalar": - doc.Extensions = append(doc.Extensions, p.parseScalarTypeExtension()) - case "type": - doc.Extensions = append(doc.Extensions, p.parseObjectTypeExtension()) - case "interface": - doc.Extensions = append(doc.Extensions, p.parseInterfaceTypeExtension()) - case "union": - doc.Extensions = append(doc.Extensions, p.parseUnionTypeExtension()) - case "enum": - doc.Extensions = append(doc.Extensions, p.parseEnumTypeExtension()) - case "input": - doc.Extensions = append(doc.Extensions, p.parseInputObjectTypeExtension()) - default: - p.unexpectedError() - } -} - -func (p *parser) parseSchemaExtension() *SchemaDefinition { - p.expectKeyword("schema") - - var def SchemaDefinition - def.Position = p.peekPos() - def.Directives = p.parseDirectives(true) - p.some(lexer.BraceL, lexer.BraceR, func() { - def.OperationTypes = append(def.OperationTypes, p.parseOperationTypeDefinition()) - }) - if len(def.Directives) == 0 && len(def.OperationTypes) == 0 { - p.unexpectedError() - } - return &def -} - -func (p *parser) parseScalarTypeExtension() *Definition { - p.expectKeyword("scalar") - - var def Definition - def.Position = p.peekPos() - def.Kind = Scalar - def.Name = p.parseName() - def.Directives = p.parseDirectives(true) - if len(def.Directives) == 0 { - p.unexpectedError() - } - return &def -} - -func (p *parser) parseObjectTypeExtension() *Definition { - p.expectKeyword("type") - - var def Definition - def.Position = p.peekPos() - def.Kind = 
Object - def.Name = p.parseName() - def.Interfaces = p.parseImplementsInterfaces() - def.Directives = p.parseDirectives(true) - def.Fields = p.parseFieldsDefinition() - if len(def.Interfaces) == 0 && len(def.Directives) == 0 && len(def.Fields) == 0 { - p.unexpectedError() - } - return &def -} - -func (p *parser) parseInterfaceTypeExtension() *Definition { - p.expectKeyword("interface") - - var def Definition - def.Position = p.peekPos() - def.Kind = Interface - def.Name = p.parseName() - def.Directives = p.parseDirectives(true) - def.Fields = p.parseFieldsDefinition() - if len(def.Directives) == 0 && len(def.Fields) == 0 { - p.unexpectedError() - } - return &def -} - -func (p *parser) parseUnionTypeExtension() *Definition { - p.expectKeyword("union") - - var def Definition - def.Position = p.peekPos() - def.Kind = Union - def.Name = p.parseName() - def.Directives = p.parseDirectives(true) - def.Types = p.parseUnionMemberTypes() - - if len(def.Directives) == 0 && len(def.Types) == 0 { - p.unexpectedError() - } - return &def -} - -func (p *parser) parseEnumTypeExtension() *Definition { - p.expectKeyword("enum") - - var def Definition - def.Position = p.peekPos() - def.Kind = Enum - def.Name = p.parseName() - def.Directives = p.parseDirectives(true) - def.EnumValues = p.parseEnumValuesDefinition() - if len(def.Directives) == 0 && len(def.EnumValues) == 0 { - p.unexpectedError() - } - return &def -} - -func (p *parser) parseInputObjectTypeExtension() *Definition { - p.expectKeyword("input") - - var def Definition - def.Position = p.peekPos() - def.Kind = InputObject - def.Name = p.parseName() - def.Directives = p.parseDirectives(false) - def.Fields = p.parseInputFieldsDefinition() - if len(def.Directives) == 0 && len(def.Fields) == 0 { - p.unexpectedError() - } - return &def -} - -func (p *parser) parseDirectiveDefinition(description string) *DirectiveDefinition { - p.expectKeyword("directive") - p.expect(lexer.At) - - var def DirectiveDefinition - def.Position = 
p.peekPos() - def.Description = description - def.Name = p.parseName() - def.Arguments = p.parseArgumentDefs() - - if peek := p.peek(); peek.Kind == lexer.Name && peek.Value == "repeatable" { - def.IsRepeatable = true - p.skip(lexer.Name) - } - - p.expectKeyword("on") - def.Locations = p.parseDirectiveLocations() - return &def -} - -func (p *parser) parseDirectiveLocations() []DirectiveLocation { - p.skip(lexer.Pipe) - - locations := []DirectiveLocation{p.parseDirectiveLocation()} - - for p.skip(lexer.Pipe) && p.err == nil { - locations = append(locations, p.parseDirectiveLocation()) - } - - return locations -} - -func (p *parser) parseDirectiveLocation() DirectiveLocation { - name := p.expect(lexer.Name) - - switch name.Value { - case `QUERY`: - return LocationQuery - case `MUTATION`: - return LocationMutation - case `SUBSCRIPTION`: - return LocationSubscription - case `FIELD`: - return LocationField - case `FRAGMENT_DEFINITION`: - return LocationFragmentDefinition - case `FRAGMENT_SPREAD`: - return LocationFragmentSpread - case `INLINE_FRAGMENT`: - return LocationInlineFragment - case `VARIABLE_DEFINITION`: - return LocationVariableDefinition - case `SCHEMA`: - return LocationSchema - case `SCALAR`: - return LocationScalar - case `OBJECT`: - return LocationObject - case `FIELD_DEFINITION`: - return LocationFieldDefinition - case `ARGUMENT_DEFINITION`: - return LocationArgumentDefinition - case `INTERFACE`: - return LocationInterface - case `UNION`: - return LocationUnion - case `ENUM`: - return LocationEnum - case `ENUM_VALUE`: - return LocationEnumValue - case `INPUT_OBJECT`: - return LocationInputObject - case `INPUT_FIELD_DEFINITION`: - return LocationInputFieldDefinition - } - - p.unexpectedToken(name) - return "" -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema_test.yml b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema_test.yml deleted file mode 100644 index 8b6a5d0ca3..0000000000 --- 
a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/schema_test.yml +++ /dev/null @@ -1,646 +0,0 @@ -object types: - - name: simple - input: | - type Hello { - world: String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Type: String - - - name: with description - input: | - "Description" - type Hello { - world: String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Description: "Description" - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Type: String - - - name: with block description - input: | - """ - Description - """ - # Even with comments between them - type Hello { - world: String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Description: "Description" - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Type: String - - name: with field arg - input: | - type Hello { - world(flag: Boolean): String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Arguments: [ArgumentDefinition] - - - Name: "flag" - Type: Boolean - Type: String - - - name: with field arg and default value - input: | - type Hello { - world(flag: Boolean = true): String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Arguments: [ArgumentDefinition] - - - Name: "flag" - DefaultValue: true - Type: Boolean - Type: String - - - name: with field list arg - input: | - type Hello { - world(things: [String]): String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Arguments: [ArgumentDefinition] - - - Name: "things" - Type: [String] - Type: String - - - name: with two args - input: | - type Hello { - 
world(argOne: Boolean, argTwo: Int): String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Arguments: [ArgumentDefinition] - - - Name: "argOne" - Type: Boolean - - - Name: "argTwo" - Type: Int - Type: String - - name: must define one or more fields - input: | - type Hello {} - error: - message: "expected at least one definition, found }" - locations: [{ line: 1, column: 13 }] - -type extensions: - - name: Object extension - input: | - extend type Hello { - world: String - } - ast: | - - Extensions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Type: String - - - name: without any fields - input: "extend type Hello implements Greeting" - ast: | - - Extensions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Interfaces: [string] - - "Greeting" - - - name: without fields twice - input: | - extend type Hello implements Greeting - extend type Hello implements SecondGreeting - ast: | - - Extensions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Interfaces: [string] - - "Greeting" - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Interfaces: [string] - - "SecondGreeting" - - - name: without anything errors - input: "extend type Hello" - error: - message: "Unexpected " - locations: [{ line: 1, column: 18 }] - - - name: can have descriptions # hmm, this might not be spec compliant... 
- input: | - "Description" - extend type Hello { - world: String - } - error: - message: 'Unexpected String "Description"' - locations: [{ line: 1, column: 2 }] - - - name: can not have descriptions on types - input: | - extend "Description" type Hello { - world: String - } - error: - message: Unexpected String "Description" - locations: [{ line: 1, column: 9 }] - - - name: all can have directives - input: | - extend scalar Foo @deprecated - extend type Foo @deprecated - extend interface Foo @deprecated - extend union Foo @deprecated - extend enum Foo @deprecated - extend input Foo @deprecated - ast: | - - Extensions: [Definition] - - - Kind: DefinitionKind("SCALAR") - Name: "Foo" - Directives: [Directive] - - - Name: "deprecated" - - - Kind: DefinitionKind("OBJECT") - Name: "Foo" - Directives: [Directive] - - - Name: "deprecated" - - - Kind: DefinitionKind("INTERFACE") - Name: "Foo" - Directives: [Directive] - - - Name: "deprecated" - - - Kind: DefinitionKind("UNION") - Name: "Foo" - Directives: [Directive] - - - Name: "deprecated" - - - Kind: DefinitionKind("ENUM") - Name: "Foo" - Directives: [Directive] - - - Name: "deprecated" - - - Kind: DefinitionKind("INPUT_OBJECT") - Name: "Foo" - Directives: [Directive] - - - Name: "deprecated" - -schema definition: - - name: simple - input: | - schema { - query: Query - } - ast: | - - Schema: [SchemaDefinition] - - - OperationTypes: [OperationTypeDefinition] - - - Operation: Operation("query") - Type: "Query" - -schema extensions: - - name: simple - input: | - extend schema { - mutation: Mutation - } - ast: | - - SchemaExtension: [SchemaDefinition] - - - OperationTypes: [OperationTypeDefinition] - - - Operation: Operation("mutation") - Type: "Mutation" - - - name: directive only - input: "extend schema @directive" - ast: | - - SchemaExtension: [SchemaDefinition] - - - Directives: [Directive] - - - Name: "directive" - - - name: without anything errors - input: "extend schema" - error: - message: "Unexpected " - locations: 
[{ line: 1, column: 14}] - -inheritance: - - name: single - input: "type Hello implements World { field: String }" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Interfaces: [string] - - "World" - Fields: [FieldDefinition] - - - Name: "field" - Type: String - - - name: multi - input: "type Hello implements Wo & rld { field: String }" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Interfaces: [string] - - "Wo" - - "rld" - Fields: [FieldDefinition] - - - Name: "field" - Type: String - - - name: multi with leading amp - input: "type Hello implements & Wo & rld { field: String }" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("OBJECT") - Name: "Hello" - Interfaces: [string] - - "Wo" - - "rld" - Fields: [FieldDefinition] - - - Name: "field" - Type: String - -enums: - - name: single value - input: "enum Hello { WORLD }" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("ENUM") - Name: "Hello" - EnumValues: [EnumValueDefinition] - - - Name: "WORLD" - - - name: double value - input: "enum Hello { WO, RLD }" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("ENUM") - Name: "Hello" - EnumValues: [EnumValueDefinition] - - - Name: "WO" - - - Name: "RLD" - - name: must define one or more unique enum values - input: | - enum Hello {} - error: - message: "expected at least one definition, found }" - locations: [{ line: 1, column: 13 }] - -interface: - - name: simple - input: | - interface Hello { - world: String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("INTERFACE") - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Type: String - - name: must define one or more fields - input: | - interface Hello {} - error: - message: "expected at least one definition, found }" - locations: [{ line: 1, column: 18 }] - - - name: may define intermediate interfaces - input: | - interface IA { - id: ID! 
- } - - interface IIA implements IA { - id: ID! - } - - type A implements IIA { - id: ID! - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("INTERFACE") - Name: "IA" - Fields: [FieldDefinition] - - - Name: "id" - Type: ID! - - - Kind: DefinitionKind("INTERFACE") - Name: "IIA" - Interfaces: [string] - - "IA" - Fields: [FieldDefinition] - - - Name: "id" - Type: ID! - - - Kind: DefinitionKind("OBJECT") - Name: "A" - Interfaces: [string] - - "IIA" - Fields: [FieldDefinition] - - - Name: "id" - Type: ID! - -unions: - - name: simple - input: "union Hello = World" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("UNION") - Name: "Hello" - Types: [string] - - "World" - - - name: with two types - input: "union Hello = Wo | Rld" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("UNION") - Name: "Hello" - Types: [string] - - "Wo" - - "Rld" - - - name: with leading pipe - input: "union Hello = | Wo | Rld" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("UNION") - Name: "Hello" - Types: [string] - - "Wo" - - "Rld" - - - name: cant be empty - input: "union Hello = || Wo | Rld" - error: - message: "Expected Name, found |" - locations: [{ line: 1, column: 16 }] - - - name: cant double pipe - input: "union Hello = Wo || Rld" - error: - message: "Expected Name, found |" - locations: [{ line: 1, column: 19 }] - - - name: cant have trailing pipe - input: "union Hello = | Wo | Rld |" - error: - message: "Expected Name, found " - locations: [{ line: 1, column: 27 }] - -scalar: - - name: simple - input: "scalar Hello" - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("SCALAR") - Name: "Hello" - -input object: - - name: simple - input: | - input Hello { - world: String - } - ast: | - - Definitions: [Definition] - - - Kind: DefinitionKind("INPUT_OBJECT") - Name: "Hello" - Fields: [FieldDefinition] - - - Name: "world" - Type: String - - - name: can not have args - input: | - input Hello { - world(foo: 
Int): String - } - error: - message: "Expected :, found (" - locations: [{ line: 2, column: 8 }] - - name: must define one or more input fields - input: | - input Hello {} - error: - message: "expected at least one definition, found }" - locations: [{ line: 1, column: 14 }] - -directives: - - name: simple - input: directive @foo on FIELD - ast: | - - Directives: [DirectiveDefinition] - - - Name: "foo" - Locations: [DirectiveLocation] - - DirectiveLocation("FIELD") - IsRepeatable: false - - - name: executable - input: | - directive @onQuery on QUERY - directive @onMutation on MUTATION - directive @onSubscription on SUBSCRIPTION - directive @onField on FIELD - directive @onFragmentDefinition on FRAGMENT_DEFINITION - directive @onFragmentSpread on FRAGMENT_SPREAD - directive @onInlineFragment on INLINE_FRAGMENT - directive @onVariableDefinition on VARIABLE_DEFINITION - ast: | - - Directives: [DirectiveDefinition] - - - Name: "onQuery" - Locations: [DirectiveLocation] - - DirectiveLocation("QUERY") - IsRepeatable: false - - - Name: "onMutation" - Locations: [DirectiveLocation] - - DirectiveLocation("MUTATION") - IsRepeatable: false - - - Name: "onSubscription" - Locations: [DirectiveLocation] - - DirectiveLocation("SUBSCRIPTION") - IsRepeatable: false - - - Name: "onField" - Locations: [DirectiveLocation] - - DirectiveLocation("FIELD") - IsRepeatable: false - - - Name: "onFragmentDefinition" - Locations: [DirectiveLocation] - - DirectiveLocation("FRAGMENT_DEFINITION") - IsRepeatable: false - - - Name: "onFragmentSpread" - Locations: [DirectiveLocation] - - DirectiveLocation("FRAGMENT_SPREAD") - IsRepeatable: false - - - Name: "onInlineFragment" - Locations: [DirectiveLocation] - - DirectiveLocation("INLINE_FRAGMENT") - IsRepeatable: false - - - Name: "onVariableDefinition" - Locations: [DirectiveLocation] - - DirectiveLocation("VARIABLE_DEFINITION") - IsRepeatable: false - - - name: repeatable - input: directive @foo repeatable on FIELD - ast: | - - Directives: 
[DirectiveDefinition] - - - Name: "foo" - Locations: [DirectiveLocation] - - DirectiveLocation("FIELD") - IsRepeatable: true - - - name: invalid location - input: "directive @foo on FIELD | INCORRECT_LOCATION" - error: - message: 'Unexpected Name "INCORRECT_LOCATION"' - locations: [{ line: 1, column: 27 }] - -fuzzer: - - name: 1 - input: "type o{d(g:[" - error: - message: 'Expected Name, found ' - locations: [{ line: 1, column: 13 }] - - name: 2 - input: "\"\"\"\r" - error: - message: 'Unexpected ' - locations: [{ line: 2, column: 1 }] diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/error.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/error.go deleted file mode 100644 index f31f180a2e..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/error.go +++ /dev/null @@ -1,55 +0,0 @@ -package validator - -import ( - "fmt" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror" -) - -type ErrorOption func(err *gqlerror.Error) - -func Message(msg string, args ...interface{}) ErrorOption { - return func(err *gqlerror.Error) { - err.Message += fmt.Sprintf(msg, args...) - } -} - -func At(position *ast.Position) ErrorOption { - return func(err *gqlerror.Error) { - if position == nil { - return - } - err.Locations = append(err.Locations, gqlerror.Location{ - Line: position.Line, - Column: position.Column, - }) - if position.Src.Name != "" { - err.SetFile(position.Src.Name) - } - } -} - -func SuggestListQuoted(prefix string, typed string, suggestions []string) ErrorOption { - suggested := SuggestionList(typed, suggestions) - return func(err *gqlerror.Error) { - if len(suggested) > 0 { - err.Message += " " + prefix + " " + QuotedOrList(suggested...) + "?" 
- } - } -} - -func SuggestListUnquoted(prefix string, typed string, suggestions []string) ErrorOption { - suggested := SuggestionList(typed, suggestions) - return func(err *gqlerror.Error) { - if len(suggested) > 0 { - err.Message += " " + prefix + " " + OrList(suggested...) + "?" - } - } -} - -func Suggestf(suggestion string, args ...interface{}) ErrorOption { - return func(err *gqlerror.Error) { - err.Message += " Did you mean " + fmt.Sprintf(suggestion, args...) + "?" - } -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/messaging.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/messaging.go deleted file mode 100644 index f1ab5873f3..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/messaging.go +++ /dev/null @@ -1,39 +0,0 @@ -package validator - -import "bytes" - -// Given [ A, B, C ] return '"A", "B", or "C"'. -func QuotedOrList(items ...string) string { - itemsQuoted := make([]string, len(items)) - for i, item := range items { - itemsQuoted[i] = `"` + item + `"` - } - return OrList(itemsQuoted...) -} - -// Given [ A, B, C ] return 'A, B, or C'. 
-func OrList(items ...string) string { - var buf bytes.Buffer - - if len(items) > 5 { - items = items[:5] - } - if len(items) == 2 { - buf.WriteString(items[0]) - buf.WriteString(" or ") - buf.WriteString(items[1]) - return buf.String() - } - - for i, item := range items { - if i != 0 { - if i == len(items)-1 { - buf.WriteString(", or ") - } else { - buf.WriteString(", ") - } - } - buf.WriteString(item) - } - return buf.String() -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.go deleted file mode 100644 index 86796fab6c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.go +++ /dev/null @@ -1,16 +0,0 @@ -package validator - -import ( - _ "embed" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" -) - -//go:embed prelude.graphql -var preludeGraphql string - -var Prelude = &ast.Source{ - Name: "prelude.graphql", - Input: preludeGraphql, - BuiltIn: true, -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.graphql b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.graphql deleted file mode 100644 index bdca0096a5..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/prelude.graphql +++ /dev/null @@ -1,121 +0,0 @@ -# This file defines all the implicitly declared types that are required by the graphql spec. It is implicitly included by calls to LoadSchema - -"The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1." -scalar Int - -"The `Float` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point)." -scalar Float - -"The `String`scalar type represents textual data, represented as UTF-8 character sequences. 
The String type is most often used by GraphQL to represent free-form human-readable text." -scalar String - -"The `Boolean` scalar type represents `true` or `false`." -scalar Boolean - -"""The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as "4") or integer (such as 4) input value will be accepted as an ID.""" -scalar ID - -"The @include directive may be provided for fields, fragment spreads, and inline fragments, and allows for conditional inclusion during execution as described by the if argument." -directive @include(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT - -"The @skip directive may be provided for fields, fragment spreads, and inline fragments, and allows for conditional exclusion during execution as described by the if argument." -directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT - -"The @deprecated built-in directive is used within the type system definition language to indicate deprecated portions of a GraphQL service's schema, such as deprecated fields on a type, arguments on a field, input fields on an input type, or values of an enum type." -directive @deprecated(reason: String = "No longer supported") on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE - -"The @specifiedBy built-in directive is used within the type system definition language to provide a scalar specification URL for specifying the behavior of custom scalar types." -directive @specifiedBy(url: String!) on SCALAR - -type __Schema { - description: String - types: [__Type!]! - queryType: __Type! - mutationType: __Type - subscriptionType: __Type - directives: [__Directive!]! -} - -type __Type { - kind: __TypeKind! - name: String - description: String - # must be non-null for OBJECT and INTERFACE, otherwise null. 
- fields(includeDeprecated: Boolean = false): [__Field!] - # must be non-null for OBJECT and INTERFACE, otherwise null. - interfaces: [__Type!] - # must be non-null for INTERFACE and UNION, otherwise null. - possibleTypes: [__Type!] - # must be non-null for ENUM, otherwise null. - enumValues(includeDeprecated: Boolean = false): [__EnumValue!] - # must be non-null for INPUT_OBJECT, otherwise null. - inputFields: [__InputValue!] - # must be non-null for NON_NULL and LIST, otherwise null. - ofType: __Type - # may be non-null for custom SCALAR, otherwise null. - specifiedByURL: String -} - -type __Field { - name: String! - description: String - args: [__InputValue!]! - type: __Type! - isDeprecated: Boolean! - deprecationReason: String -} - -type __InputValue { - name: String! - description: String - type: __Type! - defaultValue: String -} - -type __EnumValue { - name: String! - description: String - isDeprecated: Boolean! - deprecationReason: String -} - -enum __TypeKind { - SCALAR - OBJECT - INTERFACE - UNION - ENUM - INPUT_OBJECT - LIST - NON_NULL -} - -type __Directive { - name: String! - description: String - locations: [__DirectiveLocation!]! - args: [__InputValue!]! - isRepeatable: Boolean! 
-} - -enum __DirectiveLocation { - QUERY - MUTATION - SUBSCRIPTION - FIELD - FRAGMENT_DEFINITION - FRAGMENT_SPREAD - INLINE_FRAGMENT - VARIABLE_DEFINITION - SCHEMA - SCALAR - OBJECT - FIELD_DEFINITION - ARGUMENT_DEFINITION - INTERFACE - UNION - ENUM - ENUM_VALUE - INPUT_OBJECT - INPUT_FIELD_DEFINITION -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go deleted file mode 100644 index d536e5e5f4..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go +++ /dev/null @@ -1,97 +0,0 @@ -package validator - -import ( - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("FieldsOnCorrectType", func(observers *Events, addError AddErrFunc) { - observers.OnField(func(walker *Walker, field *ast.Field) { - if field.ObjectDefinition == nil || field.Definition != nil { - return - } - - message := fmt.Sprintf(`Cannot query field "%s" on type "%s".`, field.Name, field.ObjectDefinition.Name) - - if suggestedTypeNames := getSuggestedTypeNames(walker, field.ObjectDefinition, field.Name); suggestedTypeNames != nil { - message += " Did you mean to use an inline fragment on " + QuotedOrList(suggestedTypeNames...) + "?" - } else if suggestedFieldNames := getSuggestedFieldNames(field.ObjectDefinition, field.Name); suggestedFieldNames != nil { - message += " Did you mean " + QuotedOrList(suggestedFieldNames...) + "?" - } - - addError( - Message(message), - At(field.Position), - ) - }) - }) -} - -// Go through all of the implementations of type, as well as the interfaces -// that they implement. 
If any of those types include the provided field, -// suggest them, sorted by how often the type is referenced, starting -// with Interfaces. -func getSuggestedTypeNames(walker *Walker, parent *ast.Definition, name string) []string { - if !parent.IsAbstractType() { - return nil - } - - possibleTypes := walker.Schema.GetPossibleTypes(parent) - var suggestedObjectTypes = make([]string, 0, len(possibleTypes)) - var suggestedInterfaceTypes []string - interfaceUsageCount := map[string]int{} - - for _, possibleType := range possibleTypes { - field := possibleType.Fields.ForName(name) - if field == nil { - continue - } - - suggestedObjectTypes = append(suggestedObjectTypes, possibleType.Name) - - for _, possibleInterface := range possibleType.Interfaces { - interfaceField := walker.Schema.Types[possibleInterface] - if interfaceField != nil && interfaceField.Fields.ForName(name) != nil { - if interfaceUsageCount[possibleInterface] == 0 { - suggestedInterfaceTypes = append(suggestedInterfaceTypes, possibleInterface) - } - interfaceUsageCount[possibleInterface]++ - } - } - } - - suggestedTypes := append(suggestedInterfaceTypes, suggestedObjectTypes...) - - sort.SliceStable(suggestedTypes, func(i, j int) bool { - typeA, typeB := suggestedTypes[i], suggestedTypes[j] - diff := interfaceUsageCount[typeB] - interfaceUsageCount[typeA] - if diff != 0 { - return diff < 0 - } - return strings.Compare(typeA, typeB) < 0 - }) - - return suggestedTypes -} - -// For the field name provided, determine if there are any similar field names -// that may be the result of a typo. 
-func getSuggestedFieldNames(parent *ast.Definition, name string) []string { - if parent.Kind != ast.Object && parent.Kind != ast.Interface { - return nil - } - - var possibleFieldNames = make([]string, 0, len(parent.Fields)) - for _, field := range parent.Fields { - possibleFieldNames = append(possibleFieldNames, field.Name) - } - - return SuggestionList(name, possibleFieldNames) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go deleted file mode 100644 index 66bd348c47..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go +++ /dev/null @@ -1,41 +0,0 @@ -package validator - -import ( - "fmt" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("FragmentsOnCompositeTypes", func(observers *Events, addError AddErrFunc) { - observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) { - fragmentType := walker.Schema.Types[inlineFragment.TypeCondition] - if fragmentType == nil || fragmentType.IsCompositeType() { - return - } - - message := fmt.Sprintf(`Fragment cannot condition on non composite type "%s".`, inlineFragment.TypeCondition) - - addError( - Message(message), - At(inlineFragment.Position), - ) - }) - - observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) { - if fragment.Definition == nil || fragment.TypeCondition == "" || fragment.Definition.IsCompositeType() { - return - } - - message := fmt.Sprintf(`Fragment "%s" cannot condition on non composite type "%s".`, fragment.Name, fragment.TypeCondition) - - addError( - Message(message), - At(fragment.Position), - ) - }) - }) -} diff --git 
a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go deleted file mode 100644 index 36b2d057c9..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_argument_names.go +++ /dev/null @@ -1,59 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("KnownArgumentNames", func(observers *Events, addError AddErrFunc) { - // A GraphQL field is only valid if all supplied arguments are defined by that field. - observers.OnField(func(_ *Walker, field *ast.Field) { - if field.Definition == nil || field.ObjectDefinition == nil { - return - } - for _, arg := range field.Arguments { - def := field.Definition.Arguments.ForName(arg.Name) - if def != nil { - continue - } - - var suggestions []string - for _, argDef := range field.Definition.Arguments { - suggestions = append(suggestions, argDef.Name) - } - - addError( - Message(`Unknown argument "%s" on field "%s.%s".`, arg.Name, field.ObjectDefinition.Name, field.Name), - SuggestListQuoted("Did you mean", arg.Name, suggestions), - At(field.Position), - ) - } - }) - - observers.OnDirective(func(_ *Walker, directive *ast.Directive) { - if directive.Definition == nil { - return - } - for _, arg := range directive.Arguments { - def := directive.Definition.Arguments.ForName(arg.Name) - if def != nil { - continue - } - - var suggestions []string - for _, argDef := range directive.Definition.Arguments { - suggestions = append(suggestions, argDef.Name) - } - - addError( - Message(`Unknown argument "%s" on directive "@%s".`, arg.Name, directive.Name), - SuggestListQuoted("Did you mean", arg.Name, suggestions), - At(directive.Position), 
- ) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go deleted file mode 100644 index 9855291e3b..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_directives.go +++ /dev/null @@ -1,49 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("KnownDirectives", func(observers *Events, addError AddErrFunc) { - type mayNotBeUsedDirective struct { - Name string - Line int - Column int - } - var seen = map[mayNotBeUsedDirective]bool{} - observers.OnDirective(func(_ *Walker, directive *ast.Directive) { - if directive.Definition == nil { - addError( - Message(`Unknown directive "@%s".`, directive.Name), - At(directive.Position), - ) - return - } - - for _, loc := range directive.Definition.Locations { - if loc == directive.Location { - return - } - } - - // position must be exists if directive.Definition != nil - tmp := mayNotBeUsedDirective{ - Name: directive.Name, - Line: directive.Position.Line, - Column: directive.Position.Column, - } - - if !seen[tmp] { - addError( - Message(`Directive "@%s" may not be used on %s.`, directive.Name, directive.Location), - At(directive.Position), - ) - seen[tmp] = true - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go deleted file mode 100644 index 8ae1fc33f4..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_fragment_names.go +++ /dev/null @@ -1,21 +0,0 @@ -package validator - 
-import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("KnownFragmentNames", func(observers *Events, addError AddErrFunc) { - observers.OnFragmentSpread(func(_ *Walker, fragmentSpread *ast.FragmentSpread) { - if fragmentSpread.Definition == nil { - addError( - Message(`Unknown fragment "%s".`, fragmentSpread.Name), - At(fragmentSpread.Position), - ) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_root_type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_root_type.go deleted file mode 100644 index ab97cd9017..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_root_type.go +++ /dev/null @@ -1,37 +0,0 @@ -package validator - -import ( - "fmt" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("KnownRootType", func(observers *Events, addError AddErrFunc) { - // A query's root must be a valid type. Surprisingly, this isn't - // checked anywhere else! - observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { - var def *ast.Definition - switch operation.Operation { - case ast.Query, "": - def = walker.Schema.Query - case ast.Mutation: - def = walker.Schema.Mutation - case ast.Subscription: - def = walker.Schema.Subscription - default: - // This shouldn't even parse; if it did we probably need to - // update this switch block to add the new operation type. 
- panic(fmt.Sprintf(`got unknown operation type "%s"`, operation.Operation)) - } - if def == nil { - addError( - Message(`Schema does not support operation type "%s"`, operation.Operation), - At(operation.Position)) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_type_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_type_names.go deleted file mode 100644 index aa9809be34..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/known_type_names.go +++ /dev/null @@ -1,61 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("KnownTypeNames", func(observers *Events, addError AddErrFunc) { - observers.OnVariable(func(walker *Walker, variable *ast.VariableDefinition) { - typeName := variable.Type.Name() - typdef := walker.Schema.Types[typeName] - if typdef != nil { - return - } - - addError( - Message(`Unknown type "%s".`, typeName), - At(variable.Position), - ) - }) - - observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) { - typedName := inlineFragment.TypeCondition - if typedName == "" { - return - } - - def := walker.Schema.Types[typedName] - if def != nil { - return - } - - addError( - Message(`Unknown type "%s".`, typedName), - At(inlineFragment.Position), - ) - }) - - observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { - typeName := fragment.TypeCondition - def := walker.Schema.Types[typeName] - if def != nil { - return - } - - var possibleTypes []string - for _, t := range walker.Schema.Types { - possibleTypes = append(possibleTypes, t.Name) - } - - addError( - Message(`Unknown type "%s".`, typeName), - SuggestListQuoted("Did you mean", typeName, 
possibleTypes), - At(fragment.Position), - ) - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/lone_anonymous_operation.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/lone_anonymous_operation.go deleted file mode 100644 index 2af7b5a038..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/lone_anonymous_operation.go +++ /dev/null @@ -1,21 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("LoneAnonymousOperation", func(observers *Events, addError AddErrFunc) { - observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { - if operation.Name == "" && len(walker.Document.Operations) > 1 { - addError( - Message(`This anonymous operation must be the only defined operation.`), - At(operation.Position), - ) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_undefined_variables.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_undefined_variables.go deleted file mode 100644 index e45a5e3d51..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_undefined_variables.go +++ /dev/null @@ -1,30 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("NoUndefinedVariables", func(observers *Events, addError AddErrFunc) { - observers.OnValue(func(walker *Walker, value *ast.Value) { - if walker.CurrentOperation == nil || value.Kind != ast.Variable || value.VariableDefinition != nil { - return - } - - if walker.CurrentOperation.Name != "" { - addError( - Message(`Variable "%s" is not defined by operation "%s".`, value, walker.CurrentOperation.Name), - At(value.Position), - ) - } else { - addError( - Message(`Variable "%s" is not defined.`, value), - At(value.Position), - ) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go deleted file mode 100644 index f6ba046a1c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_fragments.go +++ /dev/null @@ -1,32 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("NoUnusedFragments", func(observers *Events, addError AddErrFunc) { - - inFragmentDefinition := false - fragmentNameUsed := make(map[string]bool) - - observers.OnFragmentSpread(func(_ *Walker, fragmentSpread *ast.FragmentSpread) { - if !inFragmentDefinition { - fragmentNameUsed[fragmentSpread.Name] = true - } - }) - - observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) { - inFragmentDefinition = true - if !fragmentNameUsed[fragment.Name] { - addError( - Message(`Fragment "%s" is never used.`, fragment.Name), - At(fragment.Position), - ) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go deleted file mode 100644 index 163ac895b5..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_unused_variables.go +++ /dev/null @@ -1,32 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("NoUnusedVariables", func(observers *Events, addError AddErrFunc) { - observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) { - for _, varDef := range operation.VariableDefinitions { - if varDef.Used { - continue - } - - if operation.Name != "" { - addError( - Message(`Variable "$%s" is never used in operation "%s".`, varDef.Variable, operation.Name), - At(varDef.Position), - ) - } else { - addError( - Message(`Variable "$%s" is never used.`, varDef.Variable), - At(varDef.Position), - ) - } - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/possible_fragment_spreads.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/possible_fragment_spreads.go deleted file mode 100644 index 79cb20c49c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/possible_fragment_spreads.go +++ /dev/null @@ -1,70 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("PossibleFragmentSpreads", func(observers *Events, addError AddErrFunc) { - - validate := func(walker *Walker, parentDef *ast.Definition, fragmentName string, emitError func()) { - if parentDef == nil { - return - } - - var parentDefs []*ast.Definition - switch parentDef.Kind { - case ast.Object: - parentDefs = []*ast.Definition{parentDef} - case ast.Interface, ast.Union: - parentDefs = walker.Schema.GetPossibleTypes(parentDef) - default: - return - } - - fragmentDefType := walker.Schema.Types[fragmentName] - if fragmentDefType == nil { - return - } - if !fragmentDefType.IsCompositeType() { - // checked by FragmentsOnCompositeTypes - return - } - fragmentDefs := walker.Schema.GetPossibleTypes(fragmentDefType) - - for _, fragmentDef := range fragmentDefs { - for _, parentDef := range parentDefs { - if parentDef.Name == fragmentDef.Name { - return - } - } - } - - emitError() - } - - observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) { - validate(walker, inlineFragment.ObjectDefinition, inlineFragment.TypeCondition, func() { - addError( - Message(`Fragment cannot be spread here as objects of type "%s" can never be of type "%s".`, inlineFragment.ObjectDefinition.Name, inlineFragment.TypeCondition), - At(inlineFragment.Position), - ) - }) - }) - - observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) { - if fragmentSpread.Definition == nil { - return - } - validate(walker, fragmentSpread.ObjectDefinition, fragmentSpread.Definition.TypeCondition, func() { - addError( - Message(`Fragment "%s" cannot be spread here as objects of type "%s" can never be of type "%s".`, fragmentSpread.Name, fragmentSpread.ObjectDefinition.Name, fragmentSpread.Definition.TypeCondition), - At(fragmentSpread.Position), - ) - }) - }) - }) -} diff --git 
a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go deleted file mode 100644 index d6d12c4fd2..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/provided_required_arguments.go +++ /dev/null @@ -1,64 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("ProvidedRequiredArguments", func(observers *Events, addError AddErrFunc) { - observers.OnField(func(_ *Walker, field *ast.Field) { - if field.Definition == nil { - return - } - - argDef: - for _, argDef := range field.Definition.Arguments { - if !argDef.Type.NonNull { - continue - } - if argDef.DefaultValue != nil { - continue - } - for _, arg := range field.Arguments { - if arg.Name == argDef.Name { - continue argDef - } - } - - addError( - Message(`Field "%s" argument "%s" of type "%s" is required, but it was not provided.`, field.Name, argDef.Name, argDef.Type.String()), - At(field.Position), - ) - } - }) - - observers.OnDirective(func(_ *Walker, directive *ast.Directive) { - if directive.Definition == nil { - return - } - - argDef: - for _, argDef := range directive.Definition.Arguments { - if !argDef.Type.NonNull { - continue - } - if argDef.DefaultValue != nil { - continue - } - for _, arg := range directive.Arguments { - if arg.Name == argDef.Name { - continue argDef - } - } - - addError( - Message(`Directive "@%s" argument "%s" of type "%s" is required, but it was not provided.`, directive.Definition.Name, argDef.Name, argDef.Type.String()), - At(directive.Position), - ) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/scalar_leafs.go 
b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/scalar_leafs.go deleted file mode 100644 index cd17b47c87..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/scalar_leafs.go +++ /dev/null @@ -1,38 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("ScalarLeafs", func(observers *Events, addError AddErrFunc) { - observers.OnField(func(walker *Walker, field *ast.Field) { - if field.Definition == nil { - return - } - - fieldType := walker.Schema.Types[field.Definition.Type.Name()] - if fieldType == nil { - return - } - - if fieldType.IsLeafType() && len(field.SelectionSet) > 0 { - addError( - Message(`Field "%s" must not have a selection since type "%s" has no subfields.`, field.Name, fieldType.Name), - At(field.Position), - ) - } - - if !fieldType.IsLeafType() && len(field.SelectionSet) == 0 { - addError( - Message(`Field "%s" of type "%s" must have a selection of subfields.`, field.Name, field.Definition.Type.String()), - Suggestf(`"%s { ... }"`, field.Name), - At(field.Position), - ) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go deleted file mode 100644 index 7458c5f6cb..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_argument_names.go +++ /dev/null @@ -1,35 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("UniqueArgumentNames", func(observers *Events, addError AddErrFunc) { - observers.OnField(func(_ *Walker, field *ast.Field) { - checkUniqueArgs(field.Arguments, addError) - }) - - observers.OnDirective(func(_ *Walker, directive *ast.Directive) { - checkUniqueArgs(directive.Arguments, addError) - }) - }) -} - -func checkUniqueArgs(args ast.ArgumentList, addError AddErrFunc) { - knownArgNames := map[string]int{} - - for _, arg := range args { - if knownArgNames[arg.Name] == 1 { - addError( - Message(`There can be only one argument named "%s".`, arg.Name), - At(arg.Position), - ) - } - - knownArgNames[arg.Name]++ - } -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go deleted file mode 100644 index ecf5a0a82e..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_directives_per_location.go +++ /dev/null @@ -1,26 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("UniqueDirectivesPerLocation", func(observers *Events, addError AddErrFunc) { - observers.OnDirectiveList(func(_ *Walker, directives []*ast.Directive) { - seen := map[string]bool{} - - for _, dir := range directives { - if dir.Name != "repeatable" && seen[dir.Name] { - addError( - Message(`The directive "@%s" can only be used once at this location.`, dir.Name), - At(dir.Position), - ) - } - seen[dir.Name] = true - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go deleted file mode 100644 index c94f3ad27c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_fragment_names.go +++ /dev/null @@ -1,24 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("UniqueFragmentNames", func(observers *Events, addError AddErrFunc) { - seenFragments := map[string]bool{} - - observers.OnFragment(func(_ *Walker, fragment *ast.FragmentDefinition) { - if seenFragments[fragment.Name] { - addError( - Message(`There can be only one fragment named "%s".`, fragment.Name), - At(fragment.Position), - ) - } - seenFragments[fragment.Name] = true - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go deleted file mode 100644 index a93d63bd1e..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_input_field_names.go +++ /dev/null @@ -1,29 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("UniqueInputFieldNames", func(observers *Events, addError AddErrFunc) { - observers.OnValue(func(_ *Walker, value *ast.Value) { - if value.Kind != ast.ObjectValue { - return - } - - seen := map[string]bool{} - for _, field := range value.Children { - if seen[field.Name] { - addError( - Message(`There can be only one input field named "%s".`, field.Name), - At(field.Position), - ) - } - seen[field.Name] = true - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go deleted file mode 100644 index dcd404dadf..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_operation_names.go +++ /dev/null @@ -1,24 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("UniqueOperationNames", func(observers *Events, addError AddErrFunc) { - seen := map[string]bool{} - - observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) { - if seen[operation.Name] { - addError( - Message(`There can be only one operation named "%s".`, operation.Name), - At(operation.Position), - ) - } - seen[operation.Name] = true - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go deleted file mode 100644 index 7a214dbe4c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/unique_variable_names.go +++ /dev/null @@ -1,26 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("UniqueVariableNames", func(observers *Events, addError AddErrFunc) { - observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) { - seen := map[string]int{} - for _, def := range operation.VariableDefinitions { - // add the same error only once per a variable. 
- if seen[def.Variable] == 1 { - addError( - Message(`There can be only one variable named "$%s".`, def.Variable), - At(def.Position), - ) - } - seen[def.Variable]++ - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go deleted file mode 100644 index 8858023d4e..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/values_of_correct_type.go +++ /dev/null @@ -1,170 +0,0 @@ -package validator - -import ( - "errors" - "fmt" - "strconv" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("ValuesOfCorrectType", func(observers *Events, addError AddErrFunc) { - observers.OnValue(func(_ *Walker, value *ast.Value) { - if value.Definition == nil || value.ExpectedType == nil { - return - } - - if value.Kind == ast.NullValue && value.ExpectedType.NonNull { - addError( - Message(`Expected value of type "%s", found %s.`, value.ExpectedType.String(), value.String()), - At(value.Position), - ) - } - - if value.Definition.Kind == ast.Scalar { - // Skip custom validating scalars - if !value.Definition.OneOf("Int", "Float", "String", "Boolean", "ID") { - return - } - } - - var possibleEnums []string - if value.Definition.Kind == ast.Enum { - for _, val := range value.Definition.EnumValues { - possibleEnums = append(possibleEnums, val.Name) - } - } - - rawVal, err := value.Value(nil) - if err != nil { - unexpectedTypeMessage(addError, value) - } - - switch value.Kind { - case ast.NullValue: - return - case ast.ListValue: - if value.ExpectedType.Elem == nil { - unexpectedTypeMessage(addError, value) - return - } - - case ast.IntValue: - if !value.Definition.OneOf("Int", "Float", "ID") { - 
unexpectedTypeMessage(addError, value) - } - - case ast.FloatValue: - if !value.Definition.OneOf("Float") { - unexpectedTypeMessage(addError, value) - } - - case ast.StringValue, ast.BlockValue: - if value.Definition.Kind == ast.Enum { - rawValStr := fmt.Sprint(rawVal) - addError( - Message(`Enum "%s" cannot represent non-enum value: %s.`, value.ExpectedType.String(), value.String()), - SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums), - At(value.Position), - ) - } else if !value.Definition.OneOf("String", "ID") { - unexpectedTypeMessage(addError, value) - } - - case ast.EnumValue: - if value.Definition.Kind != ast.Enum { - rawValStr := fmt.Sprint(rawVal) - addError( - unexpectedTypeMessageOnly(value), - SuggestListUnquoted("Did you mean the enum value", rawValStr, possibleEnums), - At(value.Position), - ) - } else if value.Definition.EnumValues.ForName(value.Raw) == nil { - rawValStr := fmt.Sprint(rawVal) - addError( - Message(`Value "%s" does not exist in "%s" enum.`, value.String(), value.ExpectedType.String()), - SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums), - At(value.Position), - ) - } - - case ast.BooleanValue: - if !value.Definition.OneOf("Boolean") { - unexpectedTypeMessage(addError, value) - } - - case ast.ObjectValue: - - for _, field := range value.Definition.Fields { - if field.Type.NonNull { - fieldValue := value.Children.ForName(field.Name) - if fieldValue == nil && field.DefaultValue == nil { - addError( - Message(`Field "%s.%s" of required type "%s" was not provided.`, value.Definition.Name, field.Name, field.Type.String()), - At(value.Position), - ) - continue - } - } - } - - for _, fieldValue := range value.Children { - if value.Definition.Fields.ForName(fieldValue.Name) == nil { - var suggestions []string - for _, fieldValue := range value.Definition.Fields { - suggestions = append(suggestions, fieldValue.Name) - } - - addError( - Message(`Field "%s" is not defined by type "%s".`, 
fieldValue.Name, value.Definition.Name), - SuggestListQuoted("Did you mean", fieldValue.Name, suggestions), - At(fieldValue.Position), - ) - } - } - - case ast.Variable: - return - - default: - panic(fmt.Errorf("unhandled %T", value)) - } - }) - }) -} - -func unexpectedTypeMessage(addError AddErrFunc, v *ast.Value) { - addError( - unexpectedTypeMessageOnly(v), - At(v.Position), - ) -} - -func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption { - switch v.ExpectedType.String() { - case "Int", "Int!": - if _, err := strconv.ParseInt(v.Raw, 10, 32); err != nil && errors.Is(err, strconv.ErrRange) { - return Message(`Int cannot represent non 32-bit signed integer value: %s`, v.String()) - } - return Message(`Int cannot represent non-integer value: %s`, v.String()) - case "String", "String!", "[String]": - return Message(`String cannot represent a non string value: %s`, v.String()) - case "Boolean", "Boolean!": - return Message(`Boolean cannot represent a non boolean value: %s`, v.String()) - case "Float", "Float!": - return Message(`Float cannot represent non numeric value: %s`, v.String()) - case "ID", "ID!": - return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String()) - //case "Enum": - // return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), v.String()) - default: - if v.Definition.Kind == ast.Enum { - return Message(`Enum "%s" cannot represent non-enum value: %s.`, v.ExpectedType.String(), v.String()) - } - return Message(`Expected value of type "%s", found %s.`, v.ExpectedType.String(), v.String()) - } -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go deleted file mode 100644 index ea4dfcc5ab..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_are_input_types.go +++ /dev/null 
@@ -1,30 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("VariablesAreInputTypes", func(observers *Events, addError AddErrFunc) { - observers.OnOperation(func(_ *Walker, operation *ast.OperationDefinition) { - for _, def := range operation.VariableDefinitions { - if def.Definition == nil { - continue - } - if !def.Definition.IsInputType() { - addError( - Message( - `Variable "$%s" cannot be non-input type "%s".`, - def.Variable, - def.Type.String(), - ), - At(def.Position), - ) - } - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_in_allowed_position.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_in_allowed_position.go deleted file mode 100644 index 08a8e18c09..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/variables_in_allowed_position.go +++ /dev/null @@ -1,40 +0,0 @@ -package validator - -import ( - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . 
"github.com/open-policy-agent/opa/internal/gqlparser/validator" -) - -func init() { - AddRule("VariablesInAllowedPosition", func(observers *Events, addError AddErrFunc) { - observers.OnValue(func(walker *Walker, value *ast.Value) { - if value.Kind != ast.Variable || value.ExpectedType == nil || value.VariableDefinition == nil || walker.CurrentOperation == nil { - return - } - - tmp := *value.ExpectedType - - // todo: move me into walk - // If there is a default non nullable types can be null - if value.VariableDefinition.DefaultValue != nil && value.VariableDefinition.DefaultValue.Kind != ast.NullValue { - if value.ExpectedType.NonNull { - tmp.NonNull = false - } - } - - if !value.VariableDefinition.Type.IsCompatible(&tmp) { - addError( - Message( - `Variable "%s" of type "%s" used in position expecting type "%s".`, - value, - value.VariableDefinition.Type.String(), - value.ExpectedType.String(), - ), - At(value.Position), - ) - } - }) - }) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema.go deleted file mode 100644 index c9c542195d..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema.go +++ /dev/null @@ -1,513 +0,0 @@ -package validator - -import ( - "sort" - "strconv" - "strings" - - //nolint:revive - . "github.com/open-policy-agent/opa/internal/gqlparser/ast" - "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror" - "github.com/open-policy-agent/opa/internal/gqlparser/parser" -) - -func LoadSchema(inputs ...*Source) (*Schema, error) { - ast, err := parser.ParseSchemas(inputs...) 
- if err != nil { - return nil, err - } - return ValidateSchemaDocument(ast) -} - -func ValidateSchemaDocument(ast *SchemaDocument) (*Schema, error) { - schema := Schema{ - Types: map[string]*Definition{}, - Directives: map[string]*DirectiveDefinition{}, - PossibleTypes: map[string][]*Definition{}, - Implements: map[string][]*Definition{}, - } - - for i, def := range ast.Definitions { - if schema.Types[def.Name] != nil { - return nil, gqlerror.ErrorPosf(def.Position, "Cannot redeclare type %s.", def.Name) - } - schema.Types[def.Name] = ast.Definitions[i] - } - - defs := append(DefinitionList{}, ast.Definitions...) - - for _, ext := range ast.Extensions { - def := schema.Types[ext.Name] - if def == nil { - schema.Types[ext.Name] = &Definition{ - Kind: ext.Kind, - Name: ext.Name, - Position: ext.Position, - } - def = schema.Types[ext.Name] - defs = append(defs, def) - } - - if def.Kind != ext.Kind { - return nil, gqlerror.ErrorPosf(ext.Position, "Cannot extend type %s because the base type is a %s, not %s.", ext.Name, def.Kind, ext.Kind) - } - - def.Directives = append(def.Directives, ext.Directives...) - def.Interfaces = append(def.Interfaces, ext.Interfaces...) - def.Fields = append(def.Fields, ext.Fields...) - def.Types = append(def.Types, ext.Types...) - def.EnumValues = append(def.EnumValues, ext.EnumValues...) 
- } - - for _, def := range defs { - switch def.Kind { - case Union: - for _, t := range def.Types { - schema.AddPossibleType(def.Name, schema.Types[t]) - schema.AddImplements(t, def) - } - case InputObject, Object: - for _, intf := range def.Interfaces { - schema.AddPossibleType(intf, def) - schema.AddImplements(def.Name, schema.Types[intf]) - } - schema.AddPossibleType(def.Name, def) - case Interface: - for _, intf := range def.Interfaces { - schema.AddPossibleType(intf, def) - schema.AddImplements(def.Name, schema.Types[intf]) - } - } - } - - for i, dir := range ast.Directives { - if schema.Directives[dir.Name] != nil { - // While the spec says SDL must not (§3.5) explicitly define builtin - // scalars, it may (§3.13) define builtin directives. Here we check for - // that, and reject doubly-defined directives otherwise. - switch dir.Name { - case "include", "skip", "deprecated", "specifiedBy": // the builtins - // In principle here we might want to validate that the - // directives are the same. But they might not be, if the - // server has an older spec than we do. (Plus, validating this - // is a lot of work.) So we just keep the first one we saw. - // That's an arbitrary choice, but in theory the only way it - // fails is if the server is using features newer than this - // version of gqlparser, in which case they're in trouble - // anyway. 
- default: - return nil, gqlerror.ErrorPosf(dir.Position, "Cannot redeclare directive %s.", dir.Name) - } - } - schema.Directives[dir.Name] = ast.Directives[i] - } - - if len(ast.Schema) > 1 { - return nil, gqlerror.ErrorPosf(ast.Schema[1].Position, "Cannot have multiple schema entry points, consider schema extensions instead.") - } - - if len(ast.Schema) == 1 { - schema.Description = ast.Schema[0].Description - for _, entrypoint := range ast.Schema[0].OperationTypes { - def := schema.Types[entrypoint.Type] - if def == nil { - return nil, gqlerror.ErrorPosf(entrypoint.Position, "Schema root %s refers to a type %s that does not exist.", entrypoint.Operation, entrypoint.Type) - } - switch entrypoint.Operation { - case Query: - schema.Query = def - case Mutation: - schema.Mutation = def - case Subscription: - schema.Subscription = def - } - } - } - - for _, ext := range ast.SchemaExtension { - for _, entrypoint := range ext.OperationTypes { - def := schema.Types[entrypoint.Type] - if def == nil { - return nil, gqlerror.ErrorPosf(entrypoint.Position, "Schema root %s refers to a type %s that does not exist.", entrypoint.Operation, entrypoint.Type) - } - switch entrypoint.Operation { - case Query: - schema.Query = def - case Mutation: - schema.Mutation = def - case Subscription: - schema.Subscription = def - } - } - } - - if err := validateTypeDefinitions(&schema); err != nil { - return nil, err - } - - if err := validateDirectiveDefinitions(&schema); err != nil { - return nil, err - } - - // Inferred root operation type names should be performed only when a `schema` directive is - // **not** provided, when it is, `Mutation` and `Subscription` becomes valid types and are not - // assigned as a root operation on the schema. 
- if len(ast.Schema) == 0 { - if schema.Query == nil && schema.Types["Query"] != nil { - schema.Query = schema.Types["Query"] - } - - if schema.Mutation == nil && schema.Types["Mutation"] != nil { - schema.Mutation = schema.Types["Mutation"] - } - - if schema.Subscription == nil && schema.Types["Subscription"] != nil { - schema.Subscription = schema.Types["Subscription"] - } - } - - if schema.Query != nil { - schema.Query.Fields = append( - schema.Query.Fields, - &FieldDefinition{ - Name: "__schema", - Type: NonNullNamedType("__Schema", nil), - }, - &FieldDefinition{ - Name: "__type", - Type: NamedType("__Type", nil), - Arguments: ArgumentDefinitionList{ - {Name: "name", Type: NonNullNamedType("String", nil)}, - }, - }, - ) - } - - return &schema, nil -} - -func validateTypeDefinitions(schema *Schema) *gqlerror.Error { - types := make([]string, 0, len(schema.Types)) - for typ := range schema.Types { - types = append(types, typ) - } - sort.Strings(types) - for _, typ := range types { - err := validateDefinition(schema, schema.Types[typ]) - if err != nil { - return err - } - } - return nil -} - -func validateDirectiveDefinitions(schema *Schema) *gqlerror.Error { - directives := make([]string, 0, len(schema.Directives)) - for directive := range schema.Directives { - directives = append(directives, directive) - } - sort.Strings(directives) - for _, directive := range directives { - err := validateDirective(schema, schema.Directives[directive]) - if err != nil { - return err - } - } - return nil -} - -func validateDirective(schema *Schema, def *DirectiveDefinition) *gqlerror.Error { - if err := validateName(def.Position, def.Name); err != nil { - // now, GraphQL spec doesn't have reserved directive name - return err - } - - return validateArgs(schema, def.Arguments, def) -} - -func validateDefinition(schema *Schema, def *Definition) *gqlerror.Error { - for _, field := range def.Fields { - if err := validateName(field.Position, field.Name); err != nil { - // now, GraphQL 
spec doesn't have reserved field name - return err - } - if err := validateTypeRef(schema, field.Type); err != nil { - return err - } - if err := validateArgs(schema, field.Arguments, nil); err != nil { - return err - } - wantDirLocation := LocationFieldDefinition - if def.Kind == InputObject { - wantDirLocation = LocationInputFieldDefinition - } - if err := validateDirectives(schema, field.Directives, wantDirLocation, nil); err != nil { - return err - } - } - - for _, typ := range def.Types { - typDef := schema.Types[typ] - if typDef == nil { - return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(typ)) - } - if !isValidKind(typDef.Kind, Object) { - return gqlerror.ErrorPosf(def.Position, "%s type %s must be %s.", def.Kind, strconv.Quote(typ), kindList(Object)) - } - } - - for _, intf := range def.Interfaces { - if err := validateImplements(schema, def, intf); err != nil { - return err - } - } - - switch def.Kind { - case Object, Interface: - if len(def.Fields) == 0 { - return gqlerror.ErrorPosf(def.Position, "%s %s: must define one or more fields.", def.Kind, def.Name) - } - for _, field := range def.Fields { - if typ, ok := schema.Types[field.Type.Name()]; ok { - if !isValidKind(typ.Kind, Scalar, Object, Interface, Union, Enum) { - return gqlerror.ErrorPosf(field.Position, "%s %s: field must be one of %s.", def.Kind, def.Name, kindList(Scalar, Object, Interface, Union, Enum)) - } - } - } - case Enum: - if len(def.EnumValues) == 0 { - return gqlerror.ErrorPosf(def.Position, "%s %s: must define one or more unique enum values.", def.Kind, def.Name) - } - for _, value := range def.EnumValues { - for _, nonEnum := range [3]string{"true", "false", "null"} { - if value.Name == nonEnum { - return gqlerror.ErrorPosf(def.Position, "%s %s: non-enum value %s.", def.Kind, def.Name, value.Name) - } - } - } - case InputObject: - if len(def.Fields) == 0 { - return gqlerror.ErrorPosf(def.Position, "%s %s: must define one or more input fields.", def.Kind, 
def.Name) - } - for _, field := range def.Fields { - if typ, ok := schema.Types[field.Type.Name()]; ok { - if !isValidKind(typ.Kind, Scalar, Enum, InputObject) { - return gqlerror.ErrorPosf(field.Position, "%s %s: field must be one of %s.", typ.Kind, field.Name, kindList(Scalar, Enum, InputObject)) - } - } - } - } - - for idx, field1 := range def.Fields { - for _, field2 := range def.Fields[idx+1:] { - if field1.Name == field2.Name { - return gqlerror.ErrorPosf(field2.Position, "Field %s.%s can only be defined once.", def.Name, field2.Name) - } - } - } - - if !def.BuiltIn { - // GraphQL spec has reserved type names a lot! - err := validateName(def.Position, def.Name) - if err != nil { - return err - } - } - - return validateDirectives(schema, def.Directives, DirectiveLocation(def.Kind), nil) -} - -func validateTypeRef(schema *Schema, typ *Type) *gqlerror.Error { - if schema.Types[typ.Name()] == nil { - return gqlerror.ErrorPosf(typ.Position, "Undefined type %s.", typ.Name()) - } - return nil -} - -func validateArgs(schema *Schema, args ArgumentDefinitionList, currentDirective *DirectiveDefinition) *gqlerror.Error { - for _, arg := range args { - if err := validateName(arg.Position, arg.Name); err != nil { - // now, GraphQL spec doesn't have reserved argument name - return err - } - if err := validateTypeRef(schema, arg.Type); err != nil { - return err - } - def := schema.Types[arg.Type.Name()] - if !def.IsInputType() { - return gqlerror.ErrorPosf( - arg.Position, - "cannot use %s as argument %s because %s is not a valid input type", - arg.Type.String(), - arg.Name, - def.Kind, - ) - } - if err := validateDirectives(schema, arg.Directives, LocationArgumentDefinition, currentDirective); err != nil { - return err - } - } - return nil -} - -func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLocation, currentDirective *DirectiveDefinition) *gqlerror.Error { - for _, dir := range dirs { - if err := validateName(dir.Position, dir.Name); err != 
nil { - // now, GraphQL spec doesn't have reserved directive name - return err - } - if currentDirective != nil && dir.Name == currentDirective.Name { - return gqlerror.ErrorPosf(dir.Position, "Directive %s cannot refer to itself.", currentDirective.Name) - } - if schema.Directives[dir.Name] == nil { - return gqlerror.ErrorPosf(dir.Position, "Undefined directive %s.", dir.Name) - } - validKind := false - for _, dirLocation := range schema.Directives[dir.Name].Locations { - if dirLocation == location { - validKind = true - break - } - } - if !validKind { - return gqlerror.ErrorPosf(dir.Position, "Directive %s is not applicable on %s.", dir.Name, location) - } - dir.Definition = schema.Directives[dir.Name] - } - return nil -} - -func validateImplements(schema *Schema, def *Definition, intfName string) *gqlerror.Error { - // see validation rules at the bottom of - // https://facebook.github.io/graphql/October2021/#sec-Objects - intf := schema.Types[intfName] - if intf == nil { - return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(intfName)) - } - if intf.Kind != Interface { - return gqlerror.ErrorPosf(def.Position, "%s is a non interface type %s.", strconv.Quote(intfName), intf.Kind) - } - for _, requiredField := range intf.Fields { - foundField := def.Fields.ForName(requiredField.Name) - if foundField == nil { - return gqlerror.ErrorPosf(def.Position, - `For %s to implement %s it must have a field called %s.`, - def.Name, intf.Name, requiredField.Name, - ) - } - - if !isCovariant(schema, requiredField.Type, foundField.Type) { - return gqlerror.ErrorPosf(foundField.Position, - `For %s to implement %s the field %s must have type %s.`, - def.Name, intf.Name, requiredField.Name, requiredField.Type.String(), - ) - } - - for _, requiredArg := range requiredField.Arguments { - foundArg := foundField.Arguments.ForName(requiredArg.Name) - if foundArg == nil { - return gqlerror.ErrorPosf(foundField.Position, - `For %s to implement %s the field %s must 
have the same arguments but it is missing %s.`, - def.Name, intf.Name, requiredField.Name, requiredArg.Name, - ) - } - - if !requiredArg.Type.IsCompatible(foundArg.Type) { - return gqlerror.ErrorPosf(foundArg.Position, - `For %s to implement %s the field %s must have the same arguments but %s has the wrong type.`, - def.Name, intf.Name, requiredField.Name, requiredArg.Name, - ) - } - } - for _, foundArgs := range foundField.Arguments { - if requiredField.Arguments.ForName(foundArgs.Name) == nil && foundArgs.Type.NonNull && foundArgs.DefaultValue == nil { - return gqlerror.ErrorPosf(foundArgs.Position, - `For %s to implement %s any additional arguments on %s must be optional or have a default value but %s is required.`, - def.Name, intf.Name, foundField.Name, foundArgs.Name, - ) - } - } - } - return validateTypeImplementsAncestors(schema, def, intfName) -} - -// validateTypeImplementsAncestors -// https://github.com/graphql/graphql-js/blob/47bd8c8897c72d3efc17ecb1599a95cee6bac5e8/src/type/validate.ts#L428 -func validateTypeImplementsAncestors(schema *Schema, def *Definition, intfName string) *gqlerror.Error { - intf := schema.Types[intfName] - if intf == nil { - return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(intfName)) - } - for _, transitive := range intf.Interfaces { - if !containsString(def.Interfaces, transitive) { - if transitive == def.Name { - return gqlerror.ErrorPosf(def.Position, - `Type %s cannot implement %s because it would create a circular reference.`, - def.Name, intfName, - ) - } - return gqlerror.ErrorPosf(def.Position, - `Type %s must implement %s because it is implemented by %s.`, - def.Name, transitive, intfName, - ) - } - } - return nil -} - -func containsString(slice []string, want string) bool { - for _, str := range slice { - if want == str { - return true - } - } - return false -} - -func isCovariant(schema *Schema, required *Type, actual *Type) bool { - if required.NonNull && !actual.NonNull { - return false - 
} - - if required.NamedType != "" { - if required.NamedType == actual.NamedType { - return true - } - for _, pt := range schema.PossibleTypes[required.NamedType] { - if pt.Name == actual.NamedType { - return true - } - } - return false - } - - if required.Elem != nil && actual.Elem == nil { - return false - } - - return isCovariant(schema, required.Elem, actual.Elem) -} - -func validateName(pos *Position, name string) *gqlerror.Error { - if strings.HasPrefix(name, "__") { - return gqlerror.ErrorPosf(pos, `Name "%s" must not begin with "__", which is reserved by GraphQL introspection.`, name) - } - return nil -} - -func isValidKind(kind DefinitionKind, valid ...DefinitionKind) bool { - for _, k := range valid { - if kind == k { - return true - } - } - return false -} - -func kindList(kinds ...DefinitionKind) string { - s := make([]string, len(kinds)) - for i, k := range kinds { - s[i] = string(k) - } - return strings.Join(s, ", ") -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema_test.yml b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema_test.yml deleted file mode 100644 index 7034a4697c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/schema_test.yml +++ /dev/null @@ -1,678 +0,0 @@ -types: - - name: cannot be redeclared - input: | - type A { - name: String - } - type A { - name: String - } - error: - message: "Cannot redeclare type A." - locations: [{line: 4, column: 6}] - - name: cannot be duplicated field at same definition 1 - input: | - type A { - name: String - name: String - } - error: - message: "Field A.name can only be defined once." - locations: [{line: 3, column: 3}] - - name: cannot be duplicated field at same definition 2 - input: | - type A { - name: String - } - extend type A { - name: String - } - error: - message: "Field A.name can only be defined once." 
- locations: [{line: 5, column: 3}] - - name: cannot be duplicated field at same definition 3 - input: | - type A { - name: String - } - extend type A { - age: Int - age: Int - } - error: - message: "Field A.age can only be defined once." - locations: [{line: 6, column: 3}] - -object types: - - name: must define one or more fields - input: | - directive @D on OBJECT - - # This pattern rejected by parser - # type InvalidObject1 {} - - type InvalidObject2 @D - - type ValidObject { - id: ID - } - extend type ValidObject @D - extend type ValidObject { - b: Int - } - error: - message: 'OBJECT InvalidObject2: must define one or more fields.' - locations: [{line: 6, column: 6}] - - name: check reserved names on type name - input: | - type __FooBar { - id: ID - } - error: - message: 'Name "__FooBar" must not begin with "__", which is reserved by GraphQL introspection.' - locations: [{line: 1, column: 6}] - - name: check reserved names on type field - input: | - type FooBar { - __id: ID - } - error: - message: 'Name "__id" must not begin with "__", which is reserved by GraphQL introspection.' - locations: [{line: 2, column: 3}] - - - name: check reserved names on type field argument - input: | - type FooBar { - foo(__bar: ID): ID - } - error: - message: 'Name "__bar" must not begin with "__", which is reserved by GraphQL introspection.' - locations: [{line: 2, column: 7}] - - - name: must not allow input object as field type - input: | - input Input { - id: ID - } - type Query { - input: Input! - } - error: - message: 'OBJECT Query: field must be one of SCALAR, OBJECT, INTERFACE, UNION, ENUM.' - locations: [{line: 5, column: 3}] - -interfaces: - - name: must exist - input: | - type Thing implements Object { - id: ID! - } - - type Query { - Things: [Thing!]! - } - error: - message: 'Undefined type "Object".' - locations: [{line: 1, column: 6}] - - - name: must be an interface - input: | - type Thing implements Object { - id: ID! - } - - type Query { - Things: [Thing!]! 
- } - - type Object { - name: String - } - error: - message: '"Object" is a non interface type OBJECT.' - locations: [{line: 1, column: 6}] - - - name: must define one or more fields - input: | - directive @D on INTERFACE - - # This pattern rejected by parser - # interface InvalidInterface1 {} - - interface InvalidInterface2 @D - - interface ValidInterface { - id: ID - } - extend interface ValidInterface @D - extend interface ValidInterface { - b: Int - } - error: - message: 'INTERFACE InvalidInterface2: must define one or more fields.' - locations: [{line: 6, column: 11}] - - - name: check reserved names on type name - input: | - interface __FooBar { - id: ID - } - error: - message: 'Name "__FooBar" must not begin with "__", which is reserved by GraphQL introspection.' - locations: [{line: 1, column: 11}] - - - name: must not allow input object as field type - input: | - input Input { - id: ID - } - type Query { - foo: Foo! - } - interface Foo { - input: Input! - } - error: - message: 'INTERFACE Foo: field must be one of SCALAR, OBJECT, INTERFACE, UNION, ENUM.' - locations: [{line: 8, column: 3}] - - - name: must have all fields from interface - input: | - type Bar implements BarInterface { - someField: Int! - } - - interface BarInterface { - id: ID! - } - error: - message: 'For Bar to implement BarInterface it must have a field called id.' - locations: [{line: 1, column: 6}] - - - name: must have same type of fields - input: | - type Bar implements BarInterface { - id: Int! - } - - interface BarInterface { - id: ID! - } - error: - message: 'For Bar to implement BarInterface the field id must have type ID!.' - locations: [{line: 2, column: 5}] - - - name: must have all required arguments - input: | - type Bar implements BarInterface { - id: ID! - } - - interface BarInterface { - id(ff: Int!): ID! - } - error: - message: 'For Bar to implement BarInterface the field id must have the same arguments but it is missing ff.' 
- locations: [{line: 2, column: 5}] - - - name: must have same argument types - input: | - type Bar implements BarInterface { - id(ff: ID!): ID! - } - - interface BarInterface { - id(ff: Int!): ID! - } - error: - message: 'For Bar to implement BarInterface the field id must have the same arguments but ff has the wrong type.' - locations: [{line: 2, column: 8}] - - - name: may defined additional nullable arguments - input: | - type Bar implements BarInterface { - id(opt: Int): ID! - } - - interface BarInterface { - id: ID! - } - - - name: may defined additional required arguments with defaults - input: | - type Bar implements BarInterface { - id(opt: Int! = 1): ID! - } - - interface BarInterface { - id: ID! - } - - - name: must not define additional required arguments without defaults - input: | - type Bar implements BarInterface { - id(opt: Int!): ID! - } - - interface BarInterface { - id: ID! - } - error: - message: 'For Bar to implement BarInterface any additional arguments on id must be optional or have a default value but opt is required.' - locations: [{line: 2, column: 8}] - - - name: can have covariant argument types - input: | - union U = A|B - - type A { name: String } - type B { name: String } - - type Bar implements BarInterface { - f: A! - } - - interface BarInterface { - f: U! - } - - - name: may define intermediate interfaces - input: | - interface IA { - id: ID! - } - - interface IIA implements IA { - id: ID! - } - - type A implements IIA & IA { - id: ID! - } - - - name: Type Foo must implement Baz because it is implemented by Bar - input: | - interface Baz { - baz: String - } - - interface Bar implements Baz { - bar: String - baz: String - } - - type Foo implements Bar { - foo: String - bar: String - baz: String - } - error: - message: 'Type Foo must implement Baz because it is implemented by Bar.' - locations: [{line: 10, column: 6}] - - - name: circular reference error - input: | - interface Circular1 implements Circular2 { - id: ID! 
- } - - interface Circular2 implements Circular1 { - id: ID! - } - error: - message: 'Type Circular1 cannot implement Circular2 because it would create a circular reference.' - locations: [{line: 1, column: 11}] - -inputs: - - name: must define one or more input fields - input: | - directive @D on INPUT_OBJECT - - # This pattern rejected by parser - # input InvalidInput1 {} - - input InvalidInput2 @D - - input ValidInput { - id: ID - } - extend input ValidInput @D - extend input ValidInput { - b: Int - } - error: - message: 'INPUT_OBJECT InvalidInput2: must define one or more input fields.' - locations: [{line: 6, column: 7}] - - name: check reserved names on type name - input: | - input __FooBar { - id: ID - } - error: - message: 'Name "__FooBar" must not begin with "__", which is reserved by GraphQL introspection.' - locations: [{line: 1, column: 7}] - - - name: fields cannot be Objects - input: | - type Object { id: ID } - input Foo { a: Object! } - error: - message: 'OBJECT a: field must be one of SCALAR, ENUM, INPUT_OBJECT.' - locations: [{line: 2, column: 13}] - - - name: fields cannot be Interfaces - input: | - interface Interface { id: ID! } - input Foo { a: Interface! } - error: - message: 'INTERFACE a: field must be one of SCALAR, ENUM, INPUT_OBJECT.' - locations: [{line: 2, column: 13}] - - - name: fields cannot be Unions - input: | - type Object { id: ID } - union Union = Object - input Foo { a: Union! } - error: - message: 'UNION a: field must be one of SCALAR, ENUM, INPUT_OBJECT.' - locations: [{line: 3, column: 13}] - -args: - - name: Valid arg types - input: | - input Input { id: ID } - enum Enum { A } - scalar Scalar - - type Query { - f(a: Input, b: Scalar, c: Enum): Boolean! - } - - - name: Objects not allowed - input: | - type Object { id: ID } - type Query { f(a: Object): Boolean! 
} - - error: - message: 'cannot use Object as argument a because OBJECT is not a valid input type' - locations: [{line: 2, column: 16}] - - - name: Union not allowed - input: | - type Object { id: ID } - union Union = Object - type Query { f(a: Union): Boolean! } - - error: - message: 'cannot use Union as argument a because UNION is not a valid input type' - locations: [{line: 3, column: 16}] - - - name: Interface not allowed - input: | - interface Interface { id: ID } - type Query { f(a: Interface): Boolean! } - - error: - message: 'cannot use Interface as argument a because INTERFACE is not a valid input type' - locations: [{line: 2, column: 16}] - -enums: - - name: must define one or more unique enum values - input: | - directive @D on ENUM - - # This pattern rejected by parser - # enum InvalidEmum1 {} - - enum InvalidEnum2 @D - - enum ValidEnum { - FOO - } - extend enum ValidEnum @D - extend enum ValidEnum { - BAR - } - error: - message: 'ENUM InvalidEnum2: must define one or more unique enum values.' - locations: [{line: 6, column: 6}] - - name: check reserved names on type name - input: | - enum __FooBar { - A - B - } - error: - message: 'Name "__FooBar" must not begin with "__", which is reserved by GraphQL introspection.' - locations: [{line: 1, column: 6}] - -unions: - - name: union types must be defined - input: | - union Foo = Bar | Baz - type Bar { - id: ID - } - error: - message: "Undefined type \"Baz\"." - locations: [{line: 1, column: 7}] - - name: union types must be objects - input: | - union Foo = Baz - interface Baz { - id: ID - } - error: - message: "UNION type \"Baz\" must be OBJECT." - locations: [{line: 1, column: 7}] - - - name: unions of pure type extensions are valid - input: | - - type Review { - body: String! - author: User! @provides(fields: "username") - product: Product! - } - - extend type User @key(fields: "id") { - id: ID! @external - reviews: [Review] - } - - extend type Product @key(fields: "upc") { - upc: String! 
@external - reviews: [Review] - } - - union Foo = User | Product - scalar _Any - scalar _FieldSet - directive @external on FIELD_DEFINITION - directive @requires(fields: _FieldSet!) on FIELD_DEFINITION - directive @provides(fields: _FieldSet!) on FIELD_DEFINITION - directive @key(fields: _FieldSet!) on OBJECT | INTERFACE - directive @extends on OBJECT - - - -type extensions: - - name: can extend non existant types - input: | - extend type A { - name: String - } - - - - name: cannot extend incorret type existant types - input: | - scalar A - extend type A { - name: String - } - error: - message: "Cannot extend type A because the base type is a SCALAR, not OBJECT." - locations: [{line: 2, column: 13}] - -directives: - - name: cannot redeclare directives - input: | - directive @A on FIELD_DEFINITION - directive @A on FIELD_DEFINITION - error: - message: "Cannot redeclare directive A." - locations: [{line: 2, column: 12}] - - - name: can redeclare builtin directives - input: | - directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT - directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT - - - name: must be declared - input: | - type User { - name: String @foo - } - error: - message: "Undefined directive foo." - locations: [{line: 2, column: 17}] - - - name: cannot be self-referential - input: | - directive @A(foo: Int! @A) on FIELD_DEFINITION - error: - message: "Directive A cannot refer to itself." - locations: [{line: 1, column: 25}] - - name: check reserved names on type name - input: | - directive @__A on FIELD_DEFINITION - error: - message: 'Name "__A" must not begin with "__", which is reserved by GraphQL introspection.' 
- locations: [{line: 1, column: 12}] - - - name: Valid arg types - input: | - input Input { id: ID } - enum Enum { A } - scalar Scalar - - directive @A(a: Input, b: Scalar, c: Enum) on FIELD_DEFINITION - - - name: Objects not allowed - input: | - type Object { id: ID } - directive @A(a: Object) on FIELD_DEFINITION - - error: - message: 'cannot use Object as argument a because OBJECT is not a valid input type' - locations: [{line: 2, column: 14}] - - - name: Union not allowed - input: | - type Object { id: ID } - union Union = Object - directive @A(a: Union) on FIELD_DEFINITION - - error: - message: 'cannot use Union as argument a because UNION is not a valid input type' - locations: [{line: 3, column: 14}] - - - name: Interface not allowed - input: | - interface Interface { id: ID } - directive @A(a: Interface) on FIELD_DEFINITION - - error: - message: 'cannot use Interface as argument a because INTERFACE is not a valid input type' - locations: [{line: 2, column: 14}] - - - name: Invalid location usage not allowed - input: | - directive @test on FIELD_DEFINITION - input I1 @test { f: String } - - error: - message: 'Directive test is not applicable on INPUT_OBJECT.' - locations: [{line: 2, column: 11}] - - - name: Valid location usage - input: | - directive @testInputField on INPUT_FIELD_DEFINITION - directive @testField on FIELD_DEFINITION - directive @inp on INPUT_OBJECT - input I1 @inp { f: String @testInputField } - type P { name: String @testField } - interface I { id: ID @testField } - - -entry points: - - name: multiple schema entry points - input: | - schema { - query: Query - } - schema { - query: Query - } - scalar Query - error: - message: "Cannot have multiple schema entry points, consider schema extensions instead." - locations: [{line: 4, column: 8}] - - - name: Undefined schema entrypoint - input: | - schema { - query: Query - } - error: - message: "Schema root query refers to a type Query that does not exist." 
- locations: [{line: 2, column: 3}] - -entry point extensions: - - name: Undefined schema entrypoint - input: | - schema { - query: Query - } - scalar Query - extend schema { - mutation: Mutation - } - error: - message: "Schema root mutation refers to a type Mutation that does not exist." - locations: [{line: 6, column: 3}] - -type references: - - name: Field types - input: | - type User { - posts: Post - } - error: - message: "Undefined type Post." - locations: [{line: 2, column: 10}] - - - name: Arg types - input: | - type User { - posts(foo: FooBar): String - } - error: - message: "Undefined type FooBar." - locations: [{line: 2, column: 14}] - - - name: Directive arg types - input: | - directive @Foo(foo: FooBar) on FIELD_DEFINITION - - error: - message: "Undefined type FooBar." - locations: [{line: 1, column: 21}] - - - name: Invalid enum value - input: | - enum Enum { true } - - error: - message: "ENUM Enum: non-enum value true." - locations: [{line: 1, column: 6}] diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/suggestionList.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/suggestionList.go deleted file mode 100644 index f0bbc32786..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/suggestionList.go +++ /dev/null @@ -1,69 +0,0 @@ -package validator - -import ( - "math" - "sort" - "strings" - - "github.com/agnivade/levenshtein" -) - -// Given an invalid input string and a list of valid options, returns a filtered -// list of valid options sorted based on their similarity with the input. 
-func SuggestionList(input string, options []string) []string { - var results []string - optionsByDistance := map[string]int{} - - for _, option := range options { - distance := lexicalDistance(input, option) - threshold := calcThreshold(input) - if distance <= threshold { - results = append(results, option) - optionsByDistance[option] = distance - } - } - - sort.Slice(results, func(i, j int) bool { - return optionsByDistance[results[i]] < optionsByDistance[results[j]] - }) - return results -} - -func calcThreshold(a string) (threshold int) { - // the logic is copied from here - // https://github.com/graphql/graphql-js/blob/47bd8c8897c72d3efc17ecb1599a95cee6bac5e8/src/jsutils/suggestionList.ts#L14 - threshold = int(math.Floor(float64(len(a))*0.4) + 1) - - if threshold < 1 { - threshold = 1 - } - return -} - -// Computes the lexical distance between strings A and B. -// -// The "distance" between two strings is given by counting the minimum number -// of edits needed to transform string A into string B. An edit can be an -// insertion, deletion, or substitution of a single character, or a swap of two -// adjacent characters. -// -// Includes a custom alteration from Damerau-Levenshtein to treat case changes -// as a single edit which helps identify mis-cased values with an edit distance -// of 1. 
-// -// This distance can be useful for detecting typos in input or sorting -func lexicalDistance(a, b string) int { - if a == b { - return 0 - } - - a = strings.ToLower(a) - b = strings.ToLower(b) - - // Any case change counts as a single edit - if a == b { - return 1 - } - - return levenshtein.ComputeDistance(a, b) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/validator.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/validator.go deleted file mode 100644 index 05f5b91669..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/validator.go +++ /dev/null @@ -1,45 +0,0 @@ -package validator - -import ( - //nolint:revive - . "github.com/open-policy-agent/opa/internal/gqlparser/ast" - "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror" -) - -type AddErrFunc func(options ...ErrorOption) - -type ruleFunc func(observers *Events, addError AddErrFunc) - -type rule struct { - name string - rule ruleFunc -} - -var rules []rule - -// addRule to rule set. -// f is called once each time `Validate` is executed. 
-func AddRule(name string, f ruleFunc) { - rules = append(rules, rule{name: name, rule: f}) -} - -func Validate(schema *Schema, doc *QueryDocument) gqlerror.List { - var errs gqlerror.List - - observers := &Events{} - for i := range rules { - rule := rules[i] - rule.rule(observers, func(options ...ErrorOption) { - err := &gqlerror.Error{ - Rule: rule.name, - } - for _, o := range options { - o(err) - } - errs = append(errs, err) - }) - } - - Walk(schema, doc, observers) - return errs -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/walk.go b/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/walk.go deleted file mode 100644 index f722871869..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/walk.go +++ /dev/null @@ -1,292 +0,0 @@ -package validator - -import ( - "context" - "fmt" - - "github.com/open-policy-agent/opa/internal/gqlparser/ast" -) - -type Events struct { - operationVisitor []func(walker *Walker, operation *ast.OperationDefinition) - field []func(walker *Walker, field *ast.Field) - fragment []func(walker *Walker, fragment *ast.FragmentDefinition) - inlineFragment []func(walker *Walker, inlineFragment *ast.InlineFragment) - fragmentSpread []func(walker *Walker, fragmentSpread *ast.FragmentSpread) - directive []func(walker *Walker, directive *ast.Directive) - directiveList []func(walker *Walker, directives []*ast.Directive) - value []func(walker *Walker, value *ast.Value) - variable []func(walker *Walker, variable *ast.VariableDefinition) -} - -func (o *Events) OnOperation(f func(walker *Walker, operation *ast.OperationDefinition)) { - o.operationVisitor = append(o.operationVisitor, f) -} -func (o *Events) OnField(f func(walker *Walker, field *ast.Field)) { - o.field = append(o.field, f) -} -func (o *Events) OnFragment(f func(walker *Walker, fragment *ast.FragmentDefinition)) { - o.fragment = append(o.fragment, f) -} -func (o *Events) OnInlineFragment(f func(walker 
*Walker, inlineFragment *ast.InlineFragment)) { - o.inlineFragment = append(o.inlineFragment, f) -} -func (o *Events) OnFragmentSpread(f func(walker *Walker, fragmentSpread *ast.FragmentSpread)) { - o.fragmentSpread = append(o.fragmentSpread, f) -} -func (o *Events) OnDirective(f func(walker *Walker, directive *ast.Directive)) { - o.directive = append(o.directive, f) -} -func (o *Events) OnDirectiveList(f func(walker *Walker, directives []*ast.Directive)) { - o.directiveList = append(o.directiveList, f) -} -func (o *Events) OnValue(f func(walker *Walker, value *ast.Value)) { - o.value = append(o.value, f) -} -func (o *Events) OnVariable(f func(walker *Walker, variable *ast.VariableDefinition)) { - o.variable = append(o.variable, f) -} - -func Walk(schema *ast.Schema, document *ast.QueryDocument, observers *Events) { - w := Walker{ - Observers: observers, - Schema: schema, - Document: document, - } - - w.walk() -} - -type Walker struct { - Context context.Context - Observers *Events - Schema *ast.Schema - Document *ast.QueryDocument - - validatedFragmentSpreads map[string]bool - CurrentOperation *ast.OperationDefinition -} - -func (w *Walker) walk() { - for _, child := range w.Document.Operations { - w.validatedFragmentSpreads = make(map[string]bool) - w.walkOperation(child) - } - for _, child := range w.Document.Fragments { - w.validatedFragmentSpreads = make(map[string]bool) - w.walkFragment(child) - } -} - -func (w *Walker) walkOperation(operation *ast.OperationDefinition) { - w.CurrentOperation = operation - for _, varDef := range operation.VariableDefinitions { - varDef.Definition = w.Schema.Types[varDef.Type.Name()] - for _, v := range w.Observers.variable { - v(w, varDef) - } - if varDef.DefaultValue != nil { - varDef.DefaultValue.ExpectedType = varDef.Type - varDef.DefaultValue.Definition = w.Schema.Types[varDef.Type.Name()] - } - } - - var def *ast.Definition - var loc ast.DirectiveLocation - switch operation.Operation { - case ast.Query, "": - def = 
w.Schema.Query - loc = ast.LocationQuery - case ast.Mutation: - def = w.Schema.Mutation - loc = ast.LocationMutation - case ast.Subscription: - def = w.Schema.Subscription - loc = ast.LocationSubscription - } - - for _, varDef := range operation.VariableDefinitions { - if varDef.DefaultValue != nil { - w.walkValue(varDef.DefaultValue) - } - w.walkDirectives(varDef.Definition, varDef.Directives, ast.LocationVariableDefinition) - } - - w.walkDirectives(def, operation.Directives, loc) - w.walkSelectionSet(def, operation.SelectionSet) - - for _, v := range w.Observers.operationVisitor { - v(w, operation) - } - w.CurrentOperation = nil -} - -func (w *Walker) walkFragment(it *ast.FragmentDefinition) { - def := w.Schema.Types[it.TypeCondition] - - it.Definition = def - - w.walkDirectives(def, it.Directives, ast.LocationFragmentDefinition) - w.walkSelectionSet(def, it.SelectionSet) - - for _, v := range w.Observers.fragment { - v(w, it) - } -} - -func (w *Walker) walkDirectives(parentDef *ast.Definition, directives []*ast.Directive, location ast.DirectiveLocation) { - for _, dir := range directives { - def := w.Schema.Directives[dir.Name] - dir.Definition = def - dir.ParentDefinition = parentDef - dir.Location = location - - for _, arg := range dir.Arguments { - var argDef *ast.ArgumentDefinition - if def != nil { - argDef = def.Arguments.ForName(arg.Name) - } - - w.walkArgument(argDef, arg) - } - - for _, v := range w.Observers.directive { - v(w, dir) - } - } - - for _, v := range w.Observers.directiveList { - v(w, directives) - } -} - -func (w *Walker) walkValue(value *ast.Value) { - if value.Kind == ast.Variable && w.CurrentOperation != nil { - value.VariableDefinition = w.CurrentOperation.VariableDefinitions.ForName(value.Raw) - if value.VariableDefinition != nil { - value.VariableDefinition.Used = true - } - } - - if value.Kind == ast.ObjectValue { - for _, child := range value.Children { - if value.Definition != nil { - fieldDef := 
value.Definition.Fields.ForName(child.Name) - if fieldDef != nil { - child.Value.ExpectedType = fieldDef.Type - child.Value.Definition = w.Schema.Types[fieldDef.Type.Name()] - } - } - w.walkValue(child.Value) - } - } - - if value.Kind == ast.ListValue { - for _, child := range value.Children { - if value.ExpectedType != nil && value.ExpectedType.Elem != nil { - child.Value.ExpectedType = value.ExpectedType.Elem - child.Value.Definition = value.Definition - } - - w.walkValue(child.Value) - } - } - - for _, v := range w.Observers.value { - v(w, value) - } -} - -func (w *Walker) walkArgument(argDef *ast.ArgumentDefinition, arg *ast.Argument) { - if argDef != nil { - arg.Value.ExpectedType = argDef.Type - arg.Value.Definition = w.Schema.Types[argDef.Type.Name()] - } - - w.walkValue(arg.Value) -} - -func (w *Walker) walkSelectionSet(parentDef *ast.Definition, it ast.SelectionSet) { - for _, child := range it { - w.walkSelection(parentDef, child) - } -} - -func (w *Walker) walkSelection(parentDef *ast.Definition, it ast.Selection) { - switch it := it.(type) { - case *ast.Field: - var def *ast.FieldDefinition - if it.Name == "__typename" { - def = &ast.FieldDefinition{ - Name: "__typename", - Type: ast.NamedType("String", nil), - } - } else if parentDef != nil { - def = parentDef.Fields.ForName(it.Name) - } - - it.Definition = def - it.ObjectDefinition = parentDef - - var nextParentDef *ast.Definition - if def != nil { - nextParentDef = w.Schema.Types[def.Type.Name()] - } - - for _, arg := range it.Arguments { - var argDef *ast.ArgumentDefinition - if def != nil { - argDef = def.Arguments.ForName(arg.Name) - } - - w.walkArgument(argDef, arg) - } - - w.walkDirectives(nextParentDef, it.Directives, ast.LocationField) - w.walkSelectionSet(nextParentDef, it.SelectionSet) - - for _, v := range w.Observers.field { - v(w, it) - } - - case *ast.InlineFragment: - it.ObjectDefinition = parentDef - - nextParentDef := parentDef - if it.TypeCondition != "" { - nextParentDef = 
w.Schema.Types[it.TypeCondition] - } - - w.walkDirectives(nextParentDef, it.Directives, ast.LocationInlineFragment) - w.walkSelectionSet(nextParentDef, it.SelectionSet) - - for _, v := range w.Observers.inlineFragment { - v(w, it) - } - - case *ast.FragmentSpread: - def := w.Document.Fragments.ForName(it.Name) - it.Definition = def - it.ObjectDefinition = parentDef - - var nextParentDef *ast.Definition - if def != nil { - nextParentDef = w.Schema.Types[def.TypeCondition] - } - - w.walkDirectives(nextParentDef, it.Directives, ast.LocationFragmentSpread) - - if def != nil && !w.validatedFragmentSpreads[def.Name] { - // prevent inifinite recursion - w.validatedFragmentSpreads[def.Name] = true - w.walkSelectionSet(nextParentDef, def.SelectionSet) - } - - for _, v := range w.Observers.fragmentSpread { - v(w, it) - } - - default: - panic(fmt.Errorf("unsupported %T", it)) - } -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go b/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go index 31c89869da..9ddb93506e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go +++ b/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go @@ -7,7 +7,7 @@ package patch import ( "strings" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/storage" ) // ParsePatchPathEscaped returns a new path for the given escaped str. @@ -37,8 +37,8 @@ func ParsePatchPathEscaped(str string) (path storage.Path, ok bool) { // the substitutions in this order, an implementation avoids the error of // turning '~01' first into '~1' and then into '/', which would be // incorrect (the string '~01' correctly becomes '~1' after transformation)." 
- path[i] = strings.Replace(path[i], "~1", "/", -1) - path[i] = strings.Replace(path[i], "~0", "~", -1) + path[i] = strings.ReplaceAll(path[i], "~1", "/") + path[i] = strings.ReplaceAll(path[i], "~0", "~") } return diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE b/vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE deleted file mode 100644 index 6369f4fcc4..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 lestrrat - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go deleted file mode 100644 index c383ff3b54..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go +++ /dev/null @@ -1,112 +0,0 @@ -// Package buffer provides a very thin wrapper around []byte buffer called -// `Buffer`, to provide functionalities that are often used within the jwx -// related packages -package buffer - -import ( - "encoding/base64" - "encoding/binary" - "encoding/json" - "fmt" -) - -// Buffer wraps `[]byte` and provides functions that are often used in -// the jwx related packages. One notable difference is that while -// encoding/json marshalls `[]byte` using base64.StdEncoding, this -// module uses base64.RawURLEncoding as mandated by the spec -type Buffer []byte - -// FromUint creates a `Buffer` from an unsigned int -func FromUint(v uint64) Buffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, v) - - i := 0 - for ; i < len(data); i++ { - if data[i] != 0x0 { - break - } - } - return Buffer(data[i:]) -} - -// FromBase64 constructs a new Buffer from a base64 encoded data -func FromBase64(v []byte) (Buffer, error) { - b := Buffer{} - if err := b.Base64Decode(v); err != nil { - return Buffer(nil), fmt.Errorf("failed to decode from base64: %w", err) - } - - return b, nil -} - -// FromNData constructs a new Buffer from a "n:data" format -// (I made that name up) -func FromNData(v []byte) (Buffer, error) { - size := binary.BigEndian.Uint32(v) - buf := make([]byte, int(size)) - copy(buf, v[4:4+size]) - return Buffer(buf), nil -} - -// Bytes returns the raw bytes that comprises the Buffer -func (b Buffer) Bytes() []byte { - return []byte(b) -} - -// NData returns Datalen || Data, where Datalen is a 32 bit counter for -// the length of the following data, and Data is the octets that comprise -// the buffer data -func (b Buffer) NData() []byte { - buf := 
make([]byte, 4+b.Len()) - binary.BigEndian.PutUint32(buf, uint32(b.Len())) - - copy(buf[4:], b.Bytes()) - return buf -} - -// Len returns the number of bytes that the Buffer holds -func (b Buffer) Len() int { - return len(b) -} - -// Base64Encode encodes the contents of the Buffer using base64.RawURLEncoding -func (b Buffer) Base64Encode() ([]byte, error) { - enc := base64.RawURLEncoding - out := make([]byte, enc.EncodedLen(len(b))) - enc.Encode(out, b) - return out, nil -} - -// Base64Decode decodes the contents of the Buffer using base64.RawURLEncoding -func (b *Buffer) Base64Decode(v []byte) error { - enc := base64.RawURLEncoding - out := make([]byte, enc.DecodedLen(len(v))) - n, err := enc.Decode(out, v) - if err != nil { - return fmt.Errorf("failed to decode from base64: %w", err) - } - out = out[:n] - *b = Buffer(out) - return nil -} - -// MarshalJSON marshals the buffer into JSON format after encoding the buffer -// with base64.RawURLEncoding -func (b Buffer) MarshalJSON() ([]byte, error) { - v, err := b.Base64Encode() - if err != nil { - return nil, fmt.Errorf("failed to encode to base64: %w", err) - } - return json.Marshal(string(v)) -} - -// UnmarshalJSON unmarshals from a JSON string into a Buffer, after decoding it -// with base64.RawURLEncoding -func (b *Buffer) UnmarshalJSON(data []byte) error { - var x string - if err := json.Unmarshal(data, &x); err != nil { - return fmt.Errorf("failed to unmarshal JSON: %w", err) - } - return b.Base64Decode([]byte(x)) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go deleted file mode 100644 index b7e35dc707..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go +++ /dev/null @@ -1,11 +0,0 @@ -package jwa - -// EllipticCurveAlgorithm represents the algorithms used for EC keys -type EllipticCurveAlgorithm string - -// Supported values for EllipticCurveAlgorithm -const ( - P256 
EllipticCurveAlgorithm = "P-256" - P384 EllipticCurveAlgorithm = "P-384" - P521 EllipticCurveAlgorithm = "P-521" -) diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go deleted file mode 100644 index 98f0cc42e2..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go +++ /dev/null @@ -1,67 +0,0 @@ -package jwa - -import ( - "errors" - "fmt" - "strconv" -) - -// KeyType represents the key type ("kty") that are supported -type KeyType string - -var keyTypeAlg = map[string]struct{}{"EC": {}, "oct": {}, "RSA": {}} - -// Supported values for KeyType -const ( - EC KeyType = "EC" // Elliptic Curve - InvalidKeyType KeyType = "" // Invalid KeyType - OctetSeq KeyType = "oct" // Octet sequence (used to represent symmetric keys) - RSA KeyType = "RSA" // RSA -) - -// Accept is used when conversion from values given by -// outside sources (such as JSON payloads) is required -func (keyType *KeyType) Accept(value interface{}) error { - var tmp KeyType - switch x := value.(type) { - case string: - tmp = KeyType(x) - case KeyType: - tmp = x - default: - return fmt.Errorf("invalid type for jwa.KeyType: %T", value) - } - _, ok := keyTypeAlg[tmp.String()] - if !ok { - return errors.New("unknown Key Type algorithm") - } - - *keyType = tmp - return nil -} - -// String returns the string representation of a KeyType -func (keyType KeyType) String() string { - return string(keyType) -} - -// UnmarshalJSON unmarshals and checks data as KeyType Algorithm -func (keyType *KeyType) UnmarshalJSON(data []byte) error { - var quote byte = '"' - var quoted string - if data[0] == quote { - var err error - quoted, err = strconv.Unquote(string(data)) - if err != nil { - return fmt.Errorf("failed to process signature algorithm: %w", err) - } - } else { - quoted = string(data) - } - _, ok := keyTypeAlg[quoted] - if !ok { - return errors.New("unknown signature algorithm") - } - 
*keyType = KeyType(quoted) - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go deleted file mode 100644 index 2fe72e1dbc..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go +++ /dev/null @@ -1,29 +0,0 @@ -package jwa - -import ( - "crypto/elliptic" - - "github.com/open-policy-agent/opa/internal/jwx/buffer" -) - -// EllipticCurve provides a indirect type to standard elliptic curve such that we can -// use it for unmarshal -type EllipticCurve struct { - elliptic.Curve -} - -// AlgorithmParameters provides a single structure suitable to unmarshaling any JWK -type AlgorithmParameters struct { - N buffer.Buffer `json:"n,omitempty"` - E buffer.Buffer `json:"e,omitempty"` - D buffer.Buffer `json:"d,omitempty"` - P buffer.Buffer `json:"p,omitempty"` - Q buffer.Buffer `json:"q,omitempty"` - Dp buffer.Buffer `json:"dp,omitempty"` - Dq buffer.Buffer `json:"dq,omitempty"` - Qi buffer.Buffer `json:"qi,omitempty"` - Crv EllipticCurveAlgorithm `json:"crv,omitempty"` - X buffer.Buffer `json:"x,omitempty"` - Y buffer.Buffer `json:"y,omitempty"` - K buffer.Buffer `json:"k,omitempty"` -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go deleted file mode 100644 index 45e400176d..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go +++ /dev/null @@ -1,78 +0,0 @@ -package jwa - -import ( - "errors" - "fmt" - "strconv" -) - -// SignatureAlgorithm represents the various signature algorithms as described in https://tools.ietf.org/html/rfc7518#section-3.1 -type SignatureAlgorithm string - -var signatureAlg = map[string]struct{}{"ES256": {}, "ES384": {}, "ES512": {}, "HS256": {}, "HS384": {}, "HS512": {}, "PS256": {}, "PS384": {}, "PS512": {}, "RS256": {}, "RS384": {}, "RS512": {}, "none": {}} - -// 
Supported values for SignatureAlgorithm -const ( - ES256 SignatureAlgorithm = "ES256" // ECDSA using P-256 and SHA-256 - ES384 SignatureAlgorithm = "ES384" // ECDSA using P-384 and SHA-384 - ES512 SignatureAlgorithm = "ES512" // ECDSA using P-521 and SHA-512 - HS256 SignatureAlgorithm = "HS256" // HMAC using SHA-256 - HS384 SignatureAlgorithm = "HS384" // HMAC using SHA-384 - HS512 SignatureAlgorithm = "HS512" // HMAC using SHA-512 - NoSignature SignatureAlgorithm = "none" - PS256 SignatureAlgorithm = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256 - PS384 SignatureAlgorithm = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384 - PS512 SignatureAlgorithm = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 - RS256 SignatureAlgorithm = "RS256" // RSASSA-PKCS-v1.5 using SHA-256 - RS384 SignatureAlgorithm = "RS384" // RSASSA-PKCS-v1.5 using SHA-384 - RS512 SignatureAlgorithm = "RS512" // RSASSA-PKCS-v1.5 using SHA-512 - NoValue SignatureAlgorithm = "" // No value is different from none - Unsupported SignatureAlgorithm = "unsupported" -) - -// Accept is used when conversion from values given by -// outside sources (such as JSON payloads) is required -func (signature *SignatureAlgorithm) Accept(value interface{}) error { - var tmp SignatureAlgorithm - switch x := value.(type) { - case string: - tmp = SignatureAlgorithm(x) - case SignatureAlgorithm: - tmp = x - default: - return fmt.Errorf("invalid type for jwa.SignatureAlgorithm: %T", value) - } - _, ok := signatureAlg[tmp.String()] - if !ok { - return errors.New("unknown signature algorithm") - } - *signature = tmp - return nil -} - -// String returns the string representation of a SignatureAlgorithm -func (signature SignatureAlgorithm) String() string { - return string(signature) -} - -// UnmarshalJSON unmarshals and checks data as Signature Algorithm -func (signature *SignatureAlgorithm) UnmarshalJSON(data []byte) error { - var quote byte = '"' - var quoted string - if data[0] == quote { - var err error - quoted, 
err = strconv.Unquote(string(data)) - if err != nil { - return fmt.Errorf("failed to process signature algorithm: %w", err) - } - } else { - quoted = string(data) - } - _, ok := signatureAlg[quoted] - if !ok { - *signature = Unsupported - return nil - } - *signature = SignatureAlgorithm(quoted) - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go deleted file mode 100644 index b46689f037..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go +++ /dev/null @@ -1,120 +0,0 @@ -package jwk - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "errors" - "fmt" - "math/big" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -func newECDSAPublicKey(key *ecdsa.PublicKey) (*ECDSAPublicKey, error) { - - var hdr StandardHeaders - err := hdr.Set(KeyTypeKey, jwa.EC) - if err != nil { - return nil, fmt.Errorf("failed to set Key Type: %w", err) - } - - return &ECDSAPublicKey{ - StandardHeaders: &hdr, - key: key, - }, nil -} - -func newECDSAPrivateKey(key *ecdsa.PrivateKey) (*ECDSAPrivateKey, error) { - - var hdr StandardHeaders - err := hdr.Set(KeyTypeKey, jwa.EC) - if err != nil { - return nil, fmt.Errorf("failed to set Key Type: %w", err) - } - - return &ECDSAPrivateKey{ - StandardHeaders: &hdr, - key: key, - }, nil -} - -// Materialize returns the EC-DSA public key represented by this JWK -func (k ECDSAPublicKey) Materialize() (interface{}, error) { - return k.key, nil -} - -// Materialize returns the EC-DSA private key represented by this JWK -func (k ECDSAPrivateKey) Materialize() (interface{}, error) { - return k.key, nil -} - -// GenerateKey creates a ECDSAPublicKey from JWK format -func (k *ECDSAPublicKey) GenerateKey(keyJSON *RawKeyJSON) error { - - var x, y big.Int - - if keyJSON.X == nil || keyJSON.Y == nil || keyJSON.Crv == "" { - return errors.New("missing mandatory key parameters X, Y or Crv") - } - - 
x.SetBytes(keyJSON.X.Bytes()) - y.SetBytes(keyJSON.Y.Bytes()) - - var curve elliptic.Curve - switch keyJSON.Crv { - case jwa.P256: - curve = elliptic.P256() - case jwa.P384: - curve = elliptic.P384() - case jwa.P521: - curve = elliptic.P521() - default: - return fmt.Errorf("invalid curve name %s", keyJSON.Crv) - } - - *k = ECDSAPublicKey{ - StandardHeaders: &keyJSON.StandardHeaders, - key: &ecdsa.PublicKey{ - Curve: curve, - X: &x, - Y: &y, - }, - } - return nil -} - -// GenerateKey creates a ECDSAPrivateKey from JWK format -func (k *ECDSAPrivateKey) GenerateKey(keyJSON *RawKeyJSON) error { - - if keyJSON.D == nil { - return errors.New("missing mandatory key parameter D") - } - eCDSAPublicKey := &ECDSAPublicKey{} - err := eCDSAPublicKey.GenerateKey(keyJSON) - if err != nil { - return fmt.Errorf("failed to generate public key: %w", err) - } - dBytes := keyJSON.D.Bytes() - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := eCDSAPublicKey.key.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - if octetLength-len(dBytes) != 0 { - return errors.New("failed to generate private key. 
Incorrect D value") - } - privateKey := &ecdsa.PrivateKey{ - PublicKey: *eCDSAPublicKey.key, - D: (&big.Int{}).SetBytes(keyJSON.D.Bytes()), - } - - k.key = privateKey - k.StandardHeaders = &keyJSON.StandardHeaders - - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go deleted file mode 100644 index b0fd51e901..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go +++ /dev/null @@ -1,178 +0,0 @@ -package jwk - -import ( - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -// Convenience constants for common JWK parameters -const ( - AlgorithmKey = "alg" - KeyIDKey = "kid" - KeyOpsKey = "key_ops" - KeyTypeKey = "kty" - KeyUsageKey = "use" - PrivateParamsKey = "privateParams" -) - -// Headers provides a common interface to all future possible headers -type Headers interface { - Get(string) (interface{}, bool) - Set(string, interface{}) error - Walk(func(string, interface{}) error) error - GetAlgorithm() jwa.SignatureAlgorithm - GetKeyID() string - GetKeyOps() KeyOperationList - GetKeyType() jwa.KeyType - GetKeyUsage() string - GetPrivateParams() map[string]interface{} -} - -// StandardHeaders stores the common JWK parameters -type StandardHeaders struct { - Algorithm *jwa.SignatureAlgorithm `json:"alg,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.4 - KeyID string `json:"kid,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4 - KeyOps KeyOperationList `json:"key_ops,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.3 - KeyType jwa.KeyType `json:"kty,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.1 - KeyUsage string `json:"use,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.2 - PrivateParams map[string]interface{} `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4 -} - -// GetAlgorithm is a convenience function 
to retrieve the corresponding value stored in the StandardHeaders -func (h *StandardHeaders) GetAlgorithm() jwa.SignatureAlgorithm { - if v := h.Algorithm; v != nil { - return *v - } - return jwa.NoValue -} - -// GetKeyID is a convenience function to retrieve the corresponding value stored in the StandardHeaders -func (h *StandardHeaders) GetKeyID() string { - return h.KeyID -} - -// GetKeyOps is a convenience function to retrieve the corresponding value stored in the StandardHeaders -func (h *StandardHeaders) GetKeyOps() KeyOperationList { - return h.KeyOps -} - -// GetKeyType is a convenience function to retrieve the corresponding value stored in the StandardHeaders -func (h *StandardHeaders) GetKeyType() jwa.KeyType { - return h.KeyType -} - -// GetKeyUsage is a convenience function to retrieve the corresponding value stored in the StandardHeaders -func (h *StandardHeaders) GetKeyUsage() string { - return h.KeyUsage -} - -// GetPrivateParams is a convenience function to retrieve the corresponding value stored in the StandardHeaders -func (h *StandardHeaders) GetPrivateParams() map[string]interface{} { - return h.PrivateParams -} - -// Get is a general getter function for JWK StandardHeaders structure -func (h *StandardHeaders) Get(name string) (interface{}, bool) { - switch name { - case AlgorithmKey: - alg := h.GetAlgorithm() - if alg != jwa.NoValue { - return alg, true - } - return nil, false - case KeyIDKey: - v := h.KeyID - if v == "" { - return nil, false - } - return v, true - case KeyOpsKey: - v := h.KeyOps - if v == nil { - return nil, false - } - return v, true - case KeyTypeKey: - v := h.KeyType - if v == jwa.InvalidKeyType { - return nil, false - } - return v, true - case KeyUsageKey: - v := h.KeyUsage - if v == "" { - return nil, false - } - return v, true - case PrivateParamsKey: - v := h.PrivateParams - if len(v) == 0 { - return nil, false - } - return v, true - default: - return nil, false - } -} - -// Set is a general getter function for JWK 
StandardHeaders structure -func (h *StandardHeaders) Set(name string, value interface{}) error { - switch name { - case AlgorithmKey: - var acceptor jwa.SignatureAlgorithm - if err := acceptor.Accept(value); err != nil { - return fmt.Errorf("invalid value for %s key: %w", AlgorithmKey, err) - } - h.Algorithm = &acceptor - return nil - case KeyIDKey: - if v, ok := value.(string); ok { - h.KeyID = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", KeyIDKey, value) - case KeyOpsKey: - if err := h.KeyOps.Accept(value); err != nil { - return fmt.Errorf("invalid value for %s key: %w", KeyOpsKey, err) - } - return nil - case KeyTypeKey: - if err := h.KeyType.Accept(value); err != nil { - return fmt.Errorf("invalid value for %s key: %w", KeyTypeKey, err) - } - return nil - case KeyUsageKey: - if v, ok := value.(string); ok { - h.KeyUsage = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", KeyUsageKey, value) - case PrivateParamsKey: - if v, ok := value.(map[string]interface{}); ok { - h.PrivateParams = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", PrivateParamsKey, value) - default: - return fmt.Errorf("invalid key: %s", name) - } -} - -// Walk iterates over all JWK standard headers fields while applying a function to its value. 
-func (h StandardHeaders) Walk(f func(string, interface{}) error) error { - for _, key := range []string{AlgorithmKey, KeyIDKey, KeyOpsKey, KeyTypeKey, KeyUsageKey, PrivateParamsKey} { - if v, ok := h.Get(key); ok { - if err := f(key, v); err != nil { - return fmt.Errorf("walk function returned error for %s: %w", key, err) - } - } - } - - for k, v := range h.PrivateParams { - if err := f(k, v); err != nil { - return fmt.Errorf("walk function returned error for %s: %w", k, err) - } - } - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go deleted file mode 100644 index 7a7d03ef1c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go +++ /dev/null @@ -1,71 +0,0 @@ -package jwk - -import ( - "crypto/ecdsa" - "crypto/rsa" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -// Set is a convenience struct to allow generating and parsing -// JWK sets as opposed to single JWKs -type Set struct { - Keys []Key `json:"keys"` -} - -// Key defines the minimal interface for each of the -// key types. Their use and implementation differ significantly -// between each key types, so you should use type assertions -// to perform more specific tasks with each key -type Key interface { - Headers - - // Materialize creates the corresponding key. For example, - // RSA types would create *rsa.PublicKey or *rsa.PrivateKey, - // EC types would create *ecdsa.PublicKey or *ecdsa.PrivateKey, - // and OctetSeq types create a []byte key. 
- Materialize() (interface{}, error) - GenerateKey(*RawKeyJSON) error -} - -// RawKeyJSON is generic type that represents any kind JWK -type RawKeyJSON struct { - StandardHeaders - jwa.AlgorithmParameters -} - -// RawKeySetJSON is generic type that represents a JWK Set -type RawKeySetJSON struct { - Keys []RawKeyJSON `json:"keys"` -} - -// RSAPublicKey is a type of JWK generated from RSA public keys -type RSAPublicKey struct { - *StandardHeaders - key *rsa.PublicKey -} - -// RSAPrivateKey is a type of JWK generated from RSA private keys -type RSAPrivateKey struct { - *StandardHeaders - *jwa.AlgorithmParameters - key *rsa.PrivateKey -} - -// SymmetricKey is a type of JWK generated from symmetric keys -type SymmetricKey struct { - *StandardHeaders - key []byte -} - -// ECDSAPublicKey is a type of JWK generated from ECDSA public keys -type ECDSAPublicKey struct { - *StandardHeaders - key *ecdsa.PublicKey -} - -// ECDSAPrivateKey is a type of JWK generated from ECDH-ES private keys -type ECDSAPrivateKey struct { - *StandardHeaders - key *ecdsa.PrivateKey -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go deleted file mode 100644 index aa22a3830f..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go +++ /dev/null @@ -1,153 +0,0 @@ -// Package jwk implements JWK as described in https://tools.ietf.org/html/rfc7517 -package jwk - -import ( - "crypto/ecdsa" - "crypto/rsa" - "encoding/json" - "errors" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -// GetPublicKey returns the public key based on the private key type. -// For rsa key types *rsa.PublicKey is returned; for ecdsa key types *ecdsa.PublicKey; -// for byte slice (raw) keys, the key itself is returned. 
If the corresponding -// public key cannot be deduced, an error is returned -func GetPublicKey(key interface{}) (interface{}, error) { - if key == nil { - return nil, errors.New("jwk.New requires a non-nil key") - } - - switch v := key.(type) { - // Mental note: although Public() is defined in both types, - // you can not coalesce the clauses for rsa.PrivateKey and - // ecdsa.PrivateKey, as then `v` becomes interface{} - // b/c the compiler cannot deduce the exact type. - case *rsa.PrivateKey: - return v.Public(), nil - case *ecdsa.PrivateKey: - return v.Public(), nil - case []byte: - return v, nil - default: - return nil, fmt.Errorf("invalid key type %T", key) - } -} - -// GetKeyTypeFromKey creates a jwk.Key from the given key. -func GetKeyTypeFromKey(key interface{}) jwa.KeyType { - - switch key.(type) { - case *rsa.PrivateKey, *rsa.PublicKey: - return jwa.RSA - case *ecdsa.PrivateKey, *ecdsa.PublicKey: - return jwa.EC - case []byte: - return jwa.OctetSeq - default: - return jwa.InvalidKeyType - } -} - -// New creates a jwk.Key from the given key. 
-func New(key interface{}) (Key, error) { - if key == nil { - return nil, errors.New("jwk.New requires a non-nil key") - } - - switch v := key.(type) { - case *rsa.PrivateKey: - return newRSAPrivateKey(v) - case *rsa.PublicKey: - return newRSAPublicKey(v) - case *ecdsa.PrivateKey: - return newECDSAPrivateKey(v) - case *ecdsa.PublicKey: - return newECDSAPublicKey(v) - case []byte: - return newSymmetricKey(v) - default: - return nil, fmt.Errorf("invalid key type %T", key) - } -} - -func parse(jwkSrc string) (*Set, error) { - - var jwkKeySet Set - var jwkKey Key - rawKeySetJSON := &RawKeySetJSON{} - err := json.Unmarshal([]byte(jwkSrc), rawKeySetJSON) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal JWK Set: %w", err) - } - if len(rawKeySetJSON.Keys) == 0 { - - // It might be a single key - rawKeyJSON := &RawKeyJSON{} - err := json.Unmarshal([]byte(jwkSrc), rawKeyJSON) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal JWK: %w", err) - } - jwkKey, err = rawKeyJSON.GenerateKey() - if err != nil { - return nil, fmt.Errorf("failed to generate key: %w", err) - } - // Add to set - jwkKeySet.Keys = append(jwkKeySet.Keys, jwkKey) - } else { - for i := range rawKeySetJSON.Keys { - rawKeyJSON := rawKeySetJSON.Keys[i] - if rawKeyJSON.Algorithm != nil && *rawKeyJSON.Algorithm == jwa.Unsupported { - continue - } - jwkKey, err = rawKeyJSON.GenerateKey() - if err != nil { - return nil, fmt.Errorf("failed to generate key: %w", err) - } - jwkKeySet.Keys = append(jwkKeySet.Keys, jwkKey) - } - } - return &jwkKeySet, nil -} - -// ParseBytes parses JWK from the incoming byte buffer. -func ParseBytes(buf []byte) (*Set, error) { - return parse(string(buf[:])) -} - -// ParseString parses JWK from the incoming string. 
-func ParseString(s string) (*Set, error) { - return parse(s) -} - -// GenerateKey creates an internal representation of a key from a raw JWK JSON -func (r *RawKeyJSON) GenerateKey() (Key, error) { - - var key Key - - switch r.KeyType { - case jwa.RSA: - if r.D != nil { - key = &RSAPrivateKey{} - } else { - key = &RSAPublicKey{} - } - case jwa.EC: - if r.D != nil { - key = &ECDSAPrivateKey{} - } else { - key = &ECDSAPublicKey{} - } - case jwa.OctetSeq: - key = &SymmetricKey{} - default: - return nil, errors.New("unrecognized key type") - } - err := key.GenerateKey(r) - if err != nil { - return nil, fmt.Errorf("failed to generate key from JWK: %w", err) - } - return key, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go deleted file mode 100644 index e8fe4cd854..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go +++ /dev/null @@ -1,66 +0,0 @@ -package jwk - -import ( - "encoding/json" - "fmt" -) - -// KeyUsageType is used to denote what this key should be used for -type KeyUsageType string - -const ( - // ForSignature is the value used in the headers to indicate that - // this key should be used for signatures - ForSignature KeyUsageType = "sig" - // ForEncryption is the value used in the headers to indicate that - // this key should be used for encryptiong - ForEncryption KeyUsageType = "enc" -) - -// KeyOperation is used to denote the allowed operations for a Key -type KeyOperation string - -// KeyOperationList represents an slice of KeyOperation -type KeyOperationList []KeyOperation - -var keyOps = map[string]struct{}{"sign": {}, "verify": {}, "encrypt": {}, "decrypt": {}, "wrapKey": {}, "unwrapKey": {}, "deriveKey": {}, "deriveBits": {}} - -// KeyOperation constants -const ( - KeyOpSign KeyOperation = "sign" // (compute digital signature or MAC) - KeyOpVerify KeyOperation = "verify" // (verify digital signature or MAC) - 
KeyOpEncrypt KeyOperation = "encrypt" // (encrypt content) - KeyOpDecrypt KeyOperation = "decrypt" // (decrypt content and validate decryption, if applicable) - KeyOpWrapKey KeyOperation = "wrapKey" // (encrypt key) - KeyOpUnwrapKey KeyOperation = "unwrapKey" // (decrypt key and validate decryption, if applicable) - KeyOpDeriveKey KeyOperation = "deriveKey" // (derive key) - KeyOpDeriveBits KeyOperation = "deriveBits" // (derive bits not to be used as a key) -) - -// Accept determines if Key Operation is valid -func (keyOperationList *KeyOperationList) Accept(v interface{}) error { - switch x := v.(type) { - case KeyOperationList: - *keyOperationList = x - return nil - default: - return fmt.Errorf(`invalid value %T`, v) - } -} - -// UnmarshalJSON unmarshals and checks data as KeyType Algorithm -func (keyOperationList *KeyOperationList) UnmarshalJSON(data []byte) error { - var tempKeyOperationList []string - err := json.Unmarshal(data, &tempKeyOperationList) - if err != nil { - return fmt.Errorf("invalid key operation") - } - for _, value := range tempKeyOperationList { - _, ok := keyOps[value] - if !ok { - return fmt.Errorf("unknown key operation") - } - *keyOperationList = append(*keyOperationList, KeyOperation(value)) - } - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go deleted file mode 100644 index 11b8e3b56b..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go +++ /dev/null @@ -1,133 +0,0 @@ -package jwk - -import ( - "crypto/rsa" - "encoding/binary" - "errors" - "fmt" - "math/big" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -func newRSAPublicKey(key *rsa.PublicKey) (*RSAPublicKey, error) { - - var hdr StandardHeaders - err := hdr.Set(KeyTypeKey, jwa.RSA) - if err != nil { - return nil, fmt.Errorf("failed to set Key Type: %w", err) - } - return &RSAPublicKey{ - StandardHeaders: &hdr, - key: key, - }, nil -} 
- -func newRSAPrivateKey(key *rsa.PrivateKey) (*RSAPrivateKey, error) { - - var hdr StandardHeaders - err := hdr.Set(KeyTypeKey, jwa.RSA) - if err != nil { - return nil, fmt.Errorf("failed to set Key Type: %w", err) - } - - var algoParams jwa.AlgorithmParameters - - // it is needed to use raw encoding to omit the "=" paddings at the end - algoParams.D = key.D.Bytes() - algoParams.P = key.Primes[0].Bytes() - algoParams.Q = key.Primes[1].Bytes() - algoParams.Dp = key.Precomputed.Dp.Bytes() - algoParams.Dq = key.Precomputed.Dq.Bytes() - algoParams.Qi = key.Precomputed.Qinv.Bytes() - - // "modulus" (N) from the public key in the private key - algoParams.N = key.PublicKey.N.Bytes() - - // make the E a.k.a "coprime" - // https://en.wikipedia.org/wiki/RSA_(cryptosystem) - coprime := make([]byte, 8) - binary.BigEndian.PutUint64(coprime, uint64(key.PublicKey.E)) - // find the 1st index of non 0x0 paddings from the beginning - i := 0 - for ; i < len(coprime); i++ { - if coprime[i] != 0x0 { - break - } - } - algoParams.E = coprime[i:] - - return &RSAPrivateKey{ - StandardHeaders: &hdr, - AlgorithmParameters: &algoParams, - key: key, - }, nil -} - -// Materialize returns the standard RSA Public Key representation stored in the internal representation -func (k *RSAPublicKey) Materialize() (interface{}, error) { - if k.key == nil { - return nil, errors.New("key has no rsa.PublicKey associated with it") - } - return k.key, nil -} - -// Materialize returns the standard RSA Private Key representation stored in the internal representation -func (k *RSAPrivateKey) Materialize() (interface{}, error) { - if k.key == nil { - return nil, errors.New("key has no rsa.PrivateKey associated with it") - } - return k.key, nil -} - -// GenerateKey creates a RSAPublicKey from a RawKeyJSON -func (k *RSAPublicKey) GenerateKey(keyJSON *RawKeyJSON) error { - - if keyJSON.N == nil || keyJSON.E == nil { - return errors.New("missing mandatory key parameters N or E") - } - rsaPublicKey := &rsa.PublicKey{ 
- N: (&big.Int{}).SetBytes(keyJSON.N.Bytes()), - E: int((&big.Int{}).SetBytes(keyJSON.E.Bytes()).Int64()), - } - k.key = rsaPublicKey - k.StandardHeaders = &keyJSON.StandardHeaders - return nil -} - -// GenerateKey creates a RSAPublicKey from a RawKeyJSON -func (k *RSAPrivateKey) GenerateKey(keyJSON *RawKeyJSON) error { - - rsaPublicKey := &RSAPublicKey{} - err := rsaPublicKey.GenerateKey(keyJSON) - if err != nil { - return fmt.Errorf("failed to generate public key: %w", err) - } - - if keyJSON.D == nil || keyJSON.P == nil || keyJSON.Q == nil { - return errors.New("missing mandatory key parameters D, P or Q") - } - privateKey := &rsa.PrivateKey{ - PublicKey: *rsaPublicKey.key, - D: (&big.Int{}).SetBytes(keyJSON.D.Bytes()), - Primes: []*big.Int{ - (&big.Int{}).SetBytes(keyJSON.P.Bytes()), - (&big.Int{}).SetBytes(keyJSON.Q.Bytes()), - }, - } - - if keyJSON.Dp.Len() > 0 { - privateKey.Precomputed.Dp = (&big.Int{}).SetBytes(keyJSON.Dp.Bytes()) - } - if keyJSON.Dq.Len() > 0 { - privateKey.Precomputed.Dq = (&big.Int{}).SetBytes(keyJSON.Dq.Bytes()) - } - if keyJSON.Qi.Len() > 0 { - privateKey.Precomputed.Qinv = (&big.Int{}).SetBytes(keyJSON.Qi.Bytes()) - } - - k.key = privateKey - k.StandardHeaders = &keyJSON.StandardHeaders - k.AlgorithmParameters = &keyJSON.AlgorithmParameters - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go deleted file mode 100644 index e0cc0751e6..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go +++ /dev/null @@ -1,41 +0,0 @@ -package jwk - -import ( - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -func newSymmetricKey(key []byte) (*SymmetricKey, error) { - var hdr StandardHeaders - - err := hdr.Set(KeyTypeKey, jwa.OctetSeq) - if err != nil { - return nil, fmt.Errorf("failed to set Key Type: %w", err) - } - return &SymmetricKey{ - StandardHeaders: &hdr, - key: key, - }, 
nil -} - -// Materialize returns the octets for this symmetric key. -// Since this is a symmetric key, this just calls Octets -func (s SymmetricKey) Materialize() (interface{}, error) { - return s.Octets(), nil -} - -// Octets returns the octets in the key -func (s SymmetricKey) Octets() []byte { - return s.key -} - -// GenerateKey creates a Symmetric key from a RawKeyJSON -func (s *SymmetricKey) GenerateKey(keyJSON *RawKeyJSON) error { - - *s = SymmetricKey{ - StandardHeaders: &keyJSON.StandardHeaders, - key: keyJSON.K, - } - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go deleted file mode 100644 index 0c8b355087..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go +++ /dev/null @@ -1,154 +0,0 @@ -package jws - -import ( - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -// Constants for JWS Common parameters -const ( - AlgorithmKey = "alg" - ContentTypeKey = "cty" - CriticalKey = "crit" - JWKKey = "jwk" - JWKSetURLKey = "jku" - KeyIDKey = "kid" - PrivateParamsKey = "privateParams" - TypeKey = "typ" -) - -// Headers provides a common interface for common header parameters -type Headers interface { - Get(string) (interface{}, bool) - Set(string, interface{}) error - GetAlgorithm() jwa.SignatureAlgorithm -} - -// StandardHeaders contains JWS common parameters. 
-type StandardHeaders struct { - Algorithm jwa.SignatureAlgorithm `json:"alg,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.1 - ContentType string `json:"cty,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.10 - Critical []string `json:"crit,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.11 - JWK string `json:"jwk,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.3 - JWKSetURL string `json:"jku,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.2 - KeyID string `json:"kid,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4 - PrivateParams map[string]interface{} `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9 - Type string `json:"typ,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9 -} - -// GetAlgorithm returns algorithm -func (h *StandardHeaders) GetAlgorithm() jwa.SignatureAlgorithm { - return h.Algorithm -} - -// Get is a general getter function for StandardHeaders structure -func (h *StandardHeaders) Get(name string) (interface{}, bool) { - switch name { - case AlgorithmKey: - v := h.Algorithm - if v == "" { - return nil, false - } - return v, true - case ContentTypeKey: - v := h.ContentType - if v == "" { - return nil, false - } - return v, true - case CriticalKey: - v := h.Critical - if len(v) == 0 { - return nil, false - } - return v, true - case JWKKey: - v := h.JWK - if v == "" { - return nil, false - } - return v, true - case JWKSetURLKey: - v := h.JWKSetURL - if v == "" { - return nil, false - } - return v, true - case KeyIDKey: - v := h.KeyID - if v == "" { - return nil, false - } - return v, true - case PrivateParamsKey: - v := h.PrivateParams - if len(v) == 0 { - return nil, false - } - return v, true - case TypeKey: - v := h.Type - if v == "" { - return nil, false - } - return v, true - default: - return nil, false - } -} - -// Set is a general setter function for StandardHeaders structure -func (h *StandardHeaders) 
Set(name string, value interface{}) error { - switch name { - case AlgorithmKey: - if err := h.Algorithm.Accept(value); err != nil { - return fmt.Errorf("invalid value for %s key: %w", AlgorithmKey, err) - } - return nil - case ContentTypeKey: - if v, ok := value.(string); ok { - h.ContentType = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", ContentTypeKey, value) - case CriticalKey: - if v, ok := value.([]string); ok { - h.Critical = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", CriticalKey, value) - case JWKKey: - if v, ok := value.(string); ok { - h.JWK = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", JWKKey, value) - case JWKSetURLKey: - if v, ok := value.(string); ok { - h.JWKSetURL = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", JWKSetURLKey, value) - case KeyIDKey: - if v, ok := value.(string); ok { - h.KeyID = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", KeyIDKey, value) - case PrivateParamsKey: - if v, ok := value.(map[string]interface{}); ok { - h.PrivateParams = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", PrivateParamsKey, value) - case TypeKey: - if v, ok := value.(string); ok { - h.Type = v - return nil - } - return fmt.Errorf("invalid value for %s key: %T", TypeKey, value) - default: - return fmt.Errorf("invalid key: %s", name) - } -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go deleted file mode 100644 index e647c8ac93..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go +++ /dev/null @@ -1,22 +0,0 @@ -package jws - -// Message represents a full JWS encoded message. Flattened serialization -// is not supported as a struct, but rather it's represented as a -// Message struct with only one `Signature` element. 
-// -// Do not expect to use the Message object to verify or construct a -// signed payloads with. You should only use this when you want to actually -// want to programmatically view the contents for the full JWS Payload. -// -// To sign and verify, use the appropriate `SignWithOption()` nad `Verify()` functions -type Message struct { - Payload []byte `json:"payload"` - Signatures []*Signature `json:"signatures,omitempty"` -} - -// Signature represents the headers and signature of a JWS message -type Signature struct { - Headers Headers `json:"header,omitempty"` // Unprotected Headers - Protected Headers `json:"Protected,omitempty"` // Protected Headers - Signature []byte `json:"signature,omitempty"` // GetSignature -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go deleted file mode 100644 index 2a5fe3c173..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go +++ /dev/null @@ -1,220 +0,0 @@ -// Package jws implements the digital Signature on JSON based data -// structures as described in https://tools.ietf.org/html/rfc7515 -// -// If you do not care about the details, the only things that you -// would need to use are the following functions: -// -// jws.SignWithOption(Payload, algorithm, key) -// jws.Verify(encodedjws, algorithm, key) -// -// To sign, simply use `jws.SignWithOption`. `Payload` is a []byte buffer that -// contains whatever data you want to sign. `alg` is one of the -// jwa.SignatureAlgorithm constants from package jwa. For RSA and -// ECDSA family of algorithms, you will need to prepare a private key. -// For HMAC family, you just need a []byte value. The `jws.SignWithOption` -// function will return the encoded JWS message on success. -// -// To verify, use `jws.Verify`. It will parse the `encodedjws` buffer -// and verify the result using `algorithm` and `key`. 
Upon successful -// verification, the original Payload is returned, so you can work on it. -package jws - -import ( - "bytes" - "crypto/rand" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "strings" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jwk" - "github.com/open-policy-agent/opa/internal/jwx/jws/sign" - "github.com/open-policy-agent/opa/internal/jwx/jws/verify" -) - -// SignLiteral generates a Signature for the given Payload and Headers, and serializes -// it in compact serialization format. In this format you may NOT use -// multiple signers. -func SignLiteral(payload []byte, alg jwa.SignatureAlgorithm, key interface{}, hdrBuf []byte, rnd io.Reader) ([]byte, error) { - encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf) - encodedPayload := base64.RawURLEncoding.EncodeToString(payload) - signingInput := strings.Join( - []string{ - encodedHdr, - encodedPayload, - }, ".", - ) - signer, err := sign.New(alg) - if err != nil { - return nil, fmt.Errorf("failed to create signer: %w", err) - } - - var signature []byte - switch s := signer.(type) { - case *sign.ECDSASigner: - signature, err = s.SignWithRand([]byte(signingInput), key, rnd) - default: - signature, err = signer.Sign([]byte(signingInput), key) - } - if err != nil { - return nil, fmt.Errorf("failed to sign Payload: %w", err) - } - encodedSignature := base64.RawURLEncoding.EncodeToString(signature) - compactSerialization := strings.Join( - []string{ - signingInput, - encodedSignature, - }, ".", - ) - return []byte(compactSerialization), nil -} - -// SignWithOption generates a Signature for the given Payload, and serializes -// it in compact serialization format. In this format you may NOT use -// multiple signers. -// -// If you would like to pass custom Headers, use the WithHeaders option. 
-func SignWithOption(payload []byte, alg jwa.SignatureAlgorithm, key interface{}) ([]byte, error) { - var headers Headers = &StandardHeaders{} - - err := headers.Set(AlgorithmKey, alg) - if err != nil { - return nil, fmt.Errorf("failed to set alg value: %w", err) - } - - hdrBuf, err := json.Marshal(headers) - if err != nil { - return nil, fmt.Errorf("failed to marshal Headers: %w", err) - } - // NOTE(sr): we don't use SignWithOption -- if we did, this rand.Reader - // should come from the BuiltinContext's Seed, too. - return SignLiteral(payload, alg, key, hdrBuf, rand.Reader) -} - -// Verify checks if the given JWS message is verifiable using `alg` and `key`. -// If the verification is successful, `err` is nil, and the content of the -// Payload that was signed is returned. If you need more fine-grained -// control of the verification process, manually call `Parse`, generate a -// verifier, and call `Verify` on the parsed JWS message object. -func Verify(buf []byte, alg jwa.SignatureAlgorithm, key interface{}) (ret []byte, err error) { - - verifier, err := verify.New(alg) - if err != nil { - return nil, fmt.Errorf("failed to create verifier: %w", err) - } - - buf = bytes.TrimSpace(buf) - if len(buf) == 0 { - return nil, errors.New(`attempt to verify empty buffer`) - } - - parts, err := SplitCompact(string(buf[:])) - if err != nil { - return nil, fmt.Errorf("failed extract from compact serialization format: %w", err) - } - - signingInput := strings.Join( - []string{ - parts[0], - parts[1], - }, ".", - ) - - decodedSignature, err := base64.RawURLEncoding.DecodeString(parts[2]) - if err != nil { - return nil, fmt.Errorf("failed to decode signature: %w", err) - } - if err := verifier.Verify([]byte(signingInput), decodedSignature, key); err != nil { - return nil, fmt.Errorf("failed to verify message: %w", err) - } - - if decodedPayload, err := base64.RawURLEncoding.DecodeString(parts[1]); err == nil { - return decodedPayload, nil - } - return nil, fmt.Errorf("failed to 
decode Payload: %w", err) -} - -// VerifyWithJWK verifies the JWS message using the specified JWK -func VerifyWithJWK(buf []byte, key jwk.Key) (payload []byte, err error) { - - keyVal, err := key.Materialize() - if err != nil { - return nil, fmt.Errorf("failed to materialize key: %w", err) - } - return Verify(buf, key.GetAlgorithm(), keyVal) -} - -// VerifyWithJWKSet verifies the JWS message using JWK key set. -// By default it will only pick up keys that have the "use" key -// set to either "sig" or "enc", but you can override it by -// providing a keyaccept function. -func VerifyWithJWKSet(buf []byte, keyset *jwk.Set) (payload []byte, err error) { - - for _, key := range keyset.Keys { - payload, err := VerifyWithJWK(buf, key) - if err == nil { - return payload, nil - } - } - return nil, errors.New("failed to verify with any of the keys") -} - -// ParseByte parses a JWS value serialized via compact serialization and provided as []byte. -func ParseByte(jwsCompact []byte) (m *Message, err error) { - return parseCompact(string(jwsCompact[:])) -} - -// ParseString parses a JWS value serialized via compact serialization and provided as string. -func ParseString(s string) (*Message, error) { - return parseCompact(s) -} - -// SplitCompact splits a JWT and returns its three parts -// separately: Protected Headers, Payload and Signature. -func SplitCompact(jwsCompact string) ([]string, error) { - - parts := strings.Split(jwsCompact, ".") - if len(parts) < 3 { - return nil, errors.New("failed to split compact serialization") - } - return parts, nil -} - -// parseCompact parses a JWS value serialized via compact serialization. 
-func parseCompact(str string) (m *Message, err error) { - - var decodedHeader, decodedPayload, decodedSignature []byte - parts, err := SplitCompact(str) - if err != nil { - return nil, fmt.Errorf("invalid compact serialization format: %w", err) - } - - if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil { - return nil, fmt.Errorf("failed to decode Headers: %w", err) - } - var hdr StandardHeaders - if err := json.Unmarshal(decodedHeader, &hdr); err != nil { - return nil, fmt.Errorf("failed to parse JOSE Headers: %w", err) - } - - if decodedPayload, err = base64.RawURLEncoding.DecodeString(parts[1]); err != nil { - return nil, fmt.Errorf("failed to decode Payload: %w", err) - } - - if len(parts) > 2 { - if decodedSignature, err = base64.RawURLEncoding.DecodeString(parts[2]); err != nil { - return nil, fmt.Errorf("failed to decode Signature: %w", err) - } - } - - var msg Message - msg.Payload = decodedPayload - msg.Signatures = append(msg.Signatures, &Signature{ - Protected: &hdr, - Signature: decodedSignature, - }) - return &msg, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go deleted file mode 100644 index 1366a3d7be..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go +++ /dev/null @@ -1,26 +0,0 @@ -package jws - -// PublicHeaders returns the public headers in a JWS -func (s Signature) PublicHeaders() Headers { - return s.Headers -} - -// ProtectedHeaders returns the protected headers in a JWS -func (s Signature) ProtectedHeaders() Headers { - return s.Protected -} - -// GetSignature returns the signature in a JWS -func (s Signature) GetSignature() []byte { - return s.Signature -} - -// GetPayload returns the payload in a JWS -func (m Message) GetPayload() []byte { - return m.Payload -} - -// GetSignatures returns the all signatures in a JWS -func (m Message) GetSignatures() []*Signature { - 
return m.Signatures -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go deleted file mode 100644 index db1aadec67..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go +++ /dev/null @@ -1,90 +0,0 @@ -package sign - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "fmt" - "io" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -var ecdsaSignFuncs = map[jwa.SignatureAlgorithm]ecdsaSignFunc{} - -func init() { - algs := map[jwa.SignatureAlgorithm]crypto.Hash{ - jwa.ES256: crypto.SHA256, - jwa.ES384: crypto.SHA384, - jwa.ES512: crypto.SHA512, - } - - for alg, h := range algs { - ecdsaSignFuncs[alg] = makeECDSASignFunc(h) - } -} - -func makeECDSASignFunc(hash crypto.Hash) ecdsaSignFunc { - return ecdsaSignFunc(func(payload []byte, key *ecdsa.PrivateKey, rnd io.Reader) ([]byte, error) { - curveBits := key.Curve.Params().BitSize - keyBytes := curveBits / 8 - // Curve bits do not need to be a multiple of 8. - if curveBits%8 > 0 { - keyBytes++ - } - h := hash.New() - h.Write(payload) - r, s, err := ecdsa.Sign(rnd, key, h.Sum(nil)) - if err != nil { - return nil, fmt.Errorf("failed to sign payload using ecdsa: %w", err) - } - - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) 
- return out, nil - }) -} - -func newECDSA(alg jwa.SignatureAlgorithm) (*ECDSASigner, error) { - signfn, ok := ecdsaSignFuncs[alg] - if !ok { - return nil, fmt.Errorf("unsupported algorithm while trying to create ECDSA signer: %s", alg) - } - - return &ECDSASigner{ - alg: alg, - sign: signfn, - }, nil -} - -// Algorithm returns the signer algorithm -func (s ECDSASigner) Algorithm() jwa.SignatureAlgorithm { - return s.alg -} - -// SignWithRand signs payload with a ECDSA private key and a provided randomness -// source (such as `rand.Reader`). -func (s ECDSASigner) SignWithRand(payload []byte, key interface{}, r io.Reader) ([]byte, error) { - if key == nil { - return nil, errors.New("missing private key while signing payload") - } - - privateKey, ok := key.(*ecdsa.PrivateKey) - if !ok { - return nil, fmt.Errorf("invalid key type %T. *ecdsa.PrivateKey is required", key) - } - return s.sign(payload, privateKey, r) -} - -// Sign signs payload with a ECDSA private key -func (s ECDSASigner) Sign(payload []byte, key interface{}) ([]byte, error) { - return s.SignWithRand(payload, key, rand.Reader) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go deleted file mode 100644 index a4fad4208b..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go +++ /dev/null @@ -1,66 +0,0 @@ -package sign - -import ( - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "errors" - "fmt" - "hash" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -var hmacSignFuncs = map[jwa.SignatureAlgorithm]hmacSignFunc{} - -func init() { - algs := map[jwa.SignatureAlgorithm]func() hash.Hash{ - jwa.HS256: sha256.New, - jwa.HS384: sha512.New384, - jwa.HS512: sha512.New, - } - - for alg, h := range algs { - hmacSignFuncs[alg] = makeHMACSignFunc(h) - - } -} - -func newHMAC(alg jwa.SignatureAlgorithm) (*HMACSigner, error) { - signer, ok := hmacSignFuncs[alg] - if 
!ok { - return nil, fmt.Errorf(`unsupported algorithm while trying to create HMAC signer: %s`, alg) - } - - return &HMACSigner{ - alg: alg, - sign: signer, - }, nil -} - -func makeHMACSignFunc(hfunc func() hash.Hash) hmacSignFunc { - return hmacSignFunc(func(payload []byte, key []byte) ([]byte, error) { - h := hmac.New(hfunc, key) - h.Write(payload) - return h.Sum(nil), nil - }) -} - -// Algorithm returns the signer algorithm -func (s HMACSigner) Algorithm() jwa.SignatureAlgorithm { - return s.alg -} - -// Sign signs payload with a Symmetric key -func (s HMACSigner) Sign(payload []byte, key interface{}) ([]byte, error) { - hmackey, ok := key.([]byte) - if !ok { - return nil, fmt.Errorf(`invalid key type %T. []byte is required`, key) - } - - if len(hmackey) == 0 { - return nil, errors.New(`missing key while signing payload`) - } - - return s.sign(payload, hmackey) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go deleted file mode 100644 index 2ef2bee486..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -package sign - -import ( - "crypto/ecdsa" - "crypto/rsa" - "io" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -// Signer provides a common interface for supported alg signing methods -type Signer interface { - // Sign creates a signature for the given `payload`. - // `key` is the key used for signing the payload, and is usually - // the private key type associated with the signature method. For example, - // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the - // `*"crypto/rsa".PrivateKey` type. 
- // Check the documentation for each signer for details - Sign(payload []byte, key interface{}) ([]byte, error) - - Algorithm() jwa.SignatureAlgorithm -} - -type rsaSignFunc func([]byte, *rsa.PrivateKey) ([]byte, error) - -// RSASigner uses crypto/rsa to sign the payloads. -type RSASigner struct { - alg jwa.SignatureAlgorithm - sign rsaSignFunc -} - -type ecdsaSignFunc func([]byte, *ecdsa.PrivateKey, io.Reader) ([]byte, error) - -// ECDSASigner uses crypto/ecdsa to sign the payloads. -type ECDSASigner struct { - alg jwa.SignatureAlgorithm - sign ecdsaSignFunc -} - -type hmacSignFunc func([]byte, []byte) ([]byte, error) - -// HMACSigner uses crypto/hmac to sign the payloads. -type HMACSigner struct { - alg jwa.SignatureAlgorithm - sign hmacSignFunc -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go deleted file mode 100644 index 1e02993eb0..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go +++ /dev/null @@ -1,97 +0,0 @@ -package sign - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "errors" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -var rsaSignFuncs = map[jwa.SignatureAlgorithm]rsaSignFunc{} - -func init() { - algs := map[jwa.SignatureAlgorithm]struct { - Hash crypto.Hash - SignFunc func(crypto.Hash) rsaSignFunc - }{ - jwa.RS256: { - Hash: crypto.SHA256, - SignFunc: makeSignPKCS1v15, - }, - jwa.RS384: { - Hash: crypto.SHA384, - SignFunc: makeSignPKCS1v15, - }, - jwa.RS512: { - Hash: crypto.SHA512, - SignFunc: makeSignPKCS1v15, - }, - jwa.PS256: { - Hash: crypto.SHA256, - SignFunc: makeSignPSS, - }, - jwa.PS384: { - Hash: crypto.SHA384, - SignFunc: makeSignPSS, - }, - jwa.PS512: { - Hash: crypto.SHA512, - SignFunc: makeSignPSS, - }, - } - - for alg, item := range algs { - rsaSignFuncs[alg] = item.SignFunc(item.Hash) - } -} - -func makeSignPKCS1v15(hash crypto.Hash) rsaSignFunc { - return 
rsaSignFunc(func(payload []byte, key *rsa.PrivateKey) ([]byte, error) { - h := hash.New() - h.Write(payload) - return rsa.SignPKCS1v15(rand.Reader, key, hash, h.Sum(nil)) - }) -} - -func makeSignPSS(hash crypto.Hash) rsaSignFunc { - return rsaSignFunc(func(payload []byte, key *rsa.PrivateKey) ([]byte, error) { - h := hash.New() - h.Write(payload) - return rsa.SignPSS(rand.Reader, key, hash, h.Sum(nil), &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }) - }) -} - -func newRSA(alg jwa.SignatureAlgorithm) (*RSASigner, error) { - signfn, ok := rsaSignFuncs[alg] - if !ok { - return nil, fmt.Errorf(`unsupported algorithm while trying to create RSA signer: %s`, alg) - } - return &RSASigner{ - alg: alg, - sign: signfn, - }, nil -} - -// Algorithm returns the signer algorithm -func (s RSASigner) Algorithm() jwa.SignatureAlgorithm { - return s.alg -} - -// Sign creates a signature using crypto/rsa. key must be a non-nil instance of -// `*"crypto/rsa".PrivateKey`. -func (s RSASigner) Sign(payload []byte, key interface{}) ([]byte, error) { - if key == nil { - return nil, errors.New(`missing private key while signing payload`) - } - rsakey, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, fmt.Errorf(`invalid key type %T. *rsa.PrivateKey is required`, key) - } - - return s.sign(payload, rsakey) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go deleted file mode 100644 index 7db7bbd69c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go +++ /dev/null @@ -1,65 +0,0 @@ -package sign - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -// New creates a signer that signs payloads using the given signature algorithm. 
-func New(alg jwa.SignatureAlgorithm) (Signer, error) { - switch alg { - case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512: - return newRSA(alg) - case jwa.ES256, jwa.ES384, jwa.ES512: - return newECDSA(alg) - case jwa.HS256, jwa.HS384, jwa.HS512: - return newHMAC(alg) - default: - return nil, fmt.Errorf(`unsupported signature algorithm %s`, alg) - } -} - -// GetSigningKey returns a *rsa.PrivateKey or *ecdsa.PrivateKey typically encoded in PEM blocks of type "RSA PRIVATE KEY" -// or "EC PRIVATE KEY" for RSA and ECDSA family of algorithms. -// For HMAC family, it return a []byte value -func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) { - switch alg { - case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512: - block, _ := pem.Decode([]byte(key)) - if block == nil { - return nil, fmt.Errorf("failed to parse PEM block containing the key") - } - - priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - pkcs8priv, err2 := x509.ParsePKCS8PrivateKey(block.Bytes) - if err2 != nil { - return nil, fmt.Errorf("error parsing private key (%v), (%v)", err, err2) - } - return pkcs8priv, nil - } - return priv, nil - case jwa.ES256, jwa.ES384, jwa.ES512: - block, _ := pem.Decode([]byte(key)) - if block == nil { - return nil, fmt.Errorf("failed to parse PEM block containing the key") - } - - priv, err := x509.ParseECPrivateKey(block.Bytes) - if err != nil { - pkcs8priv, err2 := x509.ParsePKCS8PrivateKey(block.Bytes) - if err2 != nil { - return nil, fmt.Errorf("error parsing private key (%v), (%v)", err, err2) - } - return pkcs8priv, nil - } - return priv, nil - case jwa.HS256, jwa.HS384, jwa.HS512: - return []byte(key), nil - default: - return nil, fmt.Errorf("unsupported signature algorithm: %s", alg) - } -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go deleted file mode 100644 index 
0d4971dc19..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go +++ /dev/null @@ -1,67 +0,0 @@ -package verify - -import ( - "crypto" - "crypto/ecdsa" - "errors" - "fmt" - "math/big" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -var ecdsaVerifyFuncs = map[jwa.SignatureAlgorithm]ecdsaVerifyFunc{} - -func init() { - algs := map[jwa.SignatureAlgorithm]crypto.Hash{ - jwa.ES256: crypto.SHA256, - jwa.ES384: crypto.SHA384, - jwa.ES512: crypto.SHA512, - } - - for alg, h := range algs { - ecdsaVerifyFuncs[alg] = makeECDSAVerifyFunc(h) - } -} - -func makeECDSAVerifyFunc(hash crypto.Hash) ecdsaVerifyFunc { - return ecdsaVerifyFunc(func(payload []byte, signature []byte, key *ecdsa.PublicKey) error { - - r, s := &big.Int{}, &big.Int{} - n := len(signature) / 2 - r.SetBytes(signature[:n]) - s.SetBytes(signature[n:]) - - h := hash.New() - h.Write(payload) - - if !ecdsa.Verify(key, h.Sum(nil), r, s) { - return errors.New(`failed to verify signature using ecdsa`) - } - return nil - }) -} - -func newECDSA(alg jwa.SignatureAlgorithm) (*ECDSAVerifier, error) { - verifyfn, ok := ecdsaVerifyFuncs[alg] - if !ok { - return nil, fmt.Errorf(`unsupported algorithm while trying to create ECDSA verifier: %s`, alg) - } - - return &ECDSAVerifier{ - verify: verifyfn, - }, nil -} - -// Verify checks whether the signature for a given input and key is correct -func (v ECDSAVerifier) Verify(payload []byte, signature []byte, key interface{}) error { - if key == nil { - return errors.New(`missing public key while verifying payload`) - } - ecdsakey, ok := key.(*ecdsa.PublicKey) - if !ok { - return fmt.Errorf(`invalid key type %T. 
*ecdsa.PublicKey is required`, key) - } - - return v.verify(payload, signature, ecdsakey) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go deleted file mode 100644 index d8498f50f2..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go +++ /dev/null @@ -1,33 +0,0 @@ -package verify - -import ( - "crypto/hmac" - "errors" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws/sign" -) - -func newHMAC(alg jwa.SignatureAlgorithm) (*HMACVerifier, error) { - - s, err := sign.New(alg) - if err != nil { - return nil, fmt.Errorf("failed to generate HMAC signer: %w", err) - } - return &HMACVerifier{signer: s}, nil -} - -// Verify checks whether the signature for a given input and key is correct -func (v HMACVerifier) Verify(signingInput, signature []byte, key interface{}) (err error) { - - expected, err := v.signer.Sign(signingInput, key) - if err != nil { - return fmt.Errorf("failed to generated signature: %w", err) - } - - if !hmac.Equal(signature, expected) { - return errors.New("failed to match hmac signature") - } - return nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go deleted file mode 100644 index f5beb69741..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go +++ /dev/null @@ -1,39 +0,0 @@ -package verify - -import ( - "crypto/ecdsa" - "crypto/rsa" - - "github.com/open-policy-agent/opa/internal/jwx/jws/sign" -) - -// Verifier provides a common interface for supported alg verification methods -type Verifier interface { - // Verify checks whether the payload and signature are valid for - // the given key. 
- // `key` is the key used for verifying the payload, and is usually - // the public key associated with the signature method. For example, - // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the - // `*"crypto/rsa".PublicKey` type. - // Check the documentation for each verifier for details - Verify(payload []byte, signature []byte, key interface{}) error -} - -type rsaVerifyFunc func([]byte, []byte, *rsa.PublicKey) error - -// RSAVerifier implements the Verifier interface -type RSAVerifier struct { - verify rsaVerifyFunc -} - -type ecdsaVerifyFunc func([]byte, []byte, *ecdsa.PublicKey) error - -// ECDSAVerifier implements the Verifier interface -type ECDSAVerifier struct { - verify ecdsaVerifyFunc -} - -// HMACVerifier implements the Verifier interface -type HMACVerifier struct { - signer sign.Signer -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go deleted file mode 100644 index edc560dfa6..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go +++ /dev/null @@ -1,88 +0,0 @@ -package verify - -import ( - "crypto" - "crypto/rsa" - "errors" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -var rsaVerifyFuncs = map[jwa.SignatureAlgorithm]rsaVerifyFunc{} - -func init() { - algs := map[jwa.SignatureAlgorithm]struct { - Hash crypto.Hash - VerifyFunc func(crypto.Hash) rsaVerifyFunc - }{ - jwa.RS256: { - Hash: crypto.SHA256, - VerifyFunc: makeVerifyPKCS1v15, - }, - jwa.RS384: { - Hash: crypto.SHA384, - VerifyFunc: makeVerifyPKCS1v15, - }, - jwa.RS512: { - Hash: crypto.SHA512, - VerifyFunc: makeVerifyPKCS1v15, - }, - jwa.PS256: { - Hash: crypto.SHA256, - VerifyFunc: makeVerifyPSS, - }, - jwa.PS384: { - Hash: crypto.SHA384, - VerifyFunc: makeVerifyPSS, - }, - jwa.PS512: { - Hash: crypto.SHA512, - VerifyFunc: makeVerifyPSS, - }, - } - - for alg, item := range algs { - rsaVerifyFuncs[alg] = 
item.VerifyFunc(item.Hash) - } -} - -func makeVerifyPKCS1v15(hash crypto.Hash) rsaVerifyFunc { - return rsaVerifyFunc(func(payload, signature []byte, key *rsa.PublicKey) error { - h := hash.New() - h.Write(payload) - return rsa.VerifyPKCS1v15(key, hash, h.Sum(nil), signature) - }) -} - -func makeVerifyPSS(hash crypto.Hash) rsaVerifyFunc { - return rsaVerifyFunc(func(payload, signature []byte, key *rsa.PublicKey) error { - h := hash.New() - h.Write(payload) - return rsa.VerifyPSS(key, hash, h.Sum(nil), signature, nil) - }) -} - -func newRSA(alg jwa.SignatureAlgorithm) (*RSAVerifier, error) { - verifyfn, ok := rsaVerifyFuncs[alg] - if !ok { - return nil, fmt.Errorf(`unsupported algorithm while trying to create RSA verifier: %s`, alg) - } - - return &RSAVerifier{ - verify: verifyfn, - }, nil -} - -// Verify checks if a JWS is valid. -func (v RSAVerifier) Verify(payload, signature []byte, key interface{}) error { - if key == nil { - return errors.New(`missing public key while verifying payload`) - } - rsaKey, ok := key.(*rsa.PublicKey) - if !ok { - return fmt.Errorf(`invalid key type %T. 
*rsa.PublicKey is required`, key) - } - - return v.verify(payload, signature, rsaKey) -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go deleted file mode 100644 index 05720a64e0..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go +++ /dev/null @@ -1,55 +0,0 @@ -package verify - -import ( - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" -) - -// New creates a new JWS verifier using the specified algorithm -// and the public key -func New(alg jwa.SignatureAlgorithm) (Verifier, error) { - switch alg { - case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512: - return newRSA(alg) - case jwa.ES256, jwa.ES384, jwa.ES512: - return newECDSA(alg) - case jwa.HS256, jwa.HS384, jwa.HS512: - return newHMAC(alg) - default: - return nil, fmt.Errorf(`unsupported signature algorithm: %s`, alg) - } -} - -// GetSigningKey returns a *rsa.PublicKey or *ecdsa.PublicKey typically encoded in PEM blocks of type "PUBLIC KEY", -// for RSA and ECDSA family of algorithms. 
-// For HMAC family, it return a []byte value -func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) { - switch alg { - case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512, jwa.ES256, jwa.ES384, jwa.ES512: - block, _ := pem.Decode([]byte(key)) - if block == nil { - return nil, fmt.Errorf("failed to parse PEM block containing the key") - } - - pub, err := x509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return nil, err - } - - switch pub := pub.(type) { - case *rsa.PublicKey, *ecdsa.PublicKey: - return pub, nil - default: - return nil, fmt.Errorf("invalid key type %T", pub) - } - case jwa.HS256, jwa.HS384, jwa.HS512: - return []byte(key), nil - default: - return nil, fmt.Errorf("unsupported signature algorithm: %s", alg) - } -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go b/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go index 16f39350be..ba1a09c329 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go +++ b/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go @@ -8,7 +8,7 @@ package merge // InterfaceMaps returns the result of merging a and b. If a and b cannot be // merged because of conflicting key-value pairs, ok is false. 
-func InterfaceMaps(a map[string]interface{}, b map[string]interface{}) (map[string]interface{}, bool) { +func InterfaceMaps(a map[string]any, b map[string]any) (map[string]any, bool) { if a == nil { return b, true @@ -21,7 +21,7 @@ func InterfaceMaps(a map[string]interface{}, b map[string]interface{}) (map[stri return merge(a, b), true } -func merge(a, b map[string]interface{}) map[string]interface{} { +func merge(a, b map[string]any) map[string]any { for k := range b { @@ -32,8 +32,8 @@ func merge(a, b map[string]interface{}) map[string]interface{} { continue } - existObj := exist.(map[string]interface{}) - addObj := add.(map[string]interface{}) + existObj := exist.(map[string]any) + addObj := add.(map[string]any) a[k] = merge(existObj, addObj) } @@ -41,7 +41,7 @@ func merge(a, b map[string]interface{}) map[string]interface{} { return a } -func hasConflicts(a, b map[string]interface{}) bool { +func hasConflicts(a, b map[string]any) bool { for k := range b { add := b[k] @@ -50,8 +50,8 @@ func hasConflicts(a, b map[string]interface{}) bool { continue } - existObj, existOk := exist.(map[string]interface{}) - addObj, addOk := add.(map[string]interface{}) + existObj, existOk := exist.(map[string]any) + addObj, addOk := add.(map[string]any) if !existOk || !addOk { return true } diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go index b75d26ddab..445b5d7c2a 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go @@ -11,10 +11,10 @@ import ( "io" "sort" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/ast/location" "github.com/open-policy-agent/opa/internal/debug" - "github.com/open-policy-agent/opa/ir" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/ir" ) // 
QuerySet represents the input to the planner. @@ -51,10 +51,10 @@ type Planner struct { // debugf prepends the planner location. We're passing callstack depth 2 because // it should still log the file location of p.debugf. -func (p *Planner) debugf(format string, args ...interface{}) { +func (p *Planner) debugf(format string, args ...any) { var msg string if p.loc != nil { - msg = fmt.Sprintf("%s: "+format, append([]interface{}{p.loc}, args...)...) + msg = fmt.Sprintf("%s: "+format, append([]any{p.loc}, args...)...) } else { msg = fmt.Sprintf(format, args...) } @@ -211,23 +211,28 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) { // Set the location to the rule head. p.loc = rules[0].Head.Loc() + pcount := p.funcs.argVars() + params := make([]ir.Local, 0, pcount+len(rules[0].Head.Args)) + for range pcount { + params = append(params, p.newLocal()) + } // Create function definition for rules. fn := &ir.Func{ - Name: fmt.Sprintf("g%d.%s", p.funcs.gen(), path), - Params: []ir.Local{ - p.newLocal(), // input document - p.newLocal(), // data document - }, + Name: fmt.Sprintf("g%d.%s", p.funcs.gen(), path), + Params: params, Return: p.newLocal(), Path: append([]string{fmt.Sprintf("g%d", p.funcs.gen())}, pathPieces...), } // Initialize parameters for functions. - for i := 0; i < len(rules[0].Head.Args); i++ { + for range len(rules[0].Head.Args) { fn.Params = append(fn.Params, p.newLocal()) } - params := fn.Params[2:] + // only those added as formal parameters: + // f(x, y) is planned as f(data, input, x, y) + // pcount > 2 means there are vars passed along through with replacements by variables + params = fn.Params[pcount:] // Initialize return value for partial set/object rules. Complete document // rules assign directly to `fn.Return`. @@ -301,10 +306,11 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) { // Setup planner for block. 
p.lnext = lnext - p.vars = newVarstack(map[ast.Var]ir.Local{ - ast.InputRootDocument.Value.(ast.Var): fn.Params[0], - ast.DefaultRootDocument.Value.(ast.Var): fn.Params[1], - }) + vs := make(map[ast.Var]ir.Local, p.funcs.argVars()) + for i, v := range p.funcs.vars() { + vs[v] = fn.Params[i] + } + p.vars = newVarstack(vs) curr := &ir.Block{} *blocks = append(*blocks, curr) @@ -385,7 +391,7 @@ func (p *Planner) planRules(rules []*ast.Rule) (string, error) { return nil }) default: - return fmt.Errorf("illegal rule kind") + return errors.New("illegal rule kind") } }) }) @@ -497,7 +503,6 @@ func (p *Planner) planDotOr(obj ir.Local, key ir.Operand, or stmtFactory, iter p func (p *Planner) planNestedObjects(obj ir.Local, ref ast.Ref, iter planLocalIter) error { if len(ref) == 0 { - //return fmt.Errorf("nested object construction didn't create object") return iter(obj) } @@ -673,13 +678,17 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error { values := make([]*ast.Term, 0, len(e.With)) // NOTE(sr): we could be overallocating if there are builtin replacements targets := make([]ast.Ref, 0, len(e.With)) + vars := []ast.Var{} mocks := frame{} for _, w := range e.With { v := w.Target.Value.(ast.Ref) switch { - case p.isFunction(v): // nothing to do + case p.isFunctionOrBuiltin(v): // track var values + if wvar, ok := w.Value.Value.(ast.Var); ok { + vars = append(vars, wvar) + } case ast.DefaultRootDocument.Equal(v[0]) || ast.InputRootDocument.Equal(v[0]): @@ -736,7 +745,7 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error { // planning of this expression (transitively). 
shadowing := p.dataRefsShadowRuletrie(dataRefs) || len(mocks) > 0 if shadowing { - p.funcs.Push(map[string]string{}) + p.funcs.Push(map[string]string{}, vars) for _, ref := range dataRefs { p.rules.Push(ref) } @@ -757,7 +766,7 @@ func (p *Planner) planWith(e *ast.Expr, iter planiter) error { p.mocks.PushFrame(mocks) if shadowing { - p.funcs.Push(map[string]string{}) + p.funcs.Push(map[string]string{}, vars) for _, ref := range dataRefs { p.rules.Push(ref) } @@ -991,8 +1000,16 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error { op := e.Operator() if replacement := p.mocks.Lookup(operator); replacement != nil { - switch r := replacement.Value.(type) { - case ast.Ref: + if _, ok := replacement.Value.(ast.Var); ok { + var arity int + if node := p.rules.Lookup(op); node != nil { + arity = node.Arity() // NB(sr): We don't need to plan what isn't called, only lookup arity + } else if bi, ok := p.decls[operator]; ok { + arity = bi.Decl.Arity() + } + return p.planExprCallValue(replacement, arity, operands, iter) + } + if r, ok := replacement.Value.(ast.Ref); ok { if !r.HasPrefix(ast.DefaultRootRef) && !r.HasPrefix(ast.InputRootRef) { // replacement is builtin operator = r.String() @@ -1020,7 +1037,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error { // replacement is a value, or ref if bi, ok := p.decls[operator]; ok { - return p.planExprCallValue(replacement, len(bi.Decl.FuncArgs().Args), operands, iter) + return p.planExprCallValue(replacement, bi.Decl.Arity(), operands, iter) } if node := p.rules.Lookup(op); node != nil { return p.planExprCallValue(replacement, node.Arity(), operands, iter) @@ -1037,7 +1054,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error { args = p.defaultOperands() } else if decl, ok := p.decls[operator]; ok { relation = decl.Relation - arity = len(decl.Decl.Args()) + arity = decl.Decl.Arity() void = decl.Decl.Result() == nil name = operator p.externs[operator] = decl @@ -1147,7 +1164,7 @@ func 
(p *Planner) planExprCallFunc(name string, arity int, void bool, operands [ }) default: - return fmt.Errorf("impossible replacement, arity mismatch") + return errors.New("impossible replacement, arity mismatch") } } @@ -1173,7 +1190,7 @@ func (p *Planner) planExprCallValue(value *ast.Term, arity int, operands []*ast. }) }) default: - return fmt.Errorf("impossible replacement, arity mismatch") + return errors.New("impossible replacement, arity mismatch") } } @@ -1519,7 +1536,7 @@ func (p *Planner) planValue(t ast.Value, loc *ast.Location, iter planiter) error p.loc = loc return p.planObjectComprehension(v, iter) default: - return fmt.Errorf("%v term not implemented", ast.TypeName(v)) + return fmt.Errorf("%v term not implemented", ast.ValueName(v)) } } @@ -1564,9 +1581,7 @@ func (p *Planner) planString(str ast.String, iter planiter) error { } func (p *Planner) planVar(v ast.Var, iter planiter) error { - p.ltarget = op(p.vars.GetOrElse(v, func() ir.Local { - return p.newLocal() - })) + p.ltarget = op(p.vars.GetOrElse(v, p.newLocal)) return iter() } @@ -1750,10 +1765,10 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error { head, ok := ref[0].Value.(ast.Var) if !ok { - return fmt.Errorf("illegal ref: non-var head") + return errors.New("illegal ref: non-var head") } - if head.Compare(ast.DefaultRootDocument.Value) == 0 { + if head.Equal(ast.DefaultRootDocument.Value) { virtual := p.rules.Get(ref[0].Value) base := &baseptr{local: p.vars.GetOrEmpty(ast.DefaultRootDocument.Value.(ast.Var))} return p.planRefData(virtual, base, ref, 1, iter) @@ -1767,7 +1782,7 @@ func (p *Planner) planRef(ref ast.Ref, iter planiter) error { p.ltarget, ok = p.vars.GetOp(head) if !ok { - return fmt.Errorf("illegal ref: unsafe head") + return errors.New("illegal ref: unsafe head") } return p.planRefRec(ref, 1, iter) @@ -1924,12 +1939,15 @@ func (p *Planner) planRefData(virtual *ruletrie, base *baseptr, ref ast.Ref, ind if err != nil { return err } - - p.appendStmt(&ir.CallStmt{ + call 
:= ir.CallStmt{ Func: funcName, - Args: p.defaultOperands(), + Args: make([]ir.Operand, 0, p.funcs.argVars()), Result: p.ltarget.Value.(ir.Local), - }) + } + for _, v := range p.funcs.vars() { + call.Args = append(call.Args, p.vars.GetOpOrEmpty(v)) + } + p.appendStmt(&call) return p.planRefRec(ref, index+1, iter) } @@ -2052,7 +2070,7 @@ func (p *Planner) planRefDataExtent(virtual *ruletrie, base *baseptr, iter plani } } if anyKeyNonGround { - var rules []*ast.Rule + rules := make([]*ast.Rule, 0, len(virtual.Children())) for _, key := range virtual.Children() { // TODO(sr): skip functions rules = append(rules, virtual.Get(key).Rules()...) @@ -2365,6 +2383,10 @@ func rewrittenVar(vars map[ast.Var]ast.Var, k ast.Var) ast.Var { return rw } +func dont() ([][]*ast.Rule, []ir.Operand, int, bool) { + return nil, nil, 0, false +} + // optimizeLookup returns a set of rulesets and required statements planning // the locals (strings) needed with the used local variables, and the index // into ref's parth that is still to be planned; if the passed ref's vars @@ -2381,9 +2403,6 @@ func rewrittenVar(vars map[ast.Var]ast.Var, k ast.Var) ast.Var { // var actually matched_ -- so we don't know which subtree to evaluate // with the results. 
func (p *Planner) optimizeLookup(t *ruletrie, ref ast.Ref) ([][]*ast.Rule, []ir.Operand, int, bool) { - dont := func() ([][]*ast.Rule, []ir.Operand, int, bool) { - return nil, nil, 0, false - } if t == nil { p.debugf("no optimization of %s: trie is nil", ref) return dont() @@ -2411,6 +2430,10 @@ outer: opt = true // take all children, they might match for _, node := range nodes { + if nr := node.Rules(); len(nr) > 0 { + p.debugf("no optimization of %s: node with rules (%v)", ref, refsOfRules(nr)) + return dont() + } for _, child := range node.Children() { if node := node.Get(child); node != nil { nextNodes = append(nextNodes, node) @@ -2418,8 +2441,12 @@ outer: } } case ast.String: - // take all children that either match or have a var key + // take all children that either match or have a var key // TODO(sr): Where's the code for the second part, having a var key? for _, node := range nodes { + if nr := node.Rules(); len(nr) > 0 { + p.debugf("no optimization of %s: node with rules (%v)", ref, refsOfRules(nr)) + return dont() + } if node := node.Get(r); node != nil { nextNodes = append(nextNodes, node) } @@ -2438,10 +2465,20 @@ outer: // let us break, too. all := 0 for _, node := range nodes { - all += node.ChildrenCount() + if i < len(ref)-1 { + // Look ahead one term to only count those children relevant to your planned ref. 
+ switch ref[i+1].Value.(type) { + case ast.Var: + all += node.ChildrenCount() + default: + if relChildren := node.Get(ref[i+1].Value); relChildren != nil { + all++ + } + } + } } if all == 0 { - p.debugf("ref %s: all nodes have 0 children, break", ref[0:index+1]) + p.debugf("ref %s: all nodes have 0 relevant children, break", ref[0:index+1]) break } @@ -2534,19 +2571,30 @@ func (p *Planner) unseenVars(t *ast.Term) bool { } func (p *Planner) defaultOperands() []ir.Operand { - return []ir.Operand{ - p.vars.GetOpOrEmpty(ast.InputRootDocument.Value.(ast.Var)), - p.vars.GetOpOrEmpty(ast.DefaultRootDocument.Value.(ast.Var)), + pcount := p.funcs.argVars() + operands := make([]ir.Operand, pcount) + for i, v := range p.funcs.vars() { + operands[i] = p.vars.GetOpOrEmpty(v) } + return operands } -func (p *Planner) isFunction(r ast.Ref) bool { +func (p *Planner) isFunctionOrBuiltin(r ast.Ref) bool { if node := p.rules.Lookup(r); node != nil { return node.Arity() > 0 } - return false + _, ok := p.decls[r.String()] + return ok } func op(v ir.Val) ir.Operand { return ir.Operand{Value: v} } + +func refsOfRules(rs []*ast.Rule) []string { + refs := make([]string, len(rs)) + for i := range rs { + refs[i] = rs[i].Head.Ref().String() + } + return refs +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go index f5d6f3fc6c..9f3d115293 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go @@ -4,7 +4,7 @@ import ( "fmt" "sort" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // funcstack implements a simple map structure used to keep track of virtual @@ -20,20 +20,44 @@ type funcstack struct { } type taggedPairs struct { - pairs map[string]string - gen int + pairs map[string]string + vars []ast.Var + vcount int + gen int } func newFuncstack() *funcstack { return 
&funcstack{ - stack: []taggedPairs{{pairs: map[string]string{}, gen: 0}}, - next: 1} + stack: []taggedPairs{ + { + pairs: map[string]string{}, + gen: 0, + vars: []ast.Var{ + ast.InputRootDocument.Value.(ast.Var), + ast.DefaultRootDocument.Value.(ast.Var), + }, + vcount: 2, + }, + }, + next: 1} } func (p funcstack) last() taggedPairs { return p.stack[len(p.stack)-1] } +func (p funcstack) argVars() int { + return p.last().vcount +} + +func (p funcstack) vars() []ast.Var { + ret := make([]ast.Var, 0, p.last().vcount) + for i := range p.stack { + ret = append(ret, p.stack[i].vars...) + } + return ret +} + func (p funcstack) Add(key, value string) { p.last().pairs[key] = value } @@ -43,8 +67,13 @@ func (p funcstack) Get(key string) (string, bool) { return value, ok } -func (p *funcstack) Push(funcs map[string]string) { - p.stack = append(p.stack, taggedPairs{pairs: funcs, gen: p.next}) +func (p *funcstack) Push(funcs map[string]string, vars []ast.Var) { + p.stack = append(p.stack, taggedPairs{ + pairs: funcs, + gen: p.next, + vars: vars, + vcount: p.last().vcount + len(vars), + }) p.next++ } @@ -111,7 +140,7 @@ func (t *ruletrie) Rules() []*ast.Rule { func (t *ruletrie) Push(key ast.Ref) { node := t - for i := 0; i < len(key)-1; i++ { + for i := range len(key) - 1 { node = node.Get(key[i].Value) if node == nil { return @@ -123,7 +152,7 @@ func (t *ruletrie) Push(key ast.Ref) { func (t *ruletrie) Pop(key ast.Ref) { node := t - for i := 0; i < len(key)-1; i++ { + for i := range len(key) - 1 { node = node.Get(key[i].Value) if node == nil { return diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go b/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go index dccff1b5c1..0df6bcd8b2 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go +++ b/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go @@ -5,8 +5,8 @@ package planner import ( - "github.com/open-policy-agent/opa/ast" - 
"github.com/open-policy-agent/opa/ir" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" ) type varstack []map[ast.Var]ir.Local diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go index 1d0f25f8c2..e2514423b7 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/compare.go @@ -1,6 +1,6 @@ package crypto -import "fmt" +import "errors" // ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison // if the two byte slices assuming they represent a big-endian number. @@ -11,12 +11,12 @@ import "fmt" // +1 if x > y func ConstantTimeByteCompare(x, y []byte) (int, error) { if len(x) != len(y) { - return 0, fmt.Errorf("slice lengths do not match") + return 0, errors.New("slice lengths do not match") } xLarger, yLarger := 0, 0 - for i := 0; i < len(x); i++ { + for i := range x { xByte, yByte := int(x[i]), int(y[i]) x := ((yByte - xByte) >> 8) & 1 diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go index 758c73fcb3..f93261a809 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/crypto/ecc.go @@ -2,11 +2,13 @@ package crypto import ( "bytes" + "crypto/ecdh" "crypto/ecdsa" "crypto/elliptic" "crypto/hmac" "encoding/asn1" "encoding/binary" + "errors" "fmt" "hash" "math" @@ -26,27 +28,77 @@ func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey { // ECDSAKeyFromPoint takes the given elliptic curve and point and returns the // private and public keypair func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) 
*ecdsa.PrivateKey { - pX, pY := curve.ScalarBaseMult(d.Bytes()) + dBytes := make([]byte, (curve.Params().BitSize+7)/8) + d.FillBytes(dBytes) privKey := &ecdsa.PrivateKey{ PublicKey: ecdsa.PublicKey{ Curve: curve, - X: pX, - Y: pY, }, D: d, } + var pubBytes []byte + switch curve { + case elliptic.P256(): + if ecdhPriv, err := ecdh.P256().NewPrivateKey(dBytes); err == nil { + pubBytes = ecdhPriv.PublicKey().Bytes() + } + case elliptic.P384(): + if ecdhPriv, err := ecdh.P384().NewPrivateKey(dBytes); err == nil { + pubBytes = ecdhPriv.PublicKey().Bytes() + } + case elliptic.P521(): + if ecdhPriv, err := ecdh.P521().NewPrivateKey(dBytes); err == nil { + pubBytes = ecdhPriv.PublicKey().Bytes() + } + } + + if len(pubBytes) > 0 { + byteLen := (curve.Params().BitSize + 7) / 8 + privKey.X = new(big.Int).SetBytes(pubBytes[1 : 1+byteLen]) + privKey.Y = new(big.Int).SetBytes(pubBytes[1+byteLen:]) + } else { + panic(fmt.Sprintf("unsupported curve or invalid private key: %v", curve)) + } + return privKey } +// mathIntToBytes writes val as a big-endian, fixed-length byte slice into out, +// zero-padding on the left when val.Bytes() is shorter than out. This satisfies +// the uncompressed SEC 1 encoding (0x04 || X || Y) expected by crypto/ecdh's +// NewPublicKey: https://pkg.go.dev/crypto/ecdh#Curve.NewPublicKey +func mathIntToBytes(val *big.Int, out []byte) { + valBytes := val.Bytes() + copy(out[len(out)-len(valBytes):], valBytes) +} + // ECDSAPublicKey takes the provide curve and (x, y) coordinates and returns // *ecdsa.PublicKey. Returns an error if the given points are not on the curve. 
func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) { xPoint := (&big.Int{}).SetBytes(x) yPoint := (&big.Int{}).SetBytes(y) - if !curve.IsOnCurve(xPoint, yPoint) { + byteLen := (curve.Params().BitSize + 7) / 8 + buf := make([]byte, 1+2*byteLen) + buf[0] = 4 // uncompressed point + mathIntToBytes(xPoint, buf[1:1+byteLen]) + mathIntToBytes(yPoint, buf[1+byteLen:]) + + var err error + switch curve { + case elliptic.P256(): + _, err = ecdh.P256().NewPublicKey(buf) + case elliptic.P384(): + _, err = ecdh.P384().NewPublicKey(buf) + case elliptic.P521(): + _, err = ecdh.P521().NewPublicKey(buf) + default: + err = fmt.Errorf("unsupported curve for ECDSA: %v", curve) + } + + if err != nil { return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String()) } @@ -82,7 +134,7 @@ func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, con // verify the requested bit length is not larger then the length encoding size if int64(bitLen) > 0x7FFFFFFF { - return nil, fmt.Errorf("bitLen is greater than 32-bits") + return nil, errors.New("bitLen is greater than 32-bits") } fixedInput := bytes.NewBuffer(nil) diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go index 179b5b5d5e..55e587e9f5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go @@ -11,7 +11,7 @@ import ( "time" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // Values taken from diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go index 77c0bc9349..6dfb06a496 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go 
+++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go @@ -10,7 +10,7 @@ import ( "time" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // Values taken from diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go index bfb780754b..c463ccbff8 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go @@ -8,18 +8,19 @@ import ( "bytes" "crypto/hmac" "crypto/sha256" + "encoding/hex" "errors" "fmt" "io" "net/http" "net/url" - "sort" "strings" "time" v4 "github.com/open-policy-agent/opa/internal/providers/aws/v4" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) func stringFromTerm(t *ast.Term) string { @@ -67,19 +68,6 @@ func sha256MAC(message string, key []byte) []byte { return mac.Sum(nil) } -func sortKeys(strMap map[string][]string) []string { - keys := make([]string, len(strMap)) - - i := 0 - for k := range strMap { - keys[i] = k - i++ - } - sort.Strings(keys) - - return keys -} - // SignRequest modifies an http.Request to include an AWS V4 signature based on the provided credentials. func SignRequest(req *http.Request, service string, creds Credentials, theTime time.Time, sigVersion string) error { // General ref. 
https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html @@ -168,8 +156,10 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] canonicalReq += theURL.RawQuery + "\n" // RAW Query String // include the values for the signed headers - orderedKeys := sortKeys(headersToSign) + orderedKeys := util.KeysSorted(headersToSign) for _, k := range orderedKeys { + // TODO: fix later + //nolint:perfsprint canonicalReq += k + ":" + strings.Join(headersToSign[k], ",") + "\n" } canonicalReq += "\n" // linefeed to terminate headers @@ -202,7 +192,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] authHeader := "AWS4-HMAC-SHA256 Credential=" + awsCreds.AccessKey + "/" + dateNow authHeader += "/" + awsCreds.RegionName + "/" + service + "/aws4_request," authHeader += "SignedHeaders=" + headerList + "," - authHeader += "Signature=" + fmt.Sprintf("%x", signature) + authHeader += "Signature=" + hex.EncodeToString(signature) return authHeader, awsHeaders } diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go index 929f2006e7..db20eddc9d 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4a.go @@ -4,12 +4,13 @@ package aws import ( "bytes" "crypto" + "crypto/ecdh" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/sha256" "encoding/hex" - "fmt" + "errors" "hash" "io" "math/big" @@ -107,7 +108,7 @@ func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, counter++ if counter > 0xFF { - return nil, fmt.Errorf("exhausted single byte external counter") + return nil, errors.New("exhausted single byte external counter") } } d = d.Add(d, one) @@ -115,7 +116,18 @@ func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, priv := 
new(ecdsa.PrivateKey) priv.PublicKey.Curve = p256 priv.D = d - priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes()) + + dBytes := make([]byte, 32) + d.FillBytes(dBytes) + + ecdhPriv, err := ecdh.P256().NewPrivateKey(dBytes) + if err != nil { + return nil, err + } + pubBytes := ecdhPriv.PublicKey().Bytes() + + priv.PublicKey.X = new(big.Int).SetBytes(pubBytes[1:33]) + priv.PublicKey.Y = new(big.Int).SetBytes(pubBytes[33:]) return priv, nil } @@ -146,7 +158,7 @@ func retrievePrivateKey(symmetric Credentials) (v4aCredentials, error) { privateKey, err := deriveKeyFromAccessKeyPair(symmetric.AccessKey, symmetric.SecretKey) if err != nil { - return v4aCredentials{}, fmt.Errorf("failed to derive asymmetric key from credentials") + return v4aCredentials{}, errors.New("failed to derive asymmetric key from credentials") } creds := v4aCredentials{ @@ -216,7 +228,7 @@ func (s *httpSigner) Build() (signedRequest, error) { signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) - rawQuery := strings.Replace(query.Encode(), "+", "%20", -1) + rawQuery := strings.ReplaceAll(query.Encode(), "+", "%20") canonicalURI := v4Internal.GetURIPath(req.URL) @@ -280,7 +292,7 @@ func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature return parts.String() } -func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { +func (*httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { signed = make(http.Header) const hostHeader = "host" @@ -314,7 +326,7 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he var canonicalHeaders strings.Builder n := len(headers) const colon = ':' - for i := 0; i < n; i++ { + 
for i := range n { if headers[i] == hostHeader { canonicalHeaders.WriteString(hostHeader) canonicalHeaders.WriteRune(colon) diff --git a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go index e033da7460..d43339c961 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go +++ b/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go @@ -5,7 +5,7 @@ import ( "io" "net/http" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // DoRequestWithClient is a convenience function to get the body of an HTTP response with @@ -18,7 +18,7 @@ func DoRequestWithClient(req *http.Request, client *http.Client, desc string, lo } defer resp.Body.Close() - logger.WithFields(map[string]interface{}{ + logger.WithFields(map[string]any{ "url": req.URL.String(), "status": resp.Status, "headers": resp.Header, diff --git a/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go b/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go index 6e84df4b08..9590b8886b 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go +++ b/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go @@ -7,19 +7,16 @@ package ref import ( "errors" - "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/util" ) // ParseDataPath returns a ref from the slash separated path s rooted at data. // All path segments are treated as identifier strings. 
func ParseDataPath(s string) (ast.Ref, error) { - - s = "/" + strings.TrimPrefix(s, "/") - - path, ok := storage.ParsePath(s) + path, ok := storage.ParsePath(util.WithPrefix(s, "/")) if !ok { return nil, errors.New("invalid path") } @@ -29,7 +26,7 @@ func ParseDataPath(s string) (ast.Ref, error) { // ArrayPath will take an ast.Array and build an ast.Ref using the ast.Terms in the Array func ArrayPath(a *ast.Array) ast.Ref { - var ref ast.Ref + ref := make(ast.Ref, 0, a.Len()) a.Foreach(func(term *ast.Term) { ref = append(ref, term) diff --git a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go index 36ee844504..7defdf788c 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go +++ b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/engine.go @@ -36,10 +36,10 @@ type EvalEngine interface { Init() (EvalEngine, error) Entrypoints(context.Context) (map[string]int32, error) WithPolicyBytes([]byte) EvalEngine - WithDataJSON(interface{}) EvalEngine + WithDataJSON(any) EvalEngine Eval(context.Context, EvalOpts) (*Result, error) - SetData(context.Context, interface{}) error - SetDataPath(context.Context, []string, interface{}) error + SetData(context.Context, any) error + SetDataPath(context.Context, []string, any) error RemoveDataPath(context.Context, []string) error Close() } diff --git a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go index b58a05ee8e..97aa41bf0e 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go +++ b/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go @@ -4,11 +4,11 @@ import ( "io" "time" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - 
"github.com/open-policy-agent/opa/topdown/print" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" ) // Result holds the evaluation result. @@ -18,7 +18,7 @@ type Result struct { // EvalOpts define options for performing an evaluation. type EvalOpts struct { - Input *interface{} + Input *any Metrics metrics.Metrics Entrypoint int32 Time time.Time diff --git a/vendor/github.com/open-policy-agent/opa/internal/report/report.go b/vendor/github.com/open-policy-agent/opa/internal/report/report.go deleted file mode 100644 index 145d0a9465..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/report/report.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package report provides functions to report OPA's version information to an external service and process the response. -package report - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "os" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" - - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/util" - "github.com/open-policy-agent/opa/version" -) - -// ExternalServiceURL is the base HTTP URL for a telemetry service. -// If not otherwise specified it will use the hard coded default. -// -// Override at build time via: -// -// -ldflags "-X github.com/open-policy-agent/opa/internal/report.ExternalServiceURL=" -// -// This will be overridden if the OPA_TELEMETRY_SERVICE_URL environment variable -// is provided. 
-var ExternalServiceURL = "https://telemetry.openpolicyagent.org" - -// Reporter reports information such as the version, heap usage about the running OPA instance to an external service -type Reporter struct { - body map[string]any - client rest.Client - - gatherers map[string]Gatherer - gatherersMtx sync.Mutex -} - -// Gatherer represents a mechanism to inject additional data in the telemetry report -type Gatherer func(ctx context.Context) (any, error) - -// DataResponse represents the data returned by the external service -type DataResponse struct { - Latest ReleaseDetails `json:"latest,omitempty"` -} - -// ReleaseDetails holds information about the latest OPA release -type ReleaseDetails struct { - Download string `json:"download,omitempty"` // link to download the OPA release - ReleaseNotes string `json:"release_notes,omitempty"` // link to the OPA release notes - LatestRelease string `json:"latest_release,omitempty"` // latest OPA released version - OPAUpToDate bool `json:"opa_up_to_date,omitempty"` // is running OPA version greater than or equal to the latest released -} - -// Options supplies parameters to the reporter. 
-type Options struct { - Logger logging.Logger -} - -// New returns an instance of the Reporter -func New(id string, opts Options) (*Reporter, error) { - r := Reporter{ - gatherers: map[string]Gatherer{}, - } - r.body = map[string]any{ - "id": id, - "version": version.Version, - } - - url := os.Getenv("OPA_TELEMETRY_SERVICE_URL") - if url == "" { - url = ExternalServiceURL - } - - restConfig := []byte(fmt.Sprintf(`{ - "url": %q, - }`, url)) - - client, err := rest.New(restConfig, map[string]*keys.Config{}, rest.Logger(opts.Logger)) - if err != nil { - return nil, err - } - r.client = client - - // heap_usage_bytes is always present, so register it unconditionally - r.RegisterGatherer("heap_usage_bytes", readRuntimeMemStats) - - return &r, nil -} - -// SendReport sends the telemetry report which includes information such as the OPA version, current memory usage to -// the external service -func (r *Reporter) SendReport(ctx context.Context) (*DataResponse, error) { - rCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - r.gatherersMtx.Lock() - defer r.gatherersMtx.Unlock() - for key, g := range r.gatherers { - var err error - r.body[key], err = g(rCtx) - if err != nil { - return nil, fmt.Errorf("gather telemetry error for key %s: %w", key, err) - } - } - - resp, err := r.client.WithJSON(r.body).Do(rCtx, "POST", "/v1/version") - if err != nil { - return nil, err - } - - defer util.Close(resp) - - switch resp.StatusCode { - case http.StatusOK: - if resp.Body != nil { - var result DataResponse - err := json.NewDecoder(resp.Body).Decode(&result) - if err != nil { - return nil, err - } - return &result, nil - } - return nil, nil - default: - return nil, fmt.Errorf("server replied with HTTP %v", resp.StatusCode) - } -} - -func (r *Reporter) RegisterGatherer(key string, f Gatherer) { - r.gatherersMtx.Lock() - r.gatherers[key] = f - r.gatherersMtx.Unlock() -} - -// IsSet returns true if dr is populated. 
-func (dr *DataResponse) IsSet() bool { - return dr != nil && dr.Latest.LatestRelease != "" && dr.Latest.Download != "" && dr.Latest.ReleaseNotes != "" -} - -// Slice returns the dr as a slice of key-value string pairs. If dr is nil, this function returns an empty slice. -func (dr *DataResponse) Slice() [][2]string { - - if !dr.IsSet() { - return nil - } - - return [][2]string{ - {"Latest Upstream Version", strings.TrimPrefix(dr.Latest.LatestRelease, "v")}, - {"Download", dr.Latest.Download}, - {"Release Notes", dr.Latest.ReleaseNotes}, - } -} - -// Pretty returns OPA release information in a human-readable format. -func (dr *DataResponse) Pretty() string { - if !dr.IsSet() { - return "" - } - - pairs := dr.Slice() - lines := make([]string, 0, len(pairs)) - - for _, pair := range pairs { - lines = append(lines, fmt.Sprintf("%v: %v", pair[0], pair[1])) - } - - return strings.Join(lines, "\n") -} - -func readRuntimeMemStats(_ context.Context) (any, error) { - var m runtime.MemStats - runtime.ReadMemStats(&m) - return strconv.FormatUint(m.Alloc, 10), nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go b/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go deleted file mode 100644 index b1a5b71577..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package init is an internal package with helpers for data and policy loading during initialization. 
-package init - -import ( - "context" - "fmt" - "io/fs" - "path/filepath" - "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" - storedversion "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" -) - -// InsertAndCompileOptions contains the input for the operation. -type InsertAndCompileOptions struct { - Store storage.Store - Txn storage.Transaction - Files loader.Result - Bundles map[string]*bundle.Bundle - MaxErrors int - EnablePrintStatements bool - ParserOptions ast.ParserOptions -} - -// InsertAndCompileResult contains the output of the operation. -type InsertAndCompileResult struct { - Compiler *ast.Compiler - Metrics metrics.Metrics -} - -// InsertAndCompile writes data and policy into the store and returns a compiler for the -// store contents. -func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*InsertAndCompileResult, error) { - if len(opts.Files.Documents) > 0 { - if err := opts.Store.Write(ctx, opts.Txn, storage.AddOp, storage.Path{}, opts.Files.Documents); err != nil { - return nil, fmt.Errorf("storage error: %w", err) - } - } - - policies := make(map[string]*ast.Module, len(opts.Files.Modules)) - - for id, parsed := range opts.Files.Modules { - policies[id] = parsed.Parsed - } - - compiler := ast.NewCompiler(). - SetErrorLimit(opts.MaxErrors). - WithPathConflictsCheck(storage.NonEmpty(ctx, opts.Store, opts.Txn)). 
- WithEnablePrintStatements(opts.EnablePrintStatements) - m := metrics.New() - - activation := &bundle.ActivateOpts{ - Ctx: ctx, - Store: opts.Store, - Txn: opts.Txn, - Compiler: compiler, - Metrics: m, - Bundles: opts.Bundles, - ExtraModules: policies, - ParserOptions: opts.ParserOptions, - } - - err := bundle.Activate(activation) - if err != nil { - return nil, err - } - - // Policies in bundles will have already been added to the store, but - // modules loaded outside of bundles will need to be added manually. - for id, parsed := range opts.Files.Modules { - if err := opts.Store.UpsertPolicy(ctx, opts.Txn, id, parsed.Raw); err != nil { - return nil, fmt.Errorf("storage error: %w", err) - } - } - - // Set the version in the store last to prevent data files from overwriting. - if err := storedversion.Write(ctx, opts.Store, opts.Txn); err != nil { - return nil, fmt.Errorf("storage error: %w", err) - } - - return &InsertAndCompileResult{Compiler: compiler, Metrics: m}, nil -} - -// LoadPathsResult contains the output loading a set of paths. -type LoadPathsResult struct { - Bundles map[string]*bundle.Bundle - Files loader.Result -} - -// WalkPathsResult contains the output loading a set of paths. -type WalkPathsResult struct { - BundlesLoader []BundleLoader - FileDescriptors []*Descriptor -} - -// BundleLoader contains information about files in a bundle -type BundleLoader struct { - DirectoryLoader bundle.DirectoryLoader - IsDir bool -} - -// Descriptor contains information about a file -type Descriptor struct { - Root string - Path string -} - -// LoadPaths reads data and policy from the given paths and returns a set of bundles or -// raw loader file results. 
-func LoadPaths(paths []string, - filter loader.Filter, - asBundle bool, - bvc *bundle.VerificationConfig, - skipVerify bool, - processAnnotations bool, - caps *ast.Capabilities, - fsys fs.FS) (*LoadPathsResult, error) { - return LoadPathsForRegoVersion(ast.RegoV0, paths, filter, asBundle, bvc, skipVerify, processAnnotations, false, caps, fsys) -} - -func LoadPathsForRegoVersion(regoVersion ast.RegoVersion, - paths []string, - filter loader.Filter, - asBundle bool, - bvc *bundle.VerificationConfig, - skipVerify bool, - processAnnotations bool, - followSymlinks bool, - caps *ast.Capabilities, - fsys fs.FS) (*LoadPathsResult, error) { - - if caps == nil { - caps = ast.CapabilitiesForThisVersion() - } - - // tar.gz files are automatically loaded as bundles - var likelyBundles, nonBundlePaths []string - if !asBundle { - likelyBundles, nonBundlePaths = splitByTarGzExt(paths) - paths = likelyBundles - } - - var result LoadPathsResult - var err error - if asBundle || len(likelyBundles) > 0 { - result.Bundles = make(map[string]*bundle.Bundle, len(paths)) - for _, path := range paths { - result.Bundles[path], err = loader.NewFileLoader(). - WithFS(fsys). - WithBundleVerificationConfig(bvc). - WithSkipBundleVerification(skipVerify). - WithFilter(filter). - WithProcessAnnotation(processAnnotations). - WithCapabilities(caps). - WithRegoVersion(regoVersion). - WithFollowSymlinks(followSymlinks). - AsBundle(path) - if err != nil { - return nil, err - } - } - } - - if len(nonBundlePaths) == 0 { - return &result, nil - } - - files, err := loader.NewFileLoader(). - WithFS(fsys). - WithProcessAnnotation(processAnnotations). - WithCapabilities(caps). - WithRegoVersion(regoVersion). - Filtered(nonBundlePaths, filter) - - if err != nil { - return nil, err - } - - result.Files = *files - - return &result, nil -} - -// splitByTarGzExt splits the paths in 2 groups. Ones with .tar.gz and another with -// non .tar.gz extensions. 
-func splitByTarGzExt(paths []string) (targzs []string, nonTargzs []string) { - for _, path := range paths { - if strings.HasSuffix(path, ".tar.gz") { - targzs = append(targzs, path) - } else { - nonTargzs = append(nonTargzs, path) - } - } - return -} - -// WalkPaths reads data and policy from the given paths and returns a set of bundle directory loaders -// or descriptors that contain information about files. -func WalkPaths(paths []string, filter loader.Filter, asBundle bool) (*WalkPathsResult, error) { - - var result WalkPathsResult - - if asBundle { - result.BundlesLoader = make([]BundleLoader, len(paths)) - for i, path := range paths { - bundleLoader, isDir, err := loader.GetBundleDirectoryLoader(path) - if err != nil { - return nil, err - } - - result.BundlesLoader[i] = BundleLoader{ - DirectoryLoader: bundleLoader, - IsDir: isDir, - } - } - return &result, nil - } - - result.FileDescriptors = []*Descriptor{} - for _, path := range paths { - filePaths, err := loader.FilteredPaths([]string{path}, filter) - if err != nil { - return nil, err - } - - for _, fp := range filePaths { - // Trim off the root directory and return path as if chrooted - cleanedPath := strings.TrimPrefix(fp, path) - if path == "." 
&& filepath.Base(fp) == bundle.ManifestExt { - cleanedPath = fp - } - - if !strings.HasPrefix(cleanedPath, "/") { - cleanedPath = "/" + cleanedPath - } - - result.FileDescriptors = append(result.FileDescriptors, &Descriptor{ - Root: path, - Path: cleanedPath, - }) - } - } - - return &result, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go b/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go index 389eeccc18..725f86318a 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go +++ b/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go @@ -14,222 +14,234 @@ // Semantic Versions http://semver.org -// Package semver has been vendored from: +// This file was originally vendored from: // https://github.com/coreos/go-semver/tree/e214231b295a8ea9479f11b70b35d5acf3556d9b/semver -// A number of the original functions of the package have been removed since -// they are not required for our built-ins. +// There isn't a single line left from the original source today, but being generous about +// attribution won't hurt. package semver import ( - "bytes" "fmt" "regexp" "strconv" "strings" + + "github.com/open-policy-agent/opa/v1/util" ) +// reMetaIdentifier matches pre-release and metadata identifiers against the spec requirements +var reMetaIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) + // Version represents a parsed SemVer type Version struct { Major int64 Minor int64 Patch int64 - PreRelease PreRelease - Metadata string + PreRelease string `json:"PreRelease,omitempty"` + Metadata string `json:"Metadata,omitempty"` } -// PreRelease represents a pre-release suffix string -type PreRelease string +// Parse constructs new semver Version from version string. 
+func Parse(version string) (v Version, err error) { + version = strings.TrimPrefix(version, "v") -func splitOff(input *string, delim string) (val string) { - parts := strings.SplitN(*input, delim, 2) + version, v.Metadata = cut(version, '+') + if v.Metadata != "" && !reMetaIdentifier.MatchString(v.Metadata) { + return v, fmt.Errorf("invalid metadata identifier: %s", v.Metadata) + } - if len(parts) == 2 { - *input = parts[0] - val = parts[1] + version, v.PreRelease = cut(version, '-') + if v.PreRelease != "" && !reMetaIdentifier.MatchString(v.PreRelease) { + return v, fmt.Errorf("invalid pre-release identifier: %s", v.PreRelease) } - return val -} + if strings.Count(version, ".") != 2 { + return v, fmt.Errorf("%s should contain major, minor, and patch versions", version) + } -// NewVersion constructs new SemVers from strings -func NewVersion(version string) (*Version, error) { - v := Version{} + major, after := cut(version, '.') + if v.Major, err = strconv.ParseInt(major, 10, 64); err != nil { + return v, err + } - if err := v.Set(version); err != nil { - return nil, err + minor, after := cut(after, '.') + if v.Minor, err = strconv.ParseInt(minor, 10, 64); err != nil { + return v, err } - return &v, nil -} + if v.Patch, err = strconv.ParseInt(after, 10, 64); err != nil { + return v, err + } -// Set parses and updates v from the given version string. Implements flag.Value -func (v *Version) Set(version string) error { - metadata := splitOff(&version, "+") - preRelease := PreRelease(splitOff(&version, "-")) - dotParts := strings.SplitN(version, ".", 3) + return v, nil +} - if len(dotParts) != 3 { - return fmt.Errorf("%s is not in dotted-tri format", version) +// MustParse is like Parse but panics if the version string is invalid instead of returning an error. 
+func MustParse(version string) Version { + v, err := Parse(version) + if err != nil { + panic(err) } - if err := validateIdentifier(string(preRelease)); err != nil { - return fmt.Errorf("failed to validate pre-release: %v", err) - } + return v +} - if err := validateIdentifier(metadata); err != nil { - return fmt.Errorf("failed to validate metadata: %v", err) +// Compare compares two semver strings. +func Compare(a, b string) int { + aV, err := Parse(a) + if err != nil { + return -1 } - parsed := make([]int64, 3) - - for i, v := range dotParts[:3] { - val, err := strconv.ParseInt(v, 10, 64) - parsed[i] = val - if err != nil { - return err - } + bV, err := Parse(b) + if err != nil { + return 1 } - v.Metadata = metadata - v.PreRelease = preRelease - v.Major = parsed[0] - v.Minor = parsed[1] - v.Patch = parsed[2] - return nil + return aV.Compare(bV) } -func (v Version) String() string { - var buffer bytes.Buffer +// AppendText appends the textual representation of the version to b and returns the extended buffer. +// This method conforms to the encoding.TextAppender interface, and is useful for serializing the Version +// without allocating, provided the caller has pre-allocated sufficient space in b. +func (v Version) AppendText(b []byte) ([]byte, error) { + if b == nil { + b = make([]byte, 0, length(v)) + } - fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + b = append(strconv.AppendInt(b, v.Major, 10), '.') + b = append(strconv.AppendInt(b, v.Minor, 10), '.') + b = strconv.AppendInt(b, v.Patch, 10) if v.PreRelease != "" { - fmt.Fprintf(&buffer, "-%s", v.PreRelease) + b = append(append(b, '-'), v.PreRelease...) } - if v.Metadata != "" { - fmt.Fprintf(&buffer, "+%s", v.Metadata) + b = append(append(b, '+'), v.Metadata...) } - return buffer.String() -} - -// Compare tests if v is less than, equal to, or greater than versionB, -// returning -1, 0, or +1 respectively. 
-func (v Version) Compare(versionB Version) int { - if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { - return cmp - } - return preReleaseCompare(v, versionB) + return b, nil } -// Slice converts the comparable parts of the semver into a slice of integers. -func (v Version) Slice() []int64 { - return []int64{v.Major, v.Minor, v.Patch} -} +// String returns the string representation of the version. +func (v Version) String() string { + bs := make([]byte, 0, length(v)) + bs, _ = v.AppendText(bs) -// Slice splits the pre-release suffix string -func (p PreRelease) Slice() []string { - preRelease := string(p) - return strings.Split(preRelease, ".") + return string(bs) } -func preReleaseCompare(versionA Version, versionB Version) int { - a := versionA.PreRelease - b := versionB.PreRelease +// Compare tests if v is less than, equal to, or greater than other, returning -1, 0, or +1 respectively. +// Comparison is based on the SemVer specification (https://semver.org/#spec-item-11). +func (v Version) Compare(other Version) int { + if v.Major > other.Major { + return 1 + } else if v.Major < other.Major { + return -1 + } - /* Handle the case where if two versions are otherwise equal it is the - * one without a PreRelease that is greater */ - if len(a) == 0 && (len(b) > 0) { + if v.Minor > other.Minor { return 1 - } else if len(b) == 0 && (len(a) > 0) { + } else if v.Minor < other.Minor { return -1 } - // If there is a prerelease, check and compare each part. 
- return recursivePreReleaseCompare(a.Slice(), b.Slice()) -} + if v.Patch > other.Patch { + return 1 + } else if v.Patch < other.Patch { + return -1 + } -func recursiveCompare(versionA []int64, versionB []int64) int { - if len(versionA) == 0 { + if v.PreRelease == other.PreRelease { return 0 } - a := versionA[0] - b := versionB[0] - - if a > b { + // if two versions are otherwise equal it is the one without a pre-release that is greater + if v.PreRelease == "" && other.PreRelease != "" { return 1 - } else if a < b { + } + if other.PreRelease == "" && v.PreRelease != "" { return -1 } - return recursiveCompare(versionA[1:], versionB[1:]) -} + a, afterA := cut(v.PreRelease, '.') + b, afterB := cut(other.PreRelease, '.') -func recursivePreReleaseCompare(versionA []string, versionB []string) int { - // A larger set of pre-release fields has a higher precedence than a smaller set, - // if all of the preceding identifiers are equal. - if len(versionA) == 0 { - if len(versionB) > 0 { + for { + if a == "" && b != "" { return -1 } - return 0 - } else if len(versionB) == 0 { - // We're longer than versionB so return 1. - return 1 - } - - a := versionA[0] - b := versionB[0] - - aInt := false - bInt := false + if a != "" && b == "" { + return 1 + } - aI, err := strconv.Atoi(versionA[0]) - if err == nil { - aInt = true - } + aIsInt := isAllDecimals(a) + bIsInt := isAllDecimals(b) - bI, err := strconv.Atoi(versionB[0]) - if err == nil { - bInt = true - } + // numeric identifiers have lower precedence than non-numeric + if aIsInt && !bIsInt { + return -1 + } else if !aIsInt && bIsInt { + return 1 + } - // Numeric identifiers always have lower precedence than non-numeric identifiers. 
- if aInt && !bInt { - return -1 - } else if !aInt && bInt { - return 1 - } + if aIsInt && bIsInt { + aInt, _ := strconv.Atoi(a) + bInt, _ := strconv.Atoi(b) + + if aInt > bInt { + return 1 + } else if aInt < bInt { + return -1 + } + } else { + // string comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + } - // Handle Integer Comparison - if aInt && bInt { - if aI > bI { + // a larger set of pre-release fields has a higher precedence than a + // smaller set, if all of the preceding identifiers are equal. + if afterA != "" && afterB == "" { return 1 - } else if aI < bI { + } else if afterA == "" && afterB != "" { return -1 } - } - // Handle String Comparison - if a > b { - return 1 - } else if a < b { - return -1 + a, afterA = cut(afterA, '.') + b, afterB = cut(afterB, '.') } +} - return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +func isAllDecimals(s string) bool { + for _, r := range s { + if r < '0' || r > '9' { + return false + } + } + return s != "" } -// validateIdentifier makes sure the provided identifier satisfies semver spec -func validateIdentifier(id string) error { - if id != "" && !reIdentifier.MatchString(id) { - return fmt.Errorf("%s is not a valid semver identifier", id) +// length allows calculating the length of the version for pre-allocation. +func length(v Version) int { + n := util.NumDigitsInt64(v.Major) + util.NumDigitsInt64(v.Minor) + util.NumDigitsInt64(v.Patch) + 2 + if v.PreRelease != "" { + n += len(v.PreRelease) + 1 } - return nil + if v.Metadata != "" { + n += len(v.Metadata) + 1 + } + return n } -// reIdentifier is a regular expression used to check that pre-release and metadata -// identifiers satisfy the spec requirements -var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) +// cut is a *slightly* faster version of strings.Cut only accepting +// single byte separators, and skipping the boolean return value. 
+func cut(s string, sep byte) (before, after string) { + if i := strings.IndexByte(s, sep); i >= 0 { + return s[:i], s[i+1:] + } + return s, "" +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go b/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go index 08f3bf9182..f2838ac36a 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go +++ b/vendor/github.com/open-policy-agent/opa/internal/strings/strings.go @@ -57,7 +57,7 @@ func TruncateFilePaths(maxIdealWidth, maxWidth int, path ...string) (map[string] } // Drop the overall length down to match our substitution - longestLocation = longestLocation - (len(lcs) - 3) + longestLocation -= (len(lcs) - 3) } return result, longestLocation diff --git a/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go b/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go deleted file mode 100644 index 019dc87bb9..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/strvals/doc.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package strvals provides tools for working with strval lines. - -OPA runtime config supports a compressed format for YAML settings which we call strvals. 
-The format is roughly like this: - - name=value,topname.subname=value - -The above is equivalent to the YAML document - - name: value - topname: - subname: value - -This package provides a parser and utilities for converting the strvals format -to other formats. -*/ -package strvals diff --git a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go b/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go deleted file mode 100644 index 1fc07f68c3..0000000000 --- a/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go +++ /dev/null @@ -1,431 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strvals - -import ( - "bytes" - "errors" - "fmt" - "io" - "strconv" - "strings" - - "sigs.k8s.io/yaml" -) - -// ErrNotList indicates that a non-list was treated as a list. -var ErrNotList = errors.New("not a list") - -// MaxIndex is the maximum index that will be allowed by setIndex. -// The default value 65536 = 1024 * 64 -var MaxIndex = 65536 - -// ToYAML takes a string of arguments and converts to a YAML document. -func ToYAML(s string) (string, error) { - m, err := Parse(s) - if err != nil { - return "", err - } - d, err := yaml.Marshal(m) - return string(d), err -} - -// Parse parses a set line. 
-// -// A set line is of the form name1=value1,name2=value2 -func Parse(s string) (map[string]interface{}, error) { - vals := map[string]interface{}{} - scanner := bytes.NewBufferString(s) - t := newParser(scanner, vals, false) - err := t.parse() - return vals, err -} - -// ParseString parses a set line and forces a string value. -// -// A set line is of the form name1=value1,name2=value2 -func ParseString(s string) (map[string]interface{}, error) { - vals := map[string]interface{}{} - scanner := bytes.NewBufferString(s) - t := newParser(scanner, vals, true) - err := t.parse() - return vals, err -} - -// ParseInto parses a strvals line and merges the result into dest. -// -// If the strval string has a key that exists in dest, it overwrites the -// dest version. -func ParseInto(s string, dest map[string]interface{}) error { - scanner := bytes.NewBufferString(s) - t := newParser(scanner, dest, false) - return t.parse() -} - -// ParseIntoFile parses a filevals line and merges the result into dest. -// -// This method always returns a string as the value. -func ParseIntoFile(s string, dest map[string]interface{}, runesToVal runesToVal) error { - scanner := bytes.NewBufferString(s) - t := newFileParser(scanner, dest, runesToVal) - return t.parse() -} - -// ParseIntoString parses a strvals line and merges the result into dest. -// -// This method always returns a string as the value. -func ParseIntoString(s string, dest map[string]interface{}) error { - scanner := bytes.NewBufferString(s) - t := newParser(scanner, dest, true) - return t.parse() -} - -// parser is a simple parser that takes a strvals line and parses it into a -// map representation. 
-// -// where sc is the source of the original data being parsed -// where data is the final parsed data from the parses with correct types -// where st is a boolean to figure out if we're forcing it to parse values as string -type parser struct { - sc *bytes.Buffer - data map[string]interface{} - runesToVal runesToVal -} - -type runesToVal func([]rune) (interface{}, error) - -func newParser(sc *bytes.Buffer, data map[string]interface{}, stringBool bool) *parser { - rs2v := func(rs []rune) (interface{}, error) { - return typedVal(rs, stringBool), nil - } - return &parser{sc: sc, data: data, runesToVal: rs2v} -} - -func newFileParser(sc *bytes.Buffer, data map[string]interface{}, runesToVal runesToVal) *parser { - return &parser{sc: sc, data: data, runesToVal: runesToVal} -} - -func (t *parser) parse() error { - for { - err := t.key(t.data) - if err == nil { - continue - } - if err == io.EOF { - return nil - } - return err - } -} - -func runeSet(r []rune) map[rune]bool { - s := make(map[rune]bool, len(r)) - for _, rr := range r { - s[rr] = true - } - return s -} - -func (t *parser) key(data map[string]interface{}) error { - stop := runeSet([]rune{'=', '[', ',', '.'}) - for { - switch k, last, err := runesUntil(t.sc, stop); { - case err != nil: - if len(k) == 0 { - return err - } - return fmt.Errorf("key %q has no value", string(k)) - //set(data, string(k), "") - //return err - case last == '[': - // We are in a list index context, so we need to set an index. - i, err := t.keyIndex() - if err != nil { - return fmt.Errorf("error parsing index: %s", err) - } - kk := string(k) - // Find or create target list - list := []interface{}{} - if _, ok := data[kk]; ok { - list = data[kk].([]interface{}) - } - - // Now we need to get the value after the ]. - list, err = t.listItem(list, i) - set(data, kk, list) - return err - case last == '=': - //End of key. Consume =, Get value. 
- // FIXME: Get value list first - vl, e := t.valList() - switch e { - case nil: - set(data, string(k), vl) - return nil - case io.EOF: - set(data, string(k), "") - return e - case ErrNotList: - rs, e := t.val() - if e != nil && e != io.EOF { - return e - } - v, e := t.runesToVal(rs) - set(data, string(k), v) - return e - default: - return e - } - - case last == ',': - // No value given. Set the value to empty string. Return error. - set(data, string(k), "") - return fmt.Errorf("key %q has no value (cannot end with ,)", string(k)) - case last == '.': - // First, create or find the target map. - inner := map[string]interface{}{} - if _, ok := data[string(k)]; ok { - inner = data[string(k)].(map[string]interface{}) - } - - // Recurse - e := t.key(inner) - if len(inner) == 0 { - return fmt.Errorf("key map %q has no value", string(k)) - } - set(data, string(k), inner) - return e - } - } -} - -func set(data map[string]interface{}, key string, val interface{}) { - // If key is empty, don't set it. - if len(key) == 0 { - return - } - data[key] = val -} - -func setIndex(list []interface{}, index int, val interface{}) (l2 []interface{}, err error) { - // There are possible index values that are out of range on a target system - // causing a panic. This will catch the panic and return an error instead. - // The value of the index that causes a panic varies from system to system. - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("error processing index %d: %s", index, r) - } - }() - - if index < 0 { - return list, fmt.Errorf("negative %d index not allowed", index) - } - if index > MaxIndex { - return list, fmt.Errorf("index of %d is greater than maximum supported index of %d", index, MaxIndex) - } - if len(list) <= index { - newlist := make([]interface{}, index+1) - copy(newlist, list) - list = newlist - } - list[index] = val - return list, nil -} - -func (t *parser) keyIndex() (int, error) { - // First, get the key. 
- stop := runeSet([]rune{']'}) - v, _, err := runesUntil(t.sc, stop) - if err != nil { - return 0, err - } - // v should be the index - return strconv.Atoi(string(v)) - -} -func (t *parser) listItem(list []interface{}, i int) ([]interface{}, error) { - if i < 0 { - return list, fmt.Errorf("negative %d index not allowed", i) - } - stop := runeSet([]rune{'[', '.', '='}) - switch k, last, err := runesUntil(t.sc, stop); { - case len(k) > 0: - return list, fmt.Errorf("unexpected data at end of array index: %q", k) - case err != nil: - return list, err - case last == '=': - vl, e := t.valList() - switch e { - case nil: - return setIndex(list, i, vl) - case io.EOF: - return setIndex(list, i, "") - case ErrNotList: - rs, e := t.val() - if e != nil && e != io.EOF { - return list, e - } - v, e := t.runesToVal(rs) - if e != nil { - return nil, e - } - return setIndex(list, i, v) - default: - return list, e - } - case last == '[': - // now we have a nested list. Read the index and handle. - i, err := t.keyIndex() - if err != nil { - return list, fmt.Errorf("error parsing index: %s", err) - } - // Now we need to get the value after the ]. - list2, err := t.listItem(list, i) - if err != nil { - return nil, err - } - return setIndex(list, i, list2) - case last == '.': - // We have a nested object. Send to t.key - inner := map[string]interface{}{} - if len(list) > i { - var ok bool - inner, ok = list[i].(map[string]interface{}) - if !ok { - // We have indices out of order. Initialize empty value. 
- list[i] = map[string]interface{}{} - inner = list[i].(map[string]interface{}) - } - } - - // Recurse - e := t.key(inner) - if e != nil { - return list, e - } - return setIndex(list, i, inner) - default: - return nil, fmt.Errorf("parse error: unexpected token %v", last) - } -} - -func (t *parser) val() ([]rune, error) { - stop := runeSet([]rune{','}) - v, _, err := runesUntil(t.sc, stop) - return v, err -} - -func (t *parser) valList() ([]interface{}, error) { - r, _, e := t.sc.ReadRune() - if e != nil { - return []interface{}{}, e - } - - if r != '{' { - e = t.sc.UnreadRune() - if e != nil { - return []interface{}{}, e - } - return []interface{}{}, ErrNotList - } - - list := []interface{}{} - stop := runeSet([]rune{',', '}'}) - for { - switch rs, last, err := runesUntil(t.sc, stop); { - case err != nil: - if err == io.EOF { - err = errors.New("list must terminate with '}'") - } - return list, err - case last == '}': - // If this is followed by ',', consume it. - if r, _, e := t.sc.ReadRune(); e == nil && r != ',' { - e = t.sc.UnreadRune() - if e != nil { - return []interface{}{}, e - } - } - v, e := t.runesToVal(rs) - list = append(list, v) - return list, e - case last == ',': - v, e := t.runesToVal(rs) - if e != nil { - return list, e - } - list = append(list, v) - } - } -} - -func runesUntil(in io.RuneReader, stop map[rune]bool) ([]rune, rune, error) { - var v []rune - for { - switch r, _, e := in.ReadRune(); { - case e != nil: - return v, r, e - case inMap(r, stop): - return v, r, nil - case r == '\\': - next, _, e := in.ReadRune() - if e != nil { - return v, next, e - } - v = append(v, next) - default: - v = append(v, r) - } - } -} - -func inMap(k rune, m map[rune]bool) bool { - _, ok := m[k] - return ok -} - -func typedVal(v []rune, st bool) interface{} { - val := string(v) - - if st { - return val - } - - if strings.EqualFold(val, "true") { - return true - } - - if strings.EqualFold(val, "false") { - return false - } - - if strings.EqualFold(val, "null") { 
- return struct{}{} - } - - if strings.EqualFold(val, "0") { - return int64(0) - } - - // If this value does not start with zero, try parsing it to an int - if len(val) != 0 && val[0] != '0' { - if iv, err := strconv.ParseInt(val, 10, 64); err == nil { - return iv - } - } - - return val -} diff --git a/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go b/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go index 5d925e68df..63e1a5b071 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go +++ b/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go @@ -32,12 +32,12 @@ func New(r io.Reader) (string, error) { // if parsing fails, it will return an empty map. It will fill the map // with some decoded values with fillMap // ref: https://datatracker.ietf.org/doc/html/rfc4122 -func Parse(s string) (map[string]interface{}, error) { +func Parse(s string) (map[string]any, error) { uuid, err := uuid.Parse(s) if err != nil { return nil, err } - out := make(map[string]interface{}, getVersionLen(int(uuid.Version()))) + out := make(map[string]any, getVersionLen(int(uuid.Version()))) fillMap(out, uuid) return out, nil } @@ -46,7 +46,7 @@ func Parse(s string) (map[string]interface{}, error) { // Version 1-2 has decodable values that could be of use, version 4 is random, // and version 3,5 is not feasible to extract data. 
Generated with either MD5 or SHA1 hash // ref: https://datatracker.ietf.org/doc/html/rfc4122 about creation of UUIDs -func fillMap(m map[string]interface{}, u uuid.UUID) { +func fillMap(m map[string]any, u uuid.UUID) { m["version"] = int(u.Version()) m["variant"] = u.Variant().String() switch version := m["version"]; version { @@ -86,7 +86,7 @@ func byteDecimalToHexMAC(bytes []byte, sep string) string { hexs.Grow((l * 3) - 1) // 1 byte -> 2 hexes + 1 separator (if one char) for i, b := range bytes { - hexs.WriteString(fmt.Sprintf("%02x", b)) + fmt.Fprintf(&hexs, "%02x", b) if i < l-1 { hexs.WriteString(sep) } diff --git a/vendor/github.com/open-policy-agent/opa/internal/version/version.go b/vendor/github.com/open-policy-agent/opa/internal/version/version.go index 1c2e9ecd01..1264278e44 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/version/version.go +++ b/vendor/github.com/open-policy-agent/opa/internal/version/version.go @@ -10,8 +10,8 @@ import ( "fmt" "runtime" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/version" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/version" ) var versionPath = storage.MustParsePath("/system/version") @@ -24,7 +24,7 @@ func Write(ctx context.Context, store storage.Store, txn storage.Transaction) er return err } - return store.Write(ctx, txn, storage.AddOp, versionPath, map[string]interface{}{ + return store.Write(ctx, txn, storage.AddOp, versionPath, map[string]any{ "version": version.Version, "build_commit": version.Vcs, "build_timestamp": version.Timestamp, diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go index 35e6059c72..a2f23d9a64 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go @@ -7,6 +7,7 @@ package encoding 
import ( "bytes" "encoding/binary" + "errors" "fmt" "io" @@ -82,7 +83,7 @@ func readModule(r io.Reader) (*module.Module, error) { var m module.Module - if err := readSections(r, &m); err != nil && err != io.EOF { + if err := readSections(r, &m); err != io.EOF { return nil, err } @@ -105,7 +106,7 @@ func readMagic(r io.Reader) error { if err := binary.Read(r, binary.LittleEndian, &v); err != nil { return err } else if v != constant.Magic { - return fmt.Errorf("illegal magic value") + return errors.New("illegal magic value") } return nil } @@ -115,7 +116,7 @@ func readVersion(r io.Reader) error { if err := binary.Read(r, binary.LittleEndian, &v); err != nil { return err } else if v != constant.Version { - return fmt.Errorf("illegal wasm version") + return errors.New("illegal wasm version") } return nil } @@ -199,7 +200,7 @@ func readSections(r io.Reader, m *module.Module) error { return fmt.Errorf("code section: %w", err) } default: - return fmt.Errorf("illegal section id") + return errors.New("illegal section id") } } } @@ -269,7 +270,7 @@ func readNameMap(r io.Reader) ([]module.NameMap, error) { return nil, err } nm := make([]module.NameMap, n) - for i := uint32(0); i < n; i++ { + for i := range n { var name string id, err := leb128.ReadVarUint32(r) if err != nil { @@ -289,7 +290,7 @@ func readNameSectionLocals(r io.Reader, s *module.NameSection) error { if err != nil { return err } - for i := uint32(0); i < n; i++ { + for range n { id, err := leb128.ReadVarUint32(r) // func index if err != nil { return err @@ -326,7 +327,7 @@ func readTypeSection(r io.Reader, s *module.TypeSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var ftype module.FunctionType if err := readFunctionType(r, &ftype); err != nil { @@ -346,7 +347,7 @@ func readImportSection(r io.Reader, s *module.ImportSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var imp module.Import @@ -367,14 +368,14 @@ func readTableSection(r io.Reader, s 
*module.TableSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var table module.Table if elem, err := readByte(r); err != nil { return err } else if elem != constant.ElementTypeAnyFunc { - return fmt.Errorf("illegal element type") + return errors.New("illegal element type") } table.Type = types.Anyfunc @@ -396,7 +397,7 @@ func readMemorySection(r io.Reader, s *module.MemorySection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var mem module.Memory @@ -417,7 +418,7 @@ func readGlobalSection(r io.Reader, s *module.GlobalSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var global module.Global @@ -442,7 +443,7 @@ func readExportSection(r io.Reader, s *module.ExportSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var exp module.Export @@ -463,7 +464,7 @@ func readElementSection(r io.Reader, s *module.ElementSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var seg module.ElementSegment @@ -484,7 +485,7 @@ func readDataSection(r io.Reader, s *module.DataSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var seg module.DataSegment @@ -505,7 +506,7 @@ func readRawCodeSection(r io.Reader, s *module.RawCodeSection) error { return err } - for i := uint32(0); i < n; i++ { + for range n { var seg module.RawCodeSegment if err := readRawCodeSegment(r, &seg); err != nil { @@ -547,7 +548,7 @@ func readGlobal(r io.Reader, global *module.Global) error { if b == 1 { global.Mutable = true } else if b != 0 { - return fmt.Errorf("illegal mutability flag") + return errors.New("illegal mutability flag") } return readConstantExpr(r, &global.Init) @@ -584,7 +585,7 @@ func readImport(r io.Reader, imp *module.Import) error { if elem, err := readByte(r); err != nil { return err } else if elem != constant.ElementTypeAnyFunc { - return fmt.Errorf("illegal element type") + return errors.New("illegal element type") } 
desc := module.TableImport{ Type: types.Anyfunc, @@ -617,12 +618,12 @@ func readImport(r io.Reader, imp *module.Import) error { if b == 1 { desc.Mutable = true } else if b != 0 { - return fmt.Errorf("illegal mutability flag") + return errors.New("illegal mutability flag") } return nil } - return fmt.Errorf("illegal import descriptor type") + return errors.New("illegal import descriptor type") } func readExport(r io.Reader, exp *module.Export) error { @@ -646,7 +647,7 @@ func readExport(r io.Reader, exp *module.Export) error { case constant.ExportDescGlobal: exp.Descriptor.Type = module.GlobalExportType default: - return fmt.Errorf("illegal export descriptor type") + return errors.New("illegal export descriptor type") } exp.Descriptor.Index, err = leb128.ReadVarUint32(r) @@ -727,7 +728,7 @@ func readExpr(r io.Reader, expr *module.Expr) (err error) { case error: err = r default: - err = fmt.Errorf("unknown panic") + err = errors.New("unknown panic") } } }() @@ -809,21 +810,21 @@ func readLimits(r io.Reader, l *module.Limit) error { return err } - min, err := leb128.ReadVarUint32(r) + minLim, err := leb128.ReadVarUint32(r) if err != nil { return err } - l.Min = min + l.Min = minLim if b == 1 { - max, err := leb128.ReadVarUint32(r) + maxLim, err := leb128.ReadVarUint32(r) if err != nil { return err } - l.Max = &max + l.Max = &maxLim } else if b != 0 { - return fmt.Errorf("illegal limit flag") + return errors.New("illegal limit flag") } return nil @@ -838,7 +839,7 @@ func readLocals(r io.Reader, locals *[]module.LocalDeclaration) error { ret := make([]module.LocalDeclaration, n) - for i := uint32(0); i < n; i++ { + for i := range n { if err := readVarUint32(r, &ret[i].Count); err != nil { return err } @@ -888,7 +889,7 @@ func readVarUint32Vector(r io.Reader, v *[]uint32) error { ret := make([]uint32, n) - for i := uint32(0); i < n; i++ { + for i := range n { if err := readVarUint32(r, &ret[i]); err != nil { return err } @@ -907,7 +908,7 @@ func readValueTypeVector(r 
io.Reader, v *[]types.ValueType) error { ret := make([]types.ValueType, n) - for i := uint32(0); i < n; i++ { + for i := range n { if err := readValueType(r, &ret[i]); err != nil { return err } diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go index 6917b8d1d1..19df3bd6e6 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go @@ -7,6 +7,7 @@ package encoding import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "math" @@ -260,7 +261,7 @@ func writeTableSection(w io.Writer, s module.TableSection) error { return err } default: - return fmt.Errorf("illegal table element type") + return errors.New("illegal table element type") } if err := writeLimits(&buf, table.Lim); err != nil { return err @@ -588,7 +589,7 @@ func writeImport(w io.Writer, imp module.Import) error { } return writeByte(w, constant.Const) default: - return fmt.Errorf("illegal import descriptor type") + return errors.New("illegal import descriptor type") } } diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go index 38f030982d..0b2805247f 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go @@ -112,8 +112,8 @@ func (Br) Op() opcode.Opcode { } // ImmediateArgs returns the block index to break to. -func (i Br) ImmediateArgs() []interface{} { - return []interface{}{i.Index} +func (i Br) ImmediateArgs() []any { + return []any{i.Index} } // BrIf represents a WASM br_if instruction. @@ -127,8 +127,8 @@ func (BrIf) Op() opcode.Opcode { } // ImmediateArgs returns the block index to break to. 
-func (i BrIf) ImmediateArgs() []interface{} { - return []interface{}{i.Index} +func (i BrIf) ImmediateArgs() []any { + return []any{i.Index} } // Call represents a WASM call instruction. @@ -142,8 +142,8 @@ func (Call) Op() opcode.Opcode { } // ImmediateArgs returns the function index. -func (i Call) ImmediateArgs() []interface{} { - return []interface{}{i.Index} +func (i Call) ImmediateArgs() []any { + return []any{i.Index} } // CallIndirect represents a WASM call_indirect instruction. @@ -158,8 +158,8 @@ func (CallIndirect) Op() opcode.Opcode { } // ImmediateArgs returns the function index. -func (i CallIndirect) ImmediateArgs() []interface{} { - return []interface{}{i.Index, i.Reserved} +func (i CallIndirect) ImmediateArgs() []any { + return []any{i.Index, i.Reserved} } // Return represents a WASM return instruction. diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go index 066be77c44..a0ab5953b8 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go @@ -15,14 +15,14 @@ type NoImmediateArgs struct { } // ImmediateArgs returns the immedate arguments of an instruction. -func (NoImmediateArgs) ImmediateArgs() []interface{} { +func (NoImmediateArgs) ImmediateArgs() []any { return nil } // Instruction represents a single WASM instruction. type Instruction interface { Op() opcode.Opcode - ImmediateArgs() []interface{} + ImmediateArgs() []any } // StructuredInstruction represents a structured control instruction like br_if. 
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go index c449cb1b6a..5a052bb764 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go @@ -18,8 +18,8 @@ func (I32Load) Op() opcode.Opcode { } // ImmediateArgs returns the static offset and alignment operands. -func (i I32Load) ImmediateArgs() []interface{} { - return []interface{}{i.Align, i.Offset} +func (i I32Load) ImmediateArgs() []any { + return []any{i.Align, i.Offset} } // I32Store represents the WASM i32.store instruction. @@ -34,6 +34,6 @@ func (I32Store) Op() opcode.Opcode { } // ImmediateArgs returns the static offset and alignment operands. -func (i I32Store) ImmediateArgs() []interface{} { - return []interface{}{i.Align, i.Offset} +func (i I32Store) ImmediateArgs() []any { + return []any{i.Align, i.Offset} } diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go index 03f33752a2..bbba1f0bcb 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go @@ -19,8 +19,8 @@ func (I32Const) Op() opcode.Opcode { } // ImmediateArgs returns the i32 value to push onto the stack. -func (i I32Const) ImmediateArgs() []interface{} { - return []interface{}{i.Value} +func (i I32Const) ImmediateArgs() []any { + return []any{i.Value} } // I64Const represents the WASM i64.const instruction. @@ -34,8 +34,8 @@ func (I64Const) Op() opcode.Opcode { } // ImmediateArgs returns the i64 value to push onto the stack. 
-func (i I64Const) ImmediateArgs() []interface{} { - return []interface{}{i.Value} +func (i I64Const) ImmediateArgs() []any { + return []any{i.Value} } // F32Const represents the WASM f32.const instruction. @@ -49,8 +49,8 @@ func (F32Const) Op() opcode.Opcode { } // ImmediateArgs returns the f32 value to push onto the stack. -func (i F32Const) ImmediateArgs() []interface{} { - return []interface{}{i.Value} +func (i F32Const) ImmediateArgs() []any { + return []any{i.Value} } // F64Const represents the WASM f64.const instruction. @@ -64,8 +64,8 @@ func (F64Const) Op() opcode.Opcode { } // ImmediateArgs returns the f64 value to push onto the stack. -func (i F64Const) ImmediateArgs() []interface{} { - return []interface{}{i.Value} +func (i F64Const) ImmediateArgs() []any { + return []any{i.Value} } // I32Eqz represents the WASM i32.eqz instruction. diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go index 063ffdb96d..68be486af1 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go @@ -17,8 +17,8 @@ func (GetLocal) Op() opcode.Opcode { } // ImmediateArgs returns the index of the local variable to push onto the stack. -func (i GetLocal) ImmediateArgs() []interface{} { - return []interface{}{i.Index} +func (i GetLocal) ImmediateArgs() []any { + return []any{i.Index} } // SetLocal represents the WASM set_local instruction. @@ -33,8 +33,8 @@ func (SetLocal) Op() opcode.Opcode { // ImmediateArgs returns the index of the local variable to set with the top of // the stack. -func (i SetLocal) ImmediateArgs() []interface{} { - return []interface{}{i.Index} +func (i SetLocal) ImmediateArgs() []any { + return []any{i.Index} } // TeeLocal represents the WASM tee_local instruction. 
@@ -49,6 +49,6 @@ func (TeeLocal) Op() opcode.Opcode { // ImmediateArgs returns the index of the local variable to "tee" with the top of // the stack (like set, but retaining the top of the stack). -func (i TeeLocal) ImmediateArgs() []interface{} { - return []interface{}{i.Index} +func (i TeeLocal) ImmediateArgs() []any { + return []any{i.Index} } diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go index 913863c10c..033d429c89 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go +++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go @@ -288,7 +288,7 @@ func (x ExportDescriptorType) String() string { } // Kind returns the function import type kind. -func (i FunctionImport) Kind() ImportDescriptorType { +func (FunctionImport) Kind() ImportDescriptorType { return FunctionImportType } @@ -297,7 +297,7 @@ func (i FunctionImport) String() string { } // Kind returns the memory import type kind. -func (i MemoryImport) Kind() ImportDescriptorType { +func (MemoryImport) Kind() ImportDescriptorType { return MemoryImportType } @@ -306,7 +306,7 @@ func (i MemoryImport) String() string { } // Kind returns the table import type kind. -func (i TableImport) Kind() ImportDescriptorType { +func (TableImport) Kind() ImportDescriptorType { return TableImportType } @@ -315,7 +315,7 @@ func (i TableImport) String() string { } // Kind returns the global import type kind. -func (i GlobalImport) Kind() ImportDescriptorType { +func (GlobalImport) Kind() ImportDescriptorType { return GlobalImportType } diff --git a/vendor/github.com/open-policy-agent/opa/ir/marshal.go b/vendor/github.com/open-policy-agent/opa/ir/marshal.go deleted file mode 100644 index 69f4b5caf6..0000000000 --- a/vendor/github.com/open-policy-agent/opa/ir/marshal.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2022 The OPA Authors. All rights reserved. 
-// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package ir - -import ( - "encoding/json" - "reflect" -) - -func (a *Block) MarshalJSON() ([]byte, error) { - var result typedBlock - result.Stmts = make([]typedStmt, len(a.Stmts)) - for i := range a.Stmts { - tpe := reflect.Indirect(reflect.ValueOf(a.Stmts[i])).Type().Name() - result.Stmts[i] = typedStmt{ - Type: tpe, - Stmt: a.Stmts[i], - } - } - return json.Marshal(result) -} - -func (a *Block) UnmarshalJSON(bs []byte) error { - var typed rawTypedBlock - if err := json.Unmarshal(bs, &typed); err != nil { - return err - } - a.Stmts = make([]Stmt, len(typed.Stmts)) - for i := range typed.Stmts { - var err error - a.Stmts[i], err = typed.Stmts[i].Unmarshal() - if err != nil { - return err - } - } - return nil -} - -func (a *Operand) MarshalJSON() ([]byte, error) { - var result typedOperand - result.Value = a.Value - result.Type = a.Value.typeHint() - return json.Marshal(result) -} - -func (a *Operand) UnmarshalJSON(bs []byte) error { - var typed rawTypedOperand - if err := json.Unmarshal(bs, &typed); err != nil { - return err - } - x := valFactories[typed.Type]() - if err := json.Unmarshal(typed.Value, &x); err != nil { - return err - } - a.Value = x - return nil -} - -type typedBlock struct { - Stmts []typedStmt `json:"stmts"` -} - -type typedStmt struct { - Type string `json:"type"` - Stmt Stmt `json:"stmt"` -} - -type rawTypedBlock struct { - Stmts []rawTypedStmt `json:"stmts"` -} - -type rawTypedStmt struct { - Type string `json:"type"` - Stmt json.RawMessage `json:"stmt"` -} - -func (raw rawTypedStmt) Unmarshal() (Stmt, error) { - x := stmtFactories[raw.Type]() - if err := json.Unmarshal(raw.Stmt, &x); err != nil { - return nil, err - } - return x, nil -} - -type rawTypedOperand struct { - Type string `json:"type"` - Value json.RawMessage `json:"value"` -} - -type typedOperand struct { - Type string `json:"type"` - Value Val `json:"value"` -} - -var 
stmtFactories = map[string]func() Stmt{ - "ReturnLocalStmt": func() Stmt { return &ReturnLocalStmt{} }, - "CallStmt": func() Stmt { return &CallStmt{} }, - "CallDynamicStmt": func() Stmt { return &CallDynamicStmt{} }, - "BlockStmt": func() Stmt { return &BlockStmt{} }, - "BreakStmt": func() Stmt { return &BreakStmt{} }, - "DotStmt": func() Stmt { return &DotStmt{} }, - "LenStmt": func() Stmt { return &LenStmt{} }, - "ScanStmt": func() Stmt { return &ScanStmt{} }, - "NotStmt": func() Stmt { return &NotStmt{} }, - "AssignIntStmt": func() Stmt { return &AssignIntStmt{} }, - "AssignVarStmt": func() Stmt { return &AssignVarStmt{} }, - "AssignVarOnceStmt": func() Stmt { return &AssignVarOnceStmt{} }, - "ResetLocalStmt": func() Stmt { return &ResetLocalStmt{} }, - "MakeNullStmt": func() Stmt { return &MakeNullStmt{} }, - "MakeNumberIntStmt": func() Stmt { return &MakeNumberIntStmt{} }, - "MakeNumberRefStmt": func() Stmt { return &MakeNumberRefStmt{} }, - "MakeArrayStmt": func() Stmt { return &MakeArrayStmt{} }, - "MakeObjectStmt": func() Stmt { return &MakeObjectStmt{} }, - "MakeSetStmt": func() Stmt { return &MakeSetStmt{} }, - "EqualStmt": func() Stmt { return &EqualStmt{} }, - "NotEqualStmt": func() Stmt { return &NotEqualStmt{} }, - "IsArrayStmt": func() Stmt { return &IsArrayStmt{} }, - "IsObjectStmt": func() Stmt { return &IsObjectStmt{} }, - "IsDefinedStmt": func() Stmt { return &IsDefinedStmt{} }, - "IsUndefinedStmt": func() Stmt { return &IsUndefinedStmt{} }, - "ArrayAppendStmt": func() Stmt { return &ArrayAppendStmt{} }, - "ObjectInsertStmt": func() Stmt { return &ObjectInsertStmt{} }, - "ObjectInsertOnceStmt": func() Stmt { return &ObjectInsertOnceStmt{} }, - "ObjectMergeStmt": func() Stmt { return &ObjectMergeStmt{} }, - "SetAddStmt": func() Stmt { return &SetAddStmt{} }, - "WithStmt": func() Stmt { return &WithStmt{} }, - "NopStmt": func() Stmt { return &NopStmt{} }, - "ResultSetAddStmt": func() Stmt { return &ResultSetAddStmt{} }, -} - -var valFactories = 
map[string]func() Val{ - "bool": func() Val { var x Bool; return &x }, - "string_index": func() Val { var x StringIndex; return &x }, - "local": func() Val { var x Local; return &x }, -} diff --git a/vendor/github.com/open-policy-agent/opa/ir/pretty.go b/vendor/github.com/open-policy-agent/opa/ir/pretty.go deleted file mode 100644 index 6102c5a911..0000000000 --- a/vendor/github.com/open-policy-agent/opa/ir/pretty.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package ir - -import ( - "fmt" - "io" - "strings" -) - -// Pretty writes a human-readable representation of an IR object to w. -func Pretty(w io.Writer, x interface{}) error { - - pp := &prettyPrinter{ - depth: -1, - w: w, - } - return Walk(pp, x) -} - -type prettyPrinter struct { - depth int - w io.Writer -} - -func (pp *prettyPrinter) Before(_ interface{}) { - pp.depth++ -} - -func (pp *prettyPrinter) After(_ interface{}) { - pp.depth-- -} - -func (pp *prettyPrinter) Visit(x interface{}) (Visitor, error) { - pp.writeIndent("%T %+v", x, x) - return pp, nil -} - -func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) { - pad := strings.Repeat("| ", pp.depth) - fmt.Fprintf(pp.w, pad+f+"\n", a...) -} diff --git a/vendor/github.com/open-policy-agent/opa/ir/walk.go b/vendor/github.com/open-policy-agent/opa/ir/walk.go deleted file mode 100644 index 08a8f42440..0000000000 --- a/vendor/github.com/open-policy-agent/opa/ir/walk.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package ir - -// Visitor defines the interface for visiting IR nodes. -type Visitor interface { - Before(x interface{}) - Visit(x interface{}) (Visitor, error) - After(x interface{}) -} - -// Walk invokes the visitor for nodes under x. 
-func Walk(vis Visitor, x interface{}) error { - impl := walkerImpl{ - vis: vis, - } - impl.walk(x) - return impl.err -} - -type walkerImpl struct { - vis Visitor - err error -} - -func (w *walkerImpl) walk(x interface{}) { - if w.err != nil { // abort on error - return - } - if x == nil { - return - } - - prev := w.vis - w.vis.Before(x) - defer w.vis.After(x) - w.vis, w.err = w.vis.Visit(x) - if w.err != nil { - return - } else if w.vis == nil { - w.vis = prev - return - } - - switch x := x.(type) { - case *Policy: - w.walk(x.Static) - w.walk(x.Plans) - w.walk(x.Funcs) - case *Static: - for _, s := range x.Strings { - w.walk(s) - } - for _, f := range x.BuiltinFuncs { - w.walk(f) - } - for _, f := range x.Files { - w.walk(f) - } - case *Plans: - for _, pl := range x.Plans { - w.walk(pl) - } - case *Funcs: - for _, fn := range x.Funcs { - w.walk(fn) - } - case *Func: - for _, b := range x.Blocks { - w.walk(b) - } - case *Plan: - for _, b := range x.Blocks { - w.walk(b) - } - case *Block: - for _, s := range x.Stmts { - w.walk(s) - } - case *BlockStmt: - for _, b := range x.Blocks { - w.walk(b) - } - case *ScanStmt: - w.walk(x.Block) - case *NotStmt: - w.walk(x.Block) - case *WithStmt: - w.walk(x.Block) - } -} diff --git a/vendor/github.com/open-policy-agent/opa/keys/keys.go b/vendor/github.com/open-policy-agent/opa/keys/keys.go deleted file mode 100644 index de03496943..0000000000 --- a/vendor/github.com/open-policy-agent/opa/keys/keys.go +++ /dev/null @@ -1,99 +0,0 @@ -package keys - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/open-policy-agent/opa/util" -) - -const defaultSigningAlgorithm = "RS256" - -var supportedAlgos = map[string]struct{}{ - "ES256": {}, "ES384": {}, "ES512": {}, - "HS256": {}, "HS384": {}, "HS512": {}, - "PS256": {}, "PS384": {}, "PS512": {}, - "RS256": {}, "RS384": {}, "RS512": {}, -} - -// IsSupportedAlgorithm true if provided alg is supported -func IsSupportedAlgorithm(alg string) bool { - _, ok := supportedAlgos[alg] - 
return ok -} - -// Config holds the keys used to sign or verify bundles and tokens -type Config struct { - Key string `json:"key"` - PrivateKey string `json:"private_key"` - Algorithm string `json:"algorithm"` - Scope string `json:"scope"` -} - -// Equal returns true if this key config is equal to the other. -func (k *Config) Equal(other *Config) bool { - return other != nil && *k == *other -} - -func (k *Config) validateAndInjectDefaults(id string) error { - if k.Key == "" && k.PrivateKey == "" { - return fmt.Errorf("invalid keys configuration: no keys provided for key ID %v", id) - } - - if k.Algorithm == "" { - k.Algorithm = defaultSigningAlgorithm - } - - if !IsSupportedAlgorithm(k.Algorithm) { - return fmt.Errorf("unsupported algorithm '%v'", k.Algorithm) - } - - return nil -} - -// NewKeyConfig return a new Config -func NewKeyConfig(key, alg, scope string) (*Config, error) { - var pubKey string - if _, err := os.Stat(key); err == nil { - bs, err := os.ReadFile(key) - if err != nil { - return nil, err - } - pubKey = string(bs) - } else if os.IsNotExist(err) { - pubKey = key - } else { - return nil, err - } - - return &Config{ - Key: pubKey, - Algorithm: alg, - Scope: scope, - }, nil -} - -// ParseKeysConfig returns a map containing the key and the signing algorithm -func ParseKeysConfig(raw json.RawMessage) (map[string]*Config, error) { - keys := map[string]*Config{} - var obj map[string]json.RawMessage - - if err := util.Unmarshal(raw, &obj); err == nil { - for k := range obj { - var keyConfig Config - if err = util.Unmarshal(obj[k], &keyConfig); err != nil { - return nil, err - } - - if err = keyConfig.validateAndInjectDefaults(k); err != nil { - return nil, err - } - - keys[k] = &keyConfig - } - } else { - return nil, err - } - return keys, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/loader/doc.go b/vendor/github.com/open-policy-agent/opa/loader/doc.go new file mode 100644 index 0000000000..9f60920d95 --- /dev/null +++ 
b/vendor/github.com/open-policy-agent/opa/loader/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package loader diff --git a/vendor/github.com/open-policy-agent/opa/loader/errors.go b/vendor/github.com/open-policy-agent/opa/loader/errors.go index b8aafb1421..8dc70b8673 100644 --- a/vendor/github.com/open-policy-agent/opa/loader/errors.go +++ b/vendor/github.com/open-policy-agent/opa/loader/errors.go @@ -5,58 +5,8 @@ package loader import ( - "fmt" - "strings" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/loader" ) // Errors is a wrapper for multiple loader errors. 
-type Errors []error - -func (e Errors) Error() string { - if len(e) == 0 { - return "no error(s)" - } - if len(e) == 1 { - return "1 error occurred during loading: " + e[0].Error() - } - buf := make([]string, len(e)) - for i := range buf { - buf[i] = e[i].Error() - } - return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n") -} - -func (e *Errors) add(err error) { - if errs, ok := err.(ast.Errors); ok { - for i := range errs { - *e = append(*e, errs[i]) - } - } else { - *e = append(*e, err) - } -} - -type unsupportedDocumentType string - -func (path unsupportedDocumentType) Error() string { - return string(path) + ": document must be of type object" -} - -type unrecognizedFile string - -func (path unrecognizedFile) Error() string { - return string(path) + ": can't recognize file type" -} - -func isUnrecognizedFile(err error) bool { - _, ok := err.(unrecognizedFile) - return ok -} - -type mergeError string - -func (e mergeError) Error() string { - return string(e) + ": merge error" -} +type Errors = v1.Errors diff --git a/vendor/github.com/open-policy-agent/opa/loader/loader.go b/vendor/github.com/open-policy-agent/opa/loader/loader.go index 461639ed19..a319f2c64d 100644 --- a/vendor/github.com/open-policy-agent/opa/loader/loader.go +++ b/vendor/github.com/open-policy-agent/opa/loader/loader.go @@ -6,481 +6,78 @@ package loader import ( - "bytes" - "fmt" - "io" "io/fs" "os" - "path/filepath" - "sort" "strings" - "sigs.k8s.io/yaml" - "github.com/open-policy-agent/opa/ast" - astJSON "github.com/open-policy-agent/opa/ast/json" "github.com/open-policy-agent/opa/bundle" - fileurl "github.com/open-policy-agent/opa/internal/file/url" - "github.com/open-policy-agent/opa/internal/merge" - "github.com/open-policy-agent/opa/loader/filter" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/inmem" - "github.com/open-policy-agent/opa/util" + v1 
"github.com/open-policy-agent/opa/v1/loader" ) // Result represents the result of successfully loading zero or more files. -type Result struct { - Documents map[string]interface{} - Modules map[string]*RegoFile - path []string -} - -// ParsedModules returns the parsed modules stored on the result. -func (l *Result) ParsedModules() map[string]*ast.Module { - modules := make(map[string]*ast.Module) - for _, module := range l.Modules { - modules[module.Name] = module.Parsed - } - return modules -} - -// Compiler returns a Compiler object with the compiled modules from this loader -// result. -func (l *Result) Compiler() (*ast.Compiler, error) { - compiler := ast.NewCompiler() - compiler.Compile(l.ParsedModules()) - if compiler.Failed() { - return nil, compiler.Errors - } - return compiler, nil -} - -// Store returns a Store object with the documents from this loader result. -func (l *Result) Store() (storage.Store, error) { - return l.StoreWithOpts() -} - -// StoreWithOpts returns a Store object with the documents from this loader result, -// instantiated with the passed options. -func (l *Result) StoreWithOpts(opts ...inmem.Opt) (storage.Store, error) { - return inmem.NewFromObjectWithOpts(l.Documents, opts...), nil -} +type Result = v1.Result // RegoFile represents the result of loading a single Rego source file. -type RegoFile struct { - Name string - Parsed *ast.Module - Raw []byte -} +type RegoFile = v1.RegoFile // Filter defines the interface for filtering files during loading. If the // filter returns true, the file should be excluded from the result. -type Filter = filter.LoaderFilter +type Filter = v1.Filter // GlobExcludeName excludes files and directories whose names do not match the // shell style pattern at minDepth or greater. 
func GlobExcludeName(pattern string, minDepth int) Filter { - return func(_ string, info fs.FileInfo, depth int) bool { - match, _ := filepath.Match(pattern, info.Name()) - return match && depth >= minDepth - } + return v1.GlobExcludeName(pattern, minDepth) } // FileLoader defines an interface for loading OPA data files // and Rego policies. -type FileLoader interface { - All(paths []string) (*Result, error) - Filtered(paths []string, filter Filter) (*Result, error) - AsBundle(path string) (*bundle.Bundle, error) - WithReader(io.Reader) FileLoader - WithFS(fs.FS) FileLoader - WithMetrics(metrics.Metrics) FileLoader - WithFilter(Filter) FileLoader - WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader - WithSkipBundleVerification(bool) FileLoader - WithProcessAnnotation(bool) FileLoader - WithCapabilities(*ast.Capabilities) FileLoader - WithJSONOptions(*astJSON.Options) FileLoader - WithRegoVersion(ast.RegoVersion) FileLoader - WithFollowSymlinks(bool) FileLoader -} +type FileLoader = v1.FileLoader // NewFileLoader returns a new FileLoader instance. func NewFileLoader() FileLoader { - return &fileLoader{ - metrics: metrics.New(), - files: make(map[string]bundle.FileInfo), - } -} - -type fileLoader struct { - metrics metrics.Metrics - filter Filter - bvc *bundle.VerificationConfig - skipVerify bool - files map[string]bundle.FileInfo - opts ast.ParserOptions - fsys fs.FS - reader io.Reader - followSymlinks bool -} - -// WithFS provides an fs.FS to use for loading files. You can pass nil to -// use plain IO calls (e.g. os.Open, os.Stat, etc.), this is the default -// behaviour. -func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader { - fl.fsys = fsys - return fl -} - -// WithReader provides an io.Reader to use for loading the bundle tarball. -// An io.Reader passed via WithReader takes precedence over an fs.FS passed -// via WithFS. 
-func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader { - fl.reader = rdr - return fl -} - -// WithMetrics provides the metrics instance to use while loading -func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader { - fl.metrics = m - return fl -} - -// WithFilter specifies the filter object to use to filter files while loading -func (fl *fileLoader) WithFilter(filter Filter) FileLoader { - fl.filter = filter - return fl -} - -// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle -func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader { - fl.bvc = config - return fl -} - -// WithSkipBundleVerification skips verification of a signed bundle -func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader { - fl.skipVerify = skipVerify - return fl -} - -// WithProcessAnnotation enables or disables processing of schema annotations on rules -func (fl *fileLoader) WithProcessAnnotation(processAnnotation bool) FileLoader { - fl.opts.ProcessAnnotation = processAnnotation - return fl -} - -// WithCapabilities sets the supported capabilities when loading the files -func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader { - fl.opts.Capabilities = caps - return fl -} - -// WithJSONOptions sets the JSONOptions for use when parsing files -func (fl *fileLoader) WithJSONOptions(opts *astJSON.Options) FileLoader { - fl.opts.JSONOptions = opts - return fl -} - -// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules. -func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader { - fl.opts.RegoVersion = version - return fl -} - -// WithFollowSymlinks enables or disables following symlinks when loading files -func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader { - fl.followSymlinks = followSymlinks - return fl -} - -// All returns a Result object loaded (recursively) from the specified paths. 
-func (fl fileLoader) All(paths []string) (*Result, error) { - return fl.Filtered(paths, nil) -} - -// Filtered returns a Result object loaded (recursively) from the specified -// paths while applying the given filters. If any filter returns true, the -// file/directory is excluded. -func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) { - return all(fl.fsys, paths, filter, func(curr *Result, path string, depth int) error { - - var ( - bs []byte - err error - ) - if fl.fsys != nil { - bs, err = fs.ReadFile(fl.fsys, path) - } else { - bs, err = os.ReadFile(path) - } - if err != nil { - return err - } - - result, err := loadKnownTypes(path, bs, fl.metrics, fl.opts) - if err != nil { - if !isUnrecognizedFile(err) { - return err - } - if depth > 0 { - return nil - } - result, err = loadFileForAnyType(path, bs, fl.metrics, fl.opts) - if err != nil { - return err - } - } - - return curr.merge(path, result) - }) -} - -// AsBundle loads a path as a bundle. If it is a single file -// it will be treated as a normal tarball bundle. If a directory -// is supplied it will be loaded as an unzipped bundle tree. -func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, err - } - - if err := checkForUNCPath(path); err != nil { - return nil, err - } - - var bundleLoader bundle.DirectoryLoader - var isDir bool - if fl.reader != nil { - bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter) - } else { - bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter) - } - - if err != nil { - return nil, err - } - bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks) - - br := bundle.NewCustomReader(bundleLoader). - WithMetrics(fl.metrics). - WithBundleVerificationConfig(fl.bvc). - WithSkipBundleVerification(fl.skipVerify). - WithProcessAnnotations(fl.opts.ProcessAnnotation). - WithCapabilities(fl.opts.Capabilities). 
- WithJSONOptions(fl.opts.JSONOptions). - WithFollowSymlinks(fl.followSymlinks). - WithRegoVersion(fl.opts.RegoVersion) - - // For bundle directories add the full path in front of module file names - // to simplify debugging. - if isDir { - br.WithBaseDir(path) - } - - b, err := br.Read() - if err != nil { - err = fmt.Errorf("bundle %s: %w", path, err) - } - - return &b, err + return v1.NewFileLoader().WithRegoVersion(ast.DefaultRegoVersion) } // GetBundleDirectoryLoader returns a bundle directory loader which can be used to load // files in the directory func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) { - return GetBundleDirectoryLoaderFS(nil, path, nil) + return v1.GetBundleDirectoryLoader(path) } // GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load // files in the directory after applying the given filter. func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) { - return GetBundleDirectoryLoaderFS(nil, path, filter) + return v1.GetBundleDirectoryLoaderWithFilter(path, filter) } // GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load // files in the directory. 
func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, false, err - } - - if err := checkForUNCPath(path); err != nil { - return nil, false, err - } - - var fi fs.FileInfo - if fsys != nil { - fi, err = fs.Stat(fsys, path) - } else { - fi, err = os.Stat(path) - } - if err != nil { - return nil, false, fmt.Errorf("error reading %q: %s", path, err) - } - - var bundleLoader bundle.DirectoryLoader - if fi.IsDir() { - if fsys != nil { - bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path) - } else { - bundleLoader = bundle.NewDirectoryLoader(path) - } - } else { - var fh fs.File - if fsys != nil { - fh, err = fsys.Open(path) - } else { - fh, err = os.Open(path) - } - if err != nil { - return nil, false, err - } - bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path) - } - - if filter != nil { - bundleLoader = bundleLoader.WithFilter(filter) - } - return bundleLoader, fi.IsDir(), nil + return v1.GetBundleDirectoryLoaderFS(fsys, path, filter) } // FilteredPaths is the same as FilterPathsFS using the current diretory file // system func FilteredPaths(paths []string, filter Filter) ([]string, error) { - return FilteredPathsFS(nil, paths, filter) + return v1.FilteredPaths(paths, filter) } // FilteredPathsFS return a list of files from the specified // paths while applying the given filters. If any filter returns true, the // file/directory is excluded. func FilteredPathsFS(fsys fs.FS, paths []string, filter Filter) ([]string, error) { - result := []string{} - - _, err := all(fsys, paths, filter, func(_ *Result, path string, _ int) error { - result = append(result, path) - return nil - }) - if err != nil { - return nil, err - } - return result, nil + return v1.FilteredPathsFS(fsys, paths, filter) } // Schemas loads a schema set from the specified file path. 
func Schemas(schemaPath string) (*ast.SchemaSet, error) { - - var errs Errors - ss, err := loadSchemas(schemaPath) - if err != nil { - errs.add(err) - return nil, errs - } - - return ss, nil -} - -func loadSchemas(schemaPath string) (*ast.SchemaSet, error) { - - if schemaPath == "" { - return nil, nil - } - - ss := ast.NewSchemaSet() - path, err := fileurl.Clean(schemaPath) - if err != nil { - return nil, err - } - - info, err := os.Stat(path) - if err != nil { - return nil, err - } - - // Handle single file case. - if !info.IsDir() { - schema, err := loadOneSchema(path) - if err != nil { - return nil, err - } - ss.Put(ast.SchemaRootRef, schema) - return ss, nil - - } - - // Handle directory case. - rootDir := path - - err = filepath.Walk(path, - func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } else if info.IsDir() { - return nil - } - - schema, err := loadOneSchema(path) - if err != nil { - return err - } - - relPath, err := filepath.Rel(rootDir, path) - if err != nil { - return err - } - - key := getSchemaSetByPathKey(relPath) - ss.Put(key, schema) - return nil - }) - - if err != nil { - return nil, err - } - - return ss, nil -} - -func getSchemaSetByPathKey(path string) ast.Ref { - - front := filepath.Dir(path) - last := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) - - var parts []string - - if front != "." 
{ - parts = append(strings.Split(filepath.ToSlash(front), "/"), last) - } else { - parts = []string{last} - } - - key := make(ast.Ref, 1+len(parts)) - key[0] = ast.SchemaRootDocument - for i := range parts { - key[i+1] = ast.StringTerm(parts[i]) - } - - return key -} - -func loadOneSchema(path string) (interface{}, error) { - bs, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - var schema interface{} - if err := util.Unmarshal(bs, &schema); err != nil { - return nil, fmt.Errorf("%s: %w", path, err) - } - - return schema, nil + return v1.Schemas(schemaPath) } // All returns a Result object loaded (recursively) from the specified paths. +// // Deprecated: Use FileLoader.Filtered() instead. func All(paths []string) (*Result, error) { return NewFileLoader().Filtered(paths, nil) @@ -489,6 +86,7 @@ func All(paths []string) (*Result, error) { // Filtered returns a Result object loaded (recursively) from the specified // paths while applying the given filters. If any filter returns true, the // file/directory is excluded. +// // Deprecated: Use FileLoader.Filtered() instead. func Filtered(paths []string, filter Filter) (*Result, error) { return NewFileLoader().Filtered(paths, filter) @@ -497,6 +95,7 @@ func Filtered(paths []string, filter Filter) (*Result, error) { // AsBundle loads a path as a bundle. If it is a single file // it will be treated as a normal tarball bundle. If a directory // is supplied it will be loaded as an unzipped bundle tree. +// // Deprecated: Use FileLoader.AsBundle() instead. func AsBundle(path string) (*bundle.Bundle, error) { return NewFileLoader().AsBundle(path) @@ -517,321 +116,33 @@ func Rego(path string) (*RegoFile, error) { // RegoWithOpts returns a RegoFile object loaded from the given path. 
func RegoWithOpts(path string, opts ast.ParserOptions) (*RegoFile, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, err + if opts.RegoVersion == ast.RegoUndefined { + opts.RegoVersion = ast.DefaultRegoVersion } - bs, err := os.ReadFile(path) - if err != nil { - return nil, err - } - return loadRego(path, bs, metrics.New(), opts) + + return v1.RegoWithOpts(path, opts) } // CleanPath returns the normalized version of a path that can be used as an identifier. func CleanPath(path string) string { - return strings.Trim(path, "/") + return v1.CleanPath(path) } // Paths returns a sorted list of files contained at path. If recurse is true // and path is a directory, then Paths will walk the directory structure // recursively and list files at each level. func Paths(path string, recurse bool) (paths []string, err error) { - path, err = fileurl.Clean(path) - if err != nil { - return nil, err - } - err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error { - if !recurse { - if path != f && path != filepath.Dir(f) { - return filepath.SkipDir - } - } - paths = append(paths, f) - return nil - }) - return paths, err + return v1.Paths(path, recurse) } // Dirs resolves filepaths to directories. It will return a list of unique // directories. func Dirs(paths []string) []string { - unique := map[string]struct{}{} - - for _, path := range paths { - // TODO: /dir/dir will register top level directory /dir - dir := filepath.Dir(path) - unique[dir] = struct{}{} - } - - u := make([]string, 0, len(unique)) - for k := range unique { - u = append(u, k) - } - sort.Strings(u) - return u + return v1.Dirs(paths) } // SplitPrefix returns a tuple specifying the document prefix and the file // path. func SplitPrefix(path string) ([]string, string) { - // Non-prefixed URLs can be returned without modification and their contents - // can be rooted directly under data. 
- if strings.Index(path, "://") == strings.Index(path, ":") { - return nil, path - } - parts := strings.SplitN(path, ":", 2) - if len(parts) == 2 && len(parts[0]) > 0 { - return strings.Split(parts[0], "."), parts[1] - } - return nil, path -} - -func (l *Result) merge(path string, result interface{}) error { - switch result := result.(type) { - case bundle.Bundle: - for _, module := range result.Modules { - l.Modules[module.Path] = &RegoFile{ - Name: module.Path, - Parsed: module.Parsed, - Raw: module.Raw, - } - } - return l.mergeDocument(path, result.Data) - case *RegoFile: - l.Modules[CleanPath(path)] = result - return nil - default: - return l.mergeDocument(path, result) - } -} - -func (l *Result) mergeDocument(path string, doc interface{}) error { - obj, ok := makeDir(l.path, doc) - if !ok { - return unsupportedDocumentType(path) - } - merged, ok := merge.InterfaceMaps(l.Documents, obj) - if !ok { - return mergeError(path) - } - for k := range merged { - l.Documents[k] = merged[k] - } - return nil -} - -func (l *Result) withParent(p string) *Result { - path := append(l.path, p) - return &Result{ - Documents: l.Documents, - Modules: l.Modules, - path: path, - } -} - -func newResult() *Result { - return &Result{ - Documents: map[string]interface{}{}, - Modules: map[string]*RegoFile{}, - } -} - -func all(fsys fs.FS, paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) { - errs := Errors{} - root := newResult() - - for _, path := range paths { - - // Paths can be prefixed with a string that specifies where content should be - // loaded under data. E.g., foo.bar:/path/to/some.json will load the content - // of some.json under {"foo": {"bar": ...}}. 
- loaded := root - prefix, path := SplitPrefix(path) - if len(prefix) > 0 { - for _, part := range prefix { - loaded = loaded.withParent(part) - } - } - - allRec(fsys, path, filter, &errs, loaded, 0, f) - } - - if len(errs) > 0 { - return nil, errs - } - - return root, nil -} - -func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) { - - path, err := fileurl.Clean(path) - if err != nil { - errors.add(err) - return - } - - if err := checkForUNCPath(path); err != nil { - errors.add(err) - return - } - - var info fs.FileInfo - if fsys != nil { - info, err = fs.Stat(fsys, path) - } else { - info, err = os.Stat(path) - } - - if err != nil { - errors.add(err) - return - } - - if filter != nil && filter(path, info, depth) { - return - } - - if !info.IsDir() { - if err := f(loaded, path, depth); err != nil { - errors.add(err) - } - return - } - - // If we are recursing on directories then content must be loaded under path - // specified by directory hierarchy. 
- if depth > 0 { - loaded = loaded.withParent(info.Name()) - } - - var files []fs.DirEntry - if fsys != nil { - files, err = fs.ReadDir(fsys, path) - } else { - files, err = os.ReadDir(path) - } - if err != nil { - errors.add(err) - return - } - - for _, file := range files { - allRec(fsys, filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f) - } -} - -func loadKnownTypes(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { - switch filepath.Ext(path) { - case ".json": - return loadJSON(path, bs, m) - case ".rego": - return loadRego(path, bs, m, opts) - case ".yaml", ".yml": - return loadYAML(path, bs, m) - default: - if strings.HasSuffix(path, ".tar.gz") { - r, err := loadBundleFile(path, bs, m, opts) - if err != nil { - err = fmt.Errorf("bundle %s: %w", path, err) - } - return r, err - } - } - return nil, unrecognizedFile(path) -} - -func loadFileForAnyType(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { - module, err := loadRego(path, bs, m, opts) - if err == nil { - return module, nil - } - doc, err := loadJSON(path, bs, m) - if err == nil { - return doc, nil - } - doc, err = loadYAML(path, bs, m) - if err == nil { - return doc, nil - } - return nil, unrecognizedFile(path) -} - -func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (bundle.Bundle, error) { - tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path) - br := bundle.NewCustomReader(tl). - WithRegoVersion(opts.RegoVersion). - WithJSONOptions(opts.JSONOptions). - WithProcessAnnotations(opts.ProcessAnnotation). - WithMetrics(m). - WithSkipBundleVerification(true). 
- IncludeManifestInData(true) - return br.Read() -} - -func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (*RegoFile, error) { - m.Timer(metrics.RegoModuleParse).Start() - var module *ast.Module - var err error - module, err = ast.ParseModuleWithOpts(path, string(bs), opts) - m.Timer(metrics.RegoModuleParse).Stop() - if err != nil { - return nil, err - } - result := &RegoFile{ - Name: path, - Parsed: module, - Raw: bs, - } - return result, nil -} - -func loadJSON(path string, bs []byte, m metrics.Metrics) (interface{}, error) { - m.Timer(metrics.RegoDataParse).Start() - var x interface{} - err := util.UnmarshalJSON(bs, &x) - m.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return nil, fmt.Errorf("%s: %w", path, err) - } - return x, nil -} - -func loadYAML(path string, bs []byte, m metrics.Metrics) (interface{}, error) { - m.Timer(metrics.RegoDataParse).Start() - bs, err := yaml.YAMLToJSON(bs) - m.Timer(metrics.RegoDataParse).Stop() - if err != nil { - return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err) - } - return loadJSON(path, bs, m) -} - -func makeDir(path []string, x interface{}) (map[string]interface{}, bool) { - if len(path) == 0 { - obj, ok := x.(map[string]interface{}) - if !ok { - return nil, false - } - return obj, true - } - return makeDir(path[:len(path)-1], map[string]interface{}{path[len(path)-1]: x}) -} - -// isUNC reports whether path is a UNC path. 
-func isUNC(path string) bool { - return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) -} - -func isSlash(c uint8) bool { - return c == '\\' || c == '/' -} - -func checkForUNCPath(path string) error { - if isUNC(path) { - return fmt.Errorf("UNC path read is not allowed: %s", path) - } - return nil + return v1.SplitPrefix(path) } diff --git a/vendor/github.com/open-policy-agent/opa/logging/logging.go b/vendor/github.com/open-policy-agent/opa/logging/logging.go deleted file mode 100644 index 7a1edfb563..0000000000 --- a/vendor/github.com/open-policy-agent/opa/logging/logging.go +++ /dev/null @@ -1,266 +0,0 @@ -package logging - -import ( - "context" - "io" - "net/http" - - "github.com/sirupsen/logrus" -) - -// Level log level for Logger -type Level uint8 - -const ( - // Error error log level - Error Level = iota - // Warn warn log level - Warn - // Info info log level - Info - // Debug debug log level - Debug -) - -// Logger provides interface for OPA logger implementations -type Logger interface { - Debug(fmt string, a ...interface{}) - Info(fmt string, a ...interface{}) - Error(fmt string, a ...interface{}) - Warn(fmt string, a ...interface{}) - - WithFields(map[string]interface{}) Logger - - GetLevel() Level - SetLevel(Level) -} - -// StandardLogger is the default OPA logger implementation. -type StandardLogger struct { - logger *logrus.Logger - fields map[string]interface{} -} - -// New returns a new standard logger. -func New() *StandardLogger { - return &StandardLogger{ - logger: logrus.New(), - } -} - -// Get returns the standard logger used throughout OPA. -// -// Deprecated. Do not rely on the global logger. -func Get() *StandardLogger { - return &StandardLogger{ - logger: logrus.StandardLogger(), - } -} - -// SetOutput sets the underlying logrus output. -func (l *StandardLogger) SetOutput(w io.Writer) { - l.logger.SetOutput(w) -} - -// SetFormatter sets the underlying logrus formatter. 
-func (l *StandardLogger) SetFormatter(formatter logrus.Formatter) { - l.logger.SetFormatter(formatter) -} - -// WithFields provides additional fields to include in log output -func (l *StandardLogger) WithFields(fields map[string]interface{}) Logger { - cp := *l - cp.fields = make(map[string]interface{}) - for k, v := range l.fields { - cp.fields[k] = v - } - for k, v := range fields { - cp.fields[k] = v - } - return &cp -} - -// getFields returns additional fields of this logger -func (l *StandardLogger) getFields() map[string]interface{} { - return l.fields -} - -// SetLevel sets the standard logger level. -func (l *StandardLogger) SetLevel(level Level) { - var logrusLevel logrus.Level - switch level { - case Error: // set logging level report Warn or higher (includes Error) - logrusLevel = logrus.WarnLevel - case Warn: - logrusLevel = logrus.WarnLevel - case Info: - logrusLevel = logrus.InfoLevel - case Debug: - logrusLevel = logrus.DebugLevel - default: - l.Warn("unknown log level %v", level) - logrusLevel = logrus.InfoLevel - } - - l.logger.SetLevel(logrusLevel) -} - -// GetLevel returns the standard logger level. -func (l *StandardLogger) GetLevel() Level { - logrusLevel := l.logger.GetLevel() - - var level Level - switch logrusLevel { - case logrus.WarnLevel: - level = Error - case logrus.InfoLevel: - level = Info - case logrus.DebugLevel: - level = Debug - default: - l.Warn("unknown log level %v", logrusLevel) - level = Info - } - - return level -} - -// Debug logs at debug level -func (l *StandardLogger) Debug(fmt string, a ...interface{}) { - if len(a) == 0 { - l.logger.WithFields(l.getFields()).Debug(fmt) - return - } - l.logger.WithFields(l.getFields()).Debugf(fmt, a...) -} - -// Info logs at info level -func (l *StandardLogger) Info(fmt string, a ...interface{}) { - if len(a) == 0 { - l.logger.WithFields(l.getFields()).Info(fmt) - return - } - l.logger.WithFields(l.getFields()).Infof(fmt, a...) 
-} - -// Error logs at error level -func (l *StandardLogger) Error(fmt string, a ...interface{}) { - if len(a) == 0 { - l.logger.WithFields(l.getFields()).Error(fmt) - return - } - l.logger.WithFields(l.getFields()).Errorf(fmt, a...) -} - -// Warn logs at warn level -func (l *StandardLogger) Warn(fmt string, a ...interface{}) { - if len(a) == 0 { - l.logger.WithFields(l.getFields()).Warn(fmt) - return - } - l.logger.WithFields(l.getFields()).Warnf(fmt, a...) -} - -// NoOpLogger logging implementation that does nothing -type NoOpLogger struct { - level Level - fields map[string]interface{} -} - -// NewNoOpLogger instantiates new NoOpLogger -func NewNoOpLogger() *NoOpLogger { - return &NoOpLogger{ - level: Info, - } -} - -// WithFields provides additional fields to include in log output. -// Implemented here primarily to be able to switch between implementations without loss of data. -func (l *NoOpLogger) WithFields(fields map[string]interface{}) Logger { - cp := *l - cp.fields = fields - return &cp -} - -// Debug noop -func (*NoOpLogger) Debug(string, ...interface{}) {} - -// Info noop -func (*NoOpLogger) Info(string, ...interface{}) {} - -// Error noop -func (*NoOpLogger) Error(string, ...interface{}) {} - -// Warn noop -func (*NoOpLogger) Warn(string, ...interface{}) {} - -// SetLevel set log level -func (l *NoOpLogger) SetLevel(level Level) { - l.level = level -} - -// GetLevel get log level -func (l *NoOpLogger) GetLevel() Level { - return l.level -} - -type requestContextKey string - -const reqCtxKey = requestContextKey("request-context-key") - -// RequestContext represents the request context used to store data -// related to the request that could be used on logs. -type RequestContext struct { - ClientAddr string - ReqID uint64 - ReqMethod string - ReqPath string - HTTPRequestContext HTTPRequestContext -} - -type HTTPRequestContext struct { - Header http.Header -} - -// Fields adapts the RequestContext fields to logrus.Fields. 
-func (rctx RequestContext) Fields() logrus.Fields { - return logrus.Fields{ - "client_addr": rctx.ClientAddr, - "req_id": rctx.ReqID, - "req_method": rctx.ReqMethod, - "req_path": rctx.ReqPath, - } -} - -// NewContext returns a copy of parent with an associated RequestContext. -func NewContext(parent context.Context, val *RequestContext) context.Context { - return context.WithValue(parent, reqCtxKey, val) -} - -// FromContext returns the RequestContext associated with ctx, if any. -func FromContext(ctx context.Context) (*RequestContext, bool) { - requestContext, ok := ctx.Value(reqCtxKey).(*RequestContext) - return requestContext, ok -} - -const httpReqCtxKey = requestContextKey("http-request-context-key") - -func WithHTTPRequestContext(parent context.Context, val *HTTPRequestContext) context.Context { - return context.WithValue(parent, httpReqCtxKey, val) -} - -func HTTPRequestContextFromContext(ctx context.Context) (*HTTPRequestContext, bool) { - requestContext, ok := ctx.Value(httpReqCtxKey).(*HTTPRequestContext) - return requestContext, ok -} - -const decisionCtxKey = requestContextKey("decision_id") - -func WithDecisionID(parent context.Context, id string) context.Context { - return context.WithValue(parent, decisionCtxKey, id) -} - -func DecisionIDFromContext(ctx context.Context) (string, bool) { - s, ok := ctx.Value(decisionCtxKey).(string) - return s, ok -} diff --git a/vendor/github.com/open-policy-agent/opa/metrics/metrics.go b/vendor/github.com/open-policy-agent/opa/metrics/metrics.go deleted file mode 100644 index 53cd606a36..0000000000 --- a/vendor/github.com/open-policy-agent/opa/metrics/metrics.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package metrics contains helpers for performance metric management inside the policy engine. 
-package metrics - -import ( - "encoding/json" - "fmt" - "sort" - "strings" - "sync" - "sync/atomic" - "time" - - go_metrics "github.com/rcrowley/go-metrics" -) - -// Well-known metric names. -const ( - BundleRequest = "bundle_request" - ServerHandler = "server_handler" - ServerQueryCacheHit = "server_query_cache_hit" - SDKDecisionEval = "sdk_decision_eval" - RegoQueryCompile = "rego_query_compile" - RegoQueryEval = "rego_query_eval" - RegoQueryParse = "rego_query_parse" - RegoModuleParse = "rego_module_parse" - RegoDataParse = "rego_data_parse" - RegoModuleCompile = "rego_module_compile" - RegoPartialEval = "rego_partial_eval" - RegoInputParse = "rego_input_parse" - RegoLoadFiles = "rego_load_files" - RegoLoadBundles = "rego_load_bundles" - RegoExternalResolve = "rego_external_resolve" -) - -// Info contains attributes describing the underlying metrics provider. -type Info struct { - Name string `json:"name"` // name is a unique human-readable identifier for the provider. -} - -// Metrics defines the interface for a collection of performance metrics in the -// policy engine. -type Metrics interface { - Info() Info - Timer(name string) Timer - Histogram(name string) Histogram - Counter(name string) Counter - All() map[string]interface{} - Clear() - json.Marshaler -} - -type TimerMetrics interface { - Timers() map[string]interface{} -} - -type metrics struct { - mtx sync.Mutex - timers map[string]Timer - histograms map[string]Histogram - counters map[string]Counter -} - -// New returns a new Metrics object. 
-func New() Metrics { - m := &metrics{} - m.Clear() - return m -} - -type metric struct { - Key string - Value interface{} -} - -func (*metrics) Info() Info { - return Info{ - Name: "", - } -} - -func (m *metrics) String() string { - - all := m.All() - sorted := make([]metric, 0, len(all)) - - for key, value := range all { - sorted = append(sorted, metric{ - Key: key, - Value: value, - }) - } - - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Key < sorted[j].Key - }) - - buf := make([]string, len(sorted)) - for i := range sorted { - buf[i] = fmt.Sprintf("%v:%v", sorted[i].Key, sorted[i].Value) - } - - return strings.Join(buf, " ") -} - -func (m *metrics) MarshalJSON() ([]byte, error) { - return json.Marshal(m.All()) -} - -func (m *metrics) Timer(name string) Timer { - m.mtx.Lock() - defer m.mtx.Unlock() - t, ok := m.timers[name] - if !ok { - t = &timer{} - m.timers[name] = t - } - return t -} - -func (m *metrics) Histogram(name string) Histogram { - m.mtx.Lock() - defer m.mtx.Unlock() - h, ok := m.histograms[name] - if !ok { - h = newHistogram() - m.histograms[name] = h - } - return h -} - -func (m *metrics) Counter(name string) Counter { - m.mtx.Lock() - defer m.mtx.Unlock() - c, ok := m.counters[name] - if !ok { - zero := counter{} - c = &zero - m.counters[name] = c - } - return c -} - -func (m *metrics) All() map[string]interface{} { - m.mtx.Lock() - defer m.mtx.Unlock() - result := map[string]interface{}{} - for name, timer := range m.timers { - result[m.formatKey(name, timer)] = timer.Value() - } - for name, hist := range m.histograms { - result[m.formatKey(name, hist)] = hist.Value() - } - for name, cntr := range m.counters { - result[m.formatKey(name, cntr)] = cntr.Value() - } - return result -} - -func (m *metrics) Timers() map[string]interface{} { - m.mtx.Lock() - defer m.mtx.Unlock() - ts := map[string]interface{}{} - for n, t := range m.timers { - ts[m.formatKey(n, t)] = t.Value() - } - return ts -} - -func (m *metrics) Clear() { - 
m.mtx.Lock() - defer m.mtx.Unlock() - m.timers = map[string]Timer{} - m.histograms = map[string]Histogram{} - m.counters = map[string]Counter{} -} - -func (m *metrics) formatKey(name string, metrics interface{}) string { - switch metrics.(type) { - case Timer: - return "timer_" + name + "_ns" - case Histogram: - return "histogram_" + name - case Counter: - return "counter_" + name - default: - return name - } -} - -// Timer defines the interface for a restartable timer that accumulates elapsed -// time. -type Timer interface { - Value() interface{} - Int64() int64 - Start() - Stop() int64 -} - -type timer struct { - mtx sync.Mutex - start time.Time - value int64 -} - -func (t *timer) Start() { - t.mtx.Lock() - defer t.mtx.Unlock() - t.start = time.Now() -} - -func (t *timer) Stop() int64 { - t.mtx.Lock() - defer t.mtx.Unlock() - delta := time.Since(t.start).Nanoseconds() - t.value += delta - return delta -} - -func (t *timer) Value() interface{} { - return t.Int64() -} - -func (t *timer) Int64() int64 { - t.mtx.Lock() - defer t.mtx.Unlock() - return t.value -} - -// Histogram defines the interface for a histogram with hardcoded percentiles. -type Histogram interface { - Value() interface{} - Update(int64) -} - -type histogram struct { - hist go_metrics.Histogram // is thread-safe because of the underlying ExpDecaySample -} - -func newHistogram() Histogram { - // NOTE(tsandall): the reservoir size and alpha factor are taken from - // https://github.com/rcrowley/go-metrics. They may need to be tweaked in - // the future. 
- sample := go_metrics.NewExpDecaySample(1028, 0.015) - hist := go_metrics.NewHistogram(sample) - return &histogram{hist} -} - -func (h *histogram) Update(v int64) { - h.hist.Update(v) -} - -func (h *histogram) Value() interface{} { - values := map[string]interface{}{} - snap := h.hist.Snapshot() - percentiles := snap.Percentiles([]float64{ - 0.5, - 0.75, - 0.9, - 0.95, - 0.99, - 0.999, - 0.9999, - }) - values["count"] = snap.Count() - values["min"] = snap.Min() - values["max"] = snap.Max() - values["mean"] = snap.Mean() - values["stddev"] = snap.StdDev() - values["median"] = percentiles[0] - values["75%"] = percentiles[1] - values["90%"] = percentiles[2] - values["95%"] = percentiles[3] - values["99%"] = percentiles[4] - values["99.9%"] = percentiles[5] - values["99.99%"] = percentiles[6] - return values -} - -// Counter defines the interface for a monotonic increasing counter. -type Counter interface { - Value() interface{} - Incr() - Add(n uint64) -} - -type counter struct { - c uint64 -} - -func (c *counter) Incr() { - atomic.AddUint64(&c.c, 1) -} - -func (c *counter) Add(n uint64) { - atomic.AddUint64(&c.c, n) -} - -func (c *counter) Value() interface{} { - return atomic.LoadUint64(&c.c) -} - -func Statistics(num ...int64) interface{} { - t := newHistogram() - for _, n := range num { - t.Update(n) - } - return t.Value() -} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go b/vendor/github.com/open-policy-agent/opa/plugins/plugins.go deleted file mode 100644 index 567acfb817..0000000000 --- a/vendor/github.com/open-policy-agent/opa/plugins/plugins.go +++ /dev/null @@ -1,1098 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package plugins implements plugin management for the policy engine. 
-package plugins - -import ( - "context" - "errors" - "fmt" - mr "math/rand" - "sync" - "time" - - "github.com/open-policy-agent/opa/internal/report" - "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/sdk/trace" - - "github.com/gorilla/mux" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" - "github.com/open-policy-agent/opa/config" - "github.com/open-policy-agent/opa/hooks" - bundleUtils "github.com/open-policy-agent/opa/internal/bundle" - cfg "github.com/open-policy-agent/opa/internal/config" - initload "github.com/open-policy-agent/opa/internal/runtime/init" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/resolver/wasm" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" -) - -// Factory defines the interface OPA uses to instantiate your plugin. -// -// When OPA processes it's configuration it looks for factories that -// have been registered by calling runtime.RegisterPlugin. Factories -// are registered to a name which is used to key into the -// configuration blob. If your plugin has not been configured, your -// factory will not be invoked. -// -// plugins: -// my_plugin1: -// some_key: foo -// # my_plugin2: -// # some_key2: bar -// -// If OPA was started with the configuration above and received two -// calls to runtime.RegisterPlugins (one with NAME "my_plugin1" and -// one with NAME "my_plugin2"), it would only invoke the factory for -// for my_plugin1. -// -// OPA instantiates and reconfigures plugins in two steps. First, OPA -// will call Validate to check the configuration. 
Assuming the -// configuration is valid, your factory should return a configuration -// value that can be used to construct your plugin. Second, OPA will -// call New to instantiate your plugin providing the configuration -// value returned from the Validate call. -// -// Validate receives a slice of bytes representing plugin -// configuration and returns a configuration value that can be used to -// instantiate your plugin. The manager is provided to give access to -// the OPA's compiler, storage layer, and global configuration. Your -// Validate function will typically: -// -// 1. Deserialize the raw config bytes -// 2. Validate the deserialized config for semantic errors -// 3. Inject default values -// 4. Return a deserialized/parsed config -// -// New receives a valid configuration for your plugin and returns a -// plugin object. Your New function will typically: -// -// 1. Cast the config value to it's own type -// 2. Instantiate a plugin object -// 3. Return the plugin object -// 4. Update status via `plugins.Manager#UpdatePluginStatus` -// -// After a plugin has been created subsequent status updates can be -// send anytime the plugin enters a ready or error state. -type Factory interface { - Validate(manager *Manager, config []byte) (interface{}, error) - New(manager *Manager, config interface{}) Plugin -} - -// Plugin defines the interface OPA uses to manage your plugin. -// -// When OPA starts it will start all of the plugins it was configured -// to instantiate. Each time a new plugin is configured (via -// discovery), OPA will start it. You can use the Start call to spawn -// additional goroutines or perform initialization tasks. -// -// Currently OPA will not call Stop on plugins. -// -// When OPA receives new configuration for your plugin via discovery -// it will first Validate the configuration using your factory and -// then call Reconfigure. 
-type Plugin interface { - Start(ctx context.Context) error - Stop(ctx context.Context) - Reconfigure(ctx context.Context, config interface{}) -} - -// Triggerable defines the interface plugins use for manual plugin triggers. -type Triggerable interface { - Trigger(context.Context) error -} - -// State defines the state that a Plugin instance is currently -// in with pre-defined states. -type State string - -const ( - // StateNotReady indicates that the Plugin is not in an error state, but isn't - // ready for normal operation yet. This should only happen at - // initialization time. - StateNotReady State = "NOT_READY" - - // StateOK signifies that the Plugin is operating normally. - StateOK State = "OK" - - // StateErr indicates that the Plugin is in an error state and should not - // be considered as functional. - StateErr State = "ERROR" - - // StateWarn indicates the Plugin is operating, but in a potentially dangerous or - // degraded state. It may be used to indicate manual remediation is needed, or to - // alert admins of some other noteworthy state. - StateWarn State = "WARN" -) - -// TriggerMode defines the trigger mode utilized by a Plugin for bundle download, -// log upload etc. -type TriggerMode string - -const ( - // TriggerPeriodic represents periodic polling mechanism - TriggerPeriodic TriggerMode = "periodic" - - // TriggerManual represents manual triggering mechanism - TriggerManual TriggerMode = "manual" - - // DefaultTriggerMode represents default trigger mechanism - DefaultTriggerMode TriggerMode = "periodic" -) - -// default interval between OPA report uploads -var defaultUploadIntervalSec = int64(3600) - -// Status has a Plugin's current status plus an optional Message. -type Status struct { - State State `json:"state"` - Message string `json:"message,omitempty"` -} - -func (s *Status) String() string { - return fmt.Sprintf("{%v %q}", s.State, s.Message) -} - -// StatusListener defines a handler to register for status updates. 
-type StatusListener func(status map[string]*Status) - -// Manager implements lifecycle management of plugins and gives plugins access -// to engine-wide components like storage. -type Manager struct { - Store storage.Store - Config *config.Config - Info *ast.Term - ID string - - compiler *ast.Compiler - compilerMux sync.RWMutex - wasmResolvers []*wasm.Resolver - wasmResolversMtx sync.RWMutex - services map[string]rest.Client - keys map[string]*keys.Config - plugins []namedplugin - registeredTriggers []func(storage.Transaction) - mtx sync.Mutex - pluginStatus map[string]*Status - pluginStatusListeners map[string]StatusListener - initBundles map[string]*bundle.Bundle - initFiles loader.Result - maxErrors int - initialized bool - interQueryBuiltinCacheConfig *cache.Config - gracefulShutdownPeriod int - registeredCacheTriggers []func(*cache.Config) - logger logging.Logger - consoleLogger logging.Logger - serverInitialized chan struct{} - serverInitializedOnce sync.Once - printHook print.Hook - enablePrintStatements bool - router *mux.Router - prometheusRegister prometheus.Registerer - tracerProvider *trace.TracerProvider - distributedTacingOpts tracing.Options - registeredNDCacheTriggers []func(bool) - registeredTelemetryGatherers map[string]report.Gatherer - bootstrapConfigLabels map[string]string - hooks hooks.Hooks - enableTelemetry bool - reporter *report.Reporter - opaReportNotifyCh chan struct{} - stop chan chan struct{} - parserOptions ast.ParserOptions -} - -type managerContextKey string -type managerWasmResolverKey string - -const managerCompilerContextKey = managerContextKey("compiler") -const managerWasmResolverContextKey = managerWasmResolverKey("wasmResolvers") - -// SetCompilerOnContext puts the compiler into the storage context. Calling this -// function before committing updated policies to storage allows the manager to -// skip parsing and compiling of modules. Instead, the manager will use the -// compiler that was stored on the context. 
-func SetCompilerOnContext(context *storage.Context, compiler *ast.Compiler) { - context.Put(managerCompilerContextKey, compiler) -} - -// GetCompilerOnContext gets the compiler cached on the storage context. -func GetCompilerOnContext(context *storage.Context) *ast.Compiler { - compiler, ok := context.Get(managerCompilerContextKey).(*ast.Compiler) - if !ok { - return nil - } - return compiler -} - -// SetWasmResolversOnContext puts a set of Wasm Resolvers into the storage -// context. Calling this function before committing updated wasm modules to -// storage allows the manager to skip initializing modules before using them. -// Instead, the manager will use the compiler that was stored on the context. -func SetWasmResolversOnContext(context *storage.Context, rs []*wasm.Resolver) { - context.Put(managerWasmResolverContextKey, rs) -} - -// getWasmResolversOnContext gets the resolvers cached on the storage context. -func getWasmResolversOnContext(context *storage.Context) []*wasm.Resolver { - resolvers, ok := context.Get(managerWasmResolverContextKey).([]*wasm.Resolver) - if !ok { - return nil - } - return resolvers -} - -func validateTriggerMode(mode TriggerMode) error { - switch mode { - case TriggerPeriodic, TriggerManual: - return nil - default: - return fmt.Errorf("invalid trigger mode %q (want %q or %q)", mode, TriggerPeriodic, TriggerManual) - } -} - -// ValidateAndInjectDefaultsForTriggerMode validates the trigger mode and injects default values -func ValidateAndInjectDefaultsForTriggerMode(a, b *TriggerMode) (*TriggerMode, error) { - - if a == nil && b != nil { - err := validateTriggerMode(*b) - if err != nil { - return nil, err - } - return b, nil - } else if a != nil && b == nil { - err := validateTriggerMode(*a) - if err != nil { - return nil, err - } - return a, nil - } else if a != nil && b != nil { - if *a != *b { - return nil, fmt.Errorf("trigger mode mismatch: %s and %s (hint: check discovery configuration)", *a, *b) - } - err := 
validateTriggerMode(*a) - if err != nil { - return nil, err - } - return a, nil - } - - t := DefaultTriggerMode - return &t, nil -} - -type namedplugin struct { - name string - plugin Plugin -} - -// Info sets the runtime information on the manager. The runtime information is -// propagated to opa.runtime() built-in function calls. -func Info(term *ast.Term) func(*Manager) { - return func(m *Manager) { - m.Info = term - } -} - -// InitBundles provides the initial set of bundles to load. -func InitBundles(b map[string]*bundle.Bundle) func(*Manager) { - return func(m *Manager) { - m.initBundles = b - } -} - -// InitFiles provides the initial set of other data/policy files to load. -func InitFiles(f loader.Result) func(*Manager) { - return func(m *Manager) { - m.initFiles = f - } -} - -// MaxErrors sets the error limit for the manager's shared compiler. -func MaxErrors(n int) func(*Manager) { - return func(m *Manager) { - m.maxErrors = n - } -} - -// GracefulShutdownPeriod passes the configured graceful shutdown period to plugins -func GracefulShutdownPeriod(gracefulShutdownPeriod int) func(*Manager) { - return func(m *Manager) { - m.gracefulShutdownPeriod = gracefulShutdownPeriod - } -} - -// Logger configures the passed logger on the plugin manager (useful to -// configure default fields) -func Logger(logger logging.Logger) func(*Manager) { - return func(m *Manager) { - m.logger = logger - } -} - -// ConsoleLogger sets the passed logger to be used by plugins that are -// configured with console logging enabled. 
-func ConsoleLogger(logger logging.Logger) func(*Manager) { - return func(m *Manager) { - m.consoleLogger = logger - } -} - -func EnablePrintStatements(yes bool) func(*Manager) { - return func(m *Manager) { - m.enablePrintStatements = yes - } -} - -func PrintHook(h print.Hook) func(*Manager) { - return func(m *Manager) { - m.printHook = h - } -} - -func WithRouter(r *mux.Router) func(*Manager) { - return func(m *Manager) { - m.router = r - } -} - -// WithPrometheusRegister sets the passed prometheus.Registerer to be used by plugins -func WithPrometheusRegister(prometheusRegister prometheus.Registerer) func(*Manager) { - return func(m *Manager) { - m.prometheusRegister = prometheusRegister - } -} - -// WithTracerProvider sets the passed *trace.TracerProvider to be used by plugins -func WithTracerProvider(tracerProvider *trace.TracerProvider) func(*Manager) { - return func(m *Manager) { - m.tracerProvider = tracerProvider - } -} - -// WithDistributedTracingOpts sets the options to be used by distributed tracing. -func WithDistributedTracingOpts(tr tracing.Options) func(*Manager) { - return func(m *Manager) { - m.distributedTacingOpts = tr - } -} - -// WithHooks allows passing hooks to the plugin manager. -func WithHooks(hs hooks.Hooks) func(*Manager) { - return func(m *Manager) { - m.hooks = hs - } -} - -// WithParserOptions sets the parser options to be used by the plugin manager. -func WithParserOptions(opts ast.ParserOptions) func(*Manager) { - return func(m *Manager) { - m.parserOptions = opts - } -} - -// WithEnableTelemetry controls whether OPA will send telemetry reports to an external service. 
-func WithEnableTelemetry(enableTelemetry bool) func(*Manager) { - return func(m *Manager) { - m.enableTelemetry = enableTelemetry - } -} - -// WithTelemetryGatherers allows registration of telemetry gatherers which enable injection of additional data in the -// telemetry report -func WithTelemetryGatherers(gs map[string]report.Gatherer) func(*Manager) { - return func(m *Manager) { - m.registeredTelemetryGatherers = gs - } -} - -// New creates a new Manager using config. -func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*Manager, error) { - - parsedConfig, err := config.ParseConfig(raw, id) - if err != nil { - return nil, err - } - - m := &Manager{ - Store: store, - Config: parsedConfig, - ID: id, - pluginStatus: map[string]*Status{}, - pluginStatusListeners: map[string]StatusListener{}, - maxErrors: -1, - serverInitialized: make(chan struct{}), - bootstrapConfigLabels: parsedConfig.Labels, - } - - for _, f := range opts { - f(m) - } - - if m.logger == nil { - m.logger = logging.Get() - } - - if m.consoleLogger == nil { - m.consoleLogger = logging.New() - } - - m.hooks.Each(func(h hooks.Hook) { - if f, ok := h.(hooks.ConfigHook); ok { - if c, e := f.OnConfig(context.Background(), parsedConfig); e != nil { - err = errors.Join(err, e) - } else { - parsedConfig = c - } - } - }) - if err != nil { - return nil, err - } - - // do after options and overrides - m.keys, err = keys.ParseKeysConfig(parsedConfig.Keys) - if err != nil { - return nil, err - } - - m.interQueryBuiltinCacheConfig, err = cache.ParseCachingConfig(parsedConfig.Caching) - if err != nil { - return nil, err - } - - serviceOpts := cfg.ServiceOptions{ - Raw: parsedConfig.Services, - AuthPlugin: m.AuthPlugin, - Keys: m.keys, - Logger: m.logger, - DistributedTacingOpts: m.distributedTacingOpts, - } - - m.services, err = cfg.ParseServicesConfig(serviceOpts) - if err != nil { - return nil, err - } - - if m.enableTelemetry { - reporter, err := report.New(id, report.Options{Logger: 
m.logger}) - if err != nil { - return nil, err - } - m.reporter = reporter - - m.reporter.RegisterGatherer("min_compatible_version", func(_ context.Context) (any, error) { - var minimumCompatibleVersion string - if m.compiler != nil && m.compiler.Required != nil { - minimumCompatibleVersion, _ = m.compiler.Required.MinimumCompatibleVersion() - } - return minimumCompatibleVersion, nil - }) - - // register any additional gatherers - for k, g := range m.registeredTelemetryGatherers { - m.reporter.RegisterGatherer(k, g) - } - } - - return m, nil -} - -// Init returns an error if the manager could not initialize itself. Init() should -// be called before Start(). Init() is idempotent. -func (m *Manager) Init(ctx context.Context) error { - - if m.initialized { - return nil - } - - params := storage.TransactionParams{ - Write: true, - Context: storage.NewContext(), - } - - if m.enableTelemetry { - m.opaReportNotifyCh = make(chan struct{}) - m.stop = make(chan chan struct{}) - go m.sendOPAUpdateLoop(ctx) - } - - err := storage.Txn(ctx, m.Store, params, func(txn storage.Transaction) error { - - result, err := initload.InsertAndCompile(ctx, initload.InsertAndCompileOptions{ - Store: m.Store, - Txn: txn, - Files: m.initFiles, - Bundles: m.initBundles, - MaxErrors: m.maxErrors, - EnablePrintStatements: m.enablePrintStatements, - }) - - if err != nil { - return err - } - - SetCompilerOnContext(params.Context, result.Compiler) - - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) - if err != nil { - return err - } - SetWasmResolversOnContext(params.Context, resolvers) - - _, err = m.Store.Register(ctx, txn, storage.TriggerConfig{OnCommit: m.onCommit}) - return err - }) - - if err != nil { - if m.stop != nil { - done := make(chan struct{}) - m.stop <- done - <-done - } - - return err - } - - m.initialized = true - return nil -} - -// Labels returns the set of labels from the configuration. 
-func (m *Manager) Labels() map[string]string { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.Config.Labels -} - -// InterQueryBuiltinCacheConfig returns the configuration for the inter-query caches. -func (m *Manager) InterQueryBuiltinCacheConfig() *cache.Config { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.interQueryBuiltinCacheConfig -} - -// Register adds a plugin to the manager. When the manager is started, all of -// the plugins will be started. -func (m *Manager) Register(name string, plugin Plugin) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.plugins = append(m.plugins, namedplugin{ - name: name, - plugin: plugin, - }) - if _, ok := m.pluginStatus[name]; !ok { - m.pluginStatus[name] = &Status{State: StateNotReady} - } -} - -// Plugins returns the list of plugins registered with the manager. -func (m *Manager) Plugins() []string { - m.mtx.Lock() - defer m.mtx.Unlock() - result := make([]string, len(m.plugins)) - for i := range m.plugins { - result[i] = m.plugins[i].name - } - return result -} - -// Plugin returns the plugin registered with name or nil if name is not found. -func (m *Manager) Plugin(name string) Plugin { - m.mtx.Lock() - defer m.mtx.Unlock() - for i := range m.plugins { - if m.plugins[i].name == name { - return m.plugins[i].plugin - } - } - return nil -} - -// AuthPlugin returns the HTTPAuthPlugin registered with name or nil if name is not found. -func (m *Manager) AuthPlugin(name string) rest.HTTPAuthPlugin { - m.mtx.Lock() - defer m.mtx.Unlock() - for i := range m.plugins { - if m.plugins[i].name == name { - return m.plugins[i].plugin.(rest.HTTPAuthPlugin) - } - } - return nil -} - -// GetCompiler returns the manager's compiler. 
-func (m *Manager) GetCompiler() *ast.Compiler { - m.compilerMux.RLock() - defer m.compilerMux.RUnlock() - return m.compiler -} - -func (m *Manager) setCompiler(compiler *ast.Compiler) { - m.compilerMux.Lock() - defer m.compilerMux.Unlock() - m.compiler = compiler -} - -// GetRouter returns the managers router if set -func (m *Manager) GetRouter() *mux.Router { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.router -} - -// RegisterCompilerTrigger registers for change notifications when the compiler -// is changed. -func (m *Manager) RegisterCompilerTrigger(f func(storage.Transaction)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredTriggers = append(m.registeredTriggers, f) -} - -// GetWasmResolvers returns the manager's set of Wasm Resolvers. -func (m *Manager) GetWasmResolvers() []*wasm.Resolver { - m.wasmResolversMtx.RLock() - defer m.wasmResolversMtx.RUnlock() - return m.wasmResolvers -} - -func (m *Manager) setWasmResolvers(rs []*wasm.Resolver) { - m.wasmResolversMtx.Lock() - defer m.wasmResolversMtx.Unlock() - m.wasmResolvers = rs -} - -// Start starts the manager. Init() should be called once before Start(). -func (m *Manager) Start(ctx context.Context) error { - - if m == nil { - return nil - } - - if !m.initialized { - if err := m.Init(ctx); err != nil { - return err - } - } - - var toStart []Plugin - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - toStart = make([]Plugin, len(m.plugins)) - for i := range m.plugins { - toStart[i] = m.plugins[i].plugin - } - }() - - for i := range toStart { - if err := toStart[i].Start(ctx); err != nil { - return err - } - } - - return nil -} - -// Stop stops the manager, stopping all the plugins registered with it. -// Any plugin that needs to perform cleanup should do so within the duration -// of the graceful shutdown period passed with the context as a timeout. -// Note that a graceful shutdown period configured with the Manager instance -// will override the timeout of the passed in context (if applicable). 
-func (m *Manager) Stop(ctx context.Context) { - var toStop []Plugin - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - toStop = make([]Plugin, len(m.plugins)) - for i := range m.plugins { - toStop[i] = m.plugins[i].plugin - } - }() - - var cancel context.CancelFunc - if m.gracefulShutdownPeriod > 0 { - ctx, cancel = context.WithTimeout(ctx, time.Duration(m.gracefulShutdownPeriod)*time.Second) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer cancel() - for i := range toStop { - toStop[i].Stop(ctx) - } - if c, ok := m.Store.(interface{ Close(context.Context) error }); ok { - if err := c.Close(ctx); err != nil { - m.logger.Error("Error closing store: %v", err) - } - } - - if m.stop != nil { - done := make(chan struct{}) - m.stop <- done - <-done - } -} - -// Reconfigure updates the configuration on the manager. -func (m *Manager) Reconfigure(config *config.Config) error { - opts := cfg.ServiceOptions{ - Raw: config.Services, - AuthPlugin: m.AuthPlugin, - Logger: m.logger, - DistributedTacingOpts: m.distributedTacingOpts, - } - - keys, err := keys.ParseKeysConfig(config.Keys) - if err != nil { - return err - } - opts.Keys = keys - - services, err := cfg.ParseServicesConfig(opts) - if err != nil { - return err - } - - interQueryBuiltinCacheConfig, err := cache.ParseCachingConfig(config.Caching) - if err != nil { - return err - } - - m.mtx.Lock() - defer m.mtx.Unlock() - - // don't overwrite existing labels, only allow additions - always based on the boostrap config - if config.Labels == nil { - config.Labels = m.bootstrapConfigLabels - } else { - for label, value := range m.bootstrapConfigLabels { - config.Labels[label] = value - } - } - - // don't erase persistence directory - if config.PersistenceDirectory == nil { - config.PersistenceDirectory = m.Config.PersistenceDirectory - } - - m.Config = config - m.interQueryBuiltinCacheConfig = interQueryBuiltinCacheConfig - for name, client := range services { - m.services[name] = client - } - - for name, key 
:= range keys { - m.keys[name] = key - } - - for _, trigger := range m.registeredCacheTriggers { - trigger(interQueryBuiltinCacheConfig) - } - - for _, trigger := range m.registeredNDCacheTriggers { - trigger(config.NDBuiltinCache) - } - - return nil -} - -// PluginStatus returns the current statuses of any plugins registered. -func (m *Manager) PluginStatus() map[string]*Status { - m.mtx.Lock() - defer m.mtx.Unlock() - - return m.copyPluginStatus() -} - -// RegisterPluginStatusListener registers a StatusListener to be -// called when plugin status updates occur. -func (m *Manager) RegisterPluginStatusListener(name string, listener StatusListener) { - m.mtx.Lock() - defer m.mtx.Unlock() - - m.pluginStatusListeners[name] = listener -} - -// UnregisterPluginStatusListener removes a StatusListener registered with the -// same name. -func (m *Manager) UnregisterPluginStatusListener(name string) { - m.mtx.Lock() - defer m.mtx.Unlock() - - delete(m.pluginStatusListeners, name) -} - -// UpdatePluginStatus updates a named plugins status. Any registered -// listeners will be called with a copy of the new state of all -// plugins. 
-func (m *Manager) UpdatePluginStatus(pluginName string, status *Status) { - - var toNotify map[string]StatusListener - var statuses map[string]*Status - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - m.pluginStatus[pluginName] = status - toNotify = make(map[string]StatusListener, len(m.pluginStatusListeners)) - for k, v := range m.pluginStatusListeners { - toNotify[k] = v - } - statuses = m.copyPluginStatus() - }() - - for _, l := range toNotify { - l(statuses) - } -} - -func (m *Manager) copyPluginStatus() map[string]*Status { - statusCpy := map[string]*Status{} - for k, v := range m.pluginStatus { - var cpy *Status - if v != nil { - cpy = &Status{ - State: v.State, - Message: v.Message, - } - } - statusCpy[k] = cpy - } - return statusCpy -} - -func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { - - compiler := GetCompilerOnContext(event.Context) - - // If the context does not contain the compiler fallback to loading the - // compiler from the store. Currently the bundle plugin sets the - // compiler on the context but the server does not (nor would users - // implementing their own policy loading.) - if compiler == nil && event.PolicyChanged() { - compiler, _ = loadCompilerFromStore(ctx, m.Store, txn, m.enablePrintStatements, m.ParserOptions()) - } - - if compiler != nil { - m.setCompiler(compiler) - - if m.enableTelemetry && event.PolicyChanged() { - m.opaReportNotifyCh <- struct{}{} - } - - for _, f := range m.registeredTriggers { - f(txn) - } - } - - // Similar to the compiler, look for a set of resolvers on the transaction - // context. If they are not set we may need to reload from the store. 
- resolvers := getWasmResolversOnContext(event.Context) - if resolvers != nil { - m.setWasmResolvers(resolvers) - - } else if event.DataChanged() { - if requiresWasmResolverReload(event) { - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) - if err != nil { - panic(err) - } - m.setWasmResolvers(resolvers) - } else { - err := m.updateWasmResolversData(ctx, event) - if err != nil { - panic(err) - } - } - } -} - -func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, enablePrintStatements bool, popts ast.ParserOptions) (*ast.Compiler, error) { - policies, err := store.ListPolicies(ctx, txn) - if err != nil { - return nil, err - } - modules := map[string]*ast.Module{} - - for _, policy := range policies { - bs, err := store.GetPolicy(ctx, txn, policy) - if err != nil { - return nil, err - } - module, err := ast.ParseModuleWithOpts(policy, string(bs), popts) - if err != nil { - return nil, err - } - modules[policy] = module - } - - compiler := ast.NewCompiler().WithEnablePrintStatements(enablePrintStatements) - compiler.Compile(modules) - return compiler, nil -} - -func requiresWasmResolverReload(event storage.TriggerEvent) bool { - // If the data changes touched the bundle path (which includes - // the wasm modules) we will reload them. Otherwise update - // data for each module already on the manager. 
- for _, dataEvent := range event.Data { - if dataEvent.Path.HasPrefix(bundle.BundlesBasePath) { - return true - } - } - return false -} - -func (m *Manager) updateWasmResolversData(ctx context.Context, event storage.TriggerEvent) error { - m.wasmResolversMtx.Lock() - defer m.wasmResolversMtx.Unlock() - - for _, resolver := range m.wasmResolvers { - for _, dataEvent := range event.Data { - var err error - if dataEvent.Removed { - err = resolver.RemoveDataPath(ctx, dataEvent.Path) - } else { - err = resolver.SetDataPath(ctx, dataEvent.Path, dataEvent.Data) - } - if err != nil { - return fmt.Errorf("failed to update wasm runtime data: %s", err) - } - } - } - return nil -} - -// PublicKeys returns a public keys that can be used for verifying signed bundles. -func (m *Manager) PublicKeys() map[string]*keys.Config { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.keys -} - -// Client returns a client for communicating with a remote service. -func (m *Manager) Client(name string) rest.Client { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.services[name] -} - -// Services returns a list of services that m can provide clients for. -func (m *Manager) Services() []string { - m.mtx.Lock() - defer m.mtx.Unlock() - s := make([]string, 0, len(m.services)) - for name := range m.services { - s = append(s, name) - } - return s -} - -// Logger gets the standard logger for this plugin manager. -func (m *Manager) Logger() logging.Logger { - return m.logger -} - -// ConsoleLogger gets the console logger for this plugin manager. -func (m *Manager) ConsoleLogger() logging.Logger { - return m.consoleLogger -} - -func (m *Manager) PrintHook() print.Hook { - return m.printHook -} - -func (m *Manager) EnablePrintStatements() bool { - return m.enablePrintStatements -} - -// ServerInitialized signals a channel indicating that the OPA -// server has finished initialization. 
-func (m *Manager) ServerInitialized() { - m.serverInitializedOnce.Do(func() { close(m.serverInitialized) }) -} - -// ServerInitializedChannel returns a receive-only channel that -// is closed when the OPA server has finished initialization. -// Be aware that the socket of the server listener may not be -// open by the time this channel is closed. There is a very -// small window where the socket may still be closed, due to -// a race condition. -func (m *Manager) ServerInitializedChannel() <-chan struct{} { - return m.serverInitialized -} - -// RegisterCacheTrigger accepts a func that receives new inter-query cache config generated by -// a reconfigure of the plugin manager, so that it can be propagated to existing inter-query caches. -func (m *Manager) RegisterCacheTrigger(trigger func(*cache.Config)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredCacheTriggers = append(m.registeredCacheTriggers, trigger) -} - -// PrometheusRegister gets the prometheus.Registerer for this plugin manager. -func (m *Manager) PrometheusRegister() prometheus.Registerer { - return m.prometheusRegister -} - -// TracerProvider gets the *trace.TracerProvider for this plugin manager. 
-func (m *Manager) TracerProvider() *trace.TracerProvider { - return m.tracerProvider -} - -func (m *Manager) RegisterNDCacheTrigger(trigger func(bool)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredNDCacheTriggers = append(m.registeredNDCacheTriggers, trigger) -} - -func (m *Manager) sendOPAUpdateLoop(ctx context.Context) { - ticker := time.NewTicker(time.Duration(int64(time.Second) * defaultUploadIntervalSec)) - mr.New(mr.NewSource(time.Now().UnixNano())) - - ctx, cancel := context.WithCancel(ctx) - - var opaReportNotify bool - - for { - select { - case <-m.opaReportNotifyCh: - opaReportNotify = true - case <-ticker.C: - ticker.Stop() - - if opaReportNotify { - opaReportNotify = false - _, err := m.reporter.SendReport(ctx) - if err != nil { - m.logger.WithFields(map[string]interface{}{"err": err}).Debug("Unable to send OPA telemetry report.") - } - } - - newInterval := mr.Int63n(defaultUploadIntervalSec) + defaultUploadIntervalSec - ticker = time.NewTicker(time.Duration(int64(time.Second) * newInterval)) - case done := <-m.stop: - cancel() - ticker.Stop() - done <- struct{}{} - return - } - } -} - -func (m *Manager) ParserOptions() ast.ParserOptions { - return m.parserOptions -} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go deleted file mode 100644 index 11e72001a2..0000000000 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go +++ /dev/null @@ -1,1002 +0,0 @@ -// Copyright 2019 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package rest - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/sha512" - "crypto/tls" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "hash" - "io" - "math/big" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws" - "github.com/open-policy-agent/opa/internal/jwx/jws/sign" - "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/internal/uuid" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" -) - -const ( - // Default to s3 when the service for sigv4 signing is not specified for backwards compatibility - awsSigv4SigningDefaultService = "s3" - // Default to urn:ietf:params:oauth:client-assertion-type:jwt-bearer for ClientAssertionType when not specified - defaultClientAssertionType = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" -) - -// DefaultTLSConfig defines standard TLS configurations based on the Config -func DefaultTLSConfig(c Config) (*tls.Config, error) { - t := &tls.Config{} - url, err := url.Parse(c.URL) - if err != nil { - return nil, err - } - if url.Scheme == "https" { - t.InsecureSkipVerify = c.AllowInsecureTLS - } - - if c.TLS != nil && c.TLS.CACert != "" { - caCert, err := os.ReadFile(c.TLS.CACert) - if err != nil { - return nil, err - } - - var rootCAs *x509.CertPool - if c.TLS.SystemCARequired { - rootCAs, err = x509.SystemCertPool() - if err != nil { - return nil, err - } - } else { - rootCAs = x509.NewCertPool() - } - - ok := rootCAs.AppendCertsFromPEM(caCert) - if !ok { - return nil, errors.New("unable to parse and append CA certificate to certificate pool") - } - t.RootCAs = rootCAs - } - - return t, nil -} - -// DefaultRoundTripperClient is a reasonable set of defaults for HTTP auth plugins -func DefaultRoundTripperClient(t *tls.Config, 
timeout int64) *http.Client { - // Ensure we use a http.Transport with proper settings: the zero values are not - // a good choice, as they cause leaking connections: - // https://github.com/golang/go/issues/19620 - - // copy, we don't want to alter the default client's Transport - tr := http.DefaultTransport.(*http.Transport).Clone() - tr.ResponseHeaderTimeout = time.Duration(timeout) * time.Second - tr.TLSClientConfig = t - - c := *http.DefaultClient - c.Transport = tr - return &c -} - -// defaultAuthPlugin represents baseline 'no auth' behavior if no alternative plugin is specified for a service -type defaultAuthPlugin struct{} - -func (*defaultAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (*defaultAuthPlugin) Prepare(*http.Request) error { - return nil -} - -type serverTLSConfig struct { - CACert string `json:"ca_cert,omitempty"` - SystemCARequired bool `json:"system_ca_required,omitempty"` -} - -// bearerAuthPlugin represents authentication via a bearer token in the HTTP Authorization header -type bearerAuthPlugin struct { - Token string `json:"token"` - TokenPath string `json:"token_path"` - Scheme string `json:"scheme,omitempty"` - - // encode is set to true for the OCIDownloader because - // it expects tokens in plain text but needs them in base64. 
- encode bool -} - -func (ap *bearerAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - if ap.Token != "" && ap.TokenPath != "" { - return nil, errors.New("invalid config: specify a value for either the \"token\" or \"token_path\" field") - } - - if ap.Scheme == "" { - ap.Scheme = "Bearer" - } - - if c.Type == "oci" { - // Standard rest clients use the bearer token as it is defined in the Config - // but the OCIDownloader needs it encoded to base64 before using to sign a request. - ap.encode = true - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *bearerAuthPlugin) Prepare(req *http.Request) error { - token := ap.Token - - if ap.TokenPath != "" { - bytes, err := os.ReadFile(ap.TokenPath) - if err != nil { - return err - } - token = strings.TrimSpace(string(bytes)) - } - - if ap.encode { - token = base64.StdEncoding.EncodeToString([]byte(token)) - } - - req.Header.Add("Authorization", fmt.Sprintf("%v %v", ap.Scheme, token)) - return nil -} - -type tokenEndpointResponse struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int64 `json:"expires_in"` -} - -type awsKmsKeyConfig struct { - Name string `json:"name"` - Algorithm string `json:"algorithm"` -} - -func convertSignatureToBase64(alg string, der []byte) (string, error) { - r, s, derErr := pointsFromDER(der) - if derErr != nil { - return "", fmt.Errorf("failed to read points from der %v", derErr) - } - - signatureData, err := convertPointsToBase64(alg, r.Bytes(), s.Bytes()) - if err != nil { - return "", err - } - return signatureData, nil -} - -func pointsFromDER(der []byte) (R, S *big.Int, err error) { - R, S = &big.Int{}, &big.Int{} - data := asn1.RawValue{} - if _, err := asn1.Unmarshal(der, &data); err != nil { - return nil, nil, fmt.Errorf("failed to unmarshall the signature from DER format %v", err) - - } - // 
https://docs.aws.amazon.com/kms/latest/APIReference/API_Sign.html#API_Sign_ResponseSyntax - // https://datatracker.ietf.org/doc/html/rfc3279#section-2.2.3 - // The format of our DER string is 0x02 + rlen + r + 0x02 + slen + s - rLen := data.Bytes[1] // The entire length of R + offset of 2 for 0x02 and rlen - r := data.Bytes[2 : rLen+2] - // Ignore the next 0x02 and slen bytes and just take the start of S to the end of the byte array - s := data.Bytes[rLen+4:] - R.SetBytes(r) - S.SetBytes(s) - return -} - -func convertPointsToBase64(alg string, r, s []byte) (string, error) { - curveBits, err := retrieveCurveBits(alg) - if err != nil { - return "", err - } - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes++ - } - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(r):], r) - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(s):], s) - signatureEnc := append(rBytesPadded, sBytesPadded...) 
- - return base64.RawURLEncoding.EncodeToString(signatureEnc), nil -} - -func retrieveCurveBits(alg string) (int, error) { - var curveBits int - switch alg { - case "ECDSA_SHA_256": - curveBits = 256 - case "ECDSA_SHA_384": - curveBits = 384 - case "ECDSA_SHA_512": - curveBits = 512 - default: - return 0, fmt.Errorf("unsupported sign algorithm %s", alg) - } - return curveBits, nil -} - -func messageDigest(message []byte, alg string) ([]byte, error) { - var digest hash.Hash - - switch alg { - case "ECDSA_SHA_256": - digest = sha256.New() - case "ECDSA_SHA_384": - digest = sha512.New384() - case "ECDSA_SHA_512": - digest = sha512.New() - default: - return []byte{}, fmt.Errorf("unsupported sign algorithm %s", alg) - } - - digest.Write(message) - return digest.Sum(nil), nil -} - -// oauth2ClientCredentialsAuthPlugin represents authentication via a bearer token in the HTTP Authorization header -// obtained through the OAuth2 client credentials flow -type oauth2ClientCredentialsAuthPlugin struct { - GrantType string `json:"grant_type"` - TokenURL string `json:"token_url"` - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - SigningKeyID string `json:"signing_key"` - Thumbprint string `json:"thumbprint"` - Claims map[string]interface{} `json:"additional_claims"` - IncludeJti bool `json:"include_jti_claim"` - Scopes []string `json:"scopes,omitempty"` - AdditionalHeaders map[string]string `json:"additional_headers,omitempty"` - AdditionalParameters map[string]string `json:"additional_parameters,omitempty"` - AWSKmsKey *awsKmsKeyConfig `json:"aws_kms,omitempty"` - AWSSigningPlugin *awsSigningAuthPlugin `json:"aws_signing,omitempty"` - ClientAssertionType string `json:"client_assertion_type"` - ClientAssertion string `json:"client_assertion"` - ClientAssertionPath string `json:"client_assertion_path"` - - signingKey *keys.Config - signingKeyParsed interface{} - tokenCache *oauth2Token - tlsSkipVerify bool - logger logging.Logger -} - -type 
oauth2Token struct { - Token string - ExpiresAt time.Time -} - -func (ap *oauth2ClientCredentialsAuthPlugin) createAuthJWT(ctx context.Context, extClaims map[string]interface{}, signingKey interface{}) (*string, error) { - now := time.Now() - claims := map[string]interface{}{ - "iat": now.Unix(), - "exp": now.Add(10 * time.Minute).Unix(), - } - for k, v := range extClaims { - claims[k] = v - } - - if len(ap.Scopes) > 0 { - claims["scope"] = strings.Join(ap.Scopes, " ") - } - - if ap.IncludeJti { - jti, err := uuid.New(rand.Reader) - if err != nil { - return nil, err - } - claims["jti"] = jti - } - - payload, err := json.Marshal(claims) - if err != nil { - return nil, err - } - - var jwsHeaders []byte - var signatureAlg string - if ap.AWSKmsKey == nil { - signatureAlg = ap.signingKey.Algorithm - } else { - signatureAlg, err = ap.mapKMSAlgToSign(ap.AWSKmsKey.Algorithm) - if err != nil { - return nil, err - } - } - if ap.Thumbprint != "" { - bytes, err := hex.DecodeString(ap.Thumbprint) - if err != nil { - return nil, err - } - x5t := base64.URLEncoding.EncodeToString(bytes) - jwsHeaders = []byte(fmt.Sprintf(`{"typ":"JWT","alg":"%s","x5t":"%s"}`, signatureAlg, x5t)) - } else { - jwsHeaders = []byte(fmt.Sprintf(`{"typ":"JWT","alg":"%s"}`, signatureAlg)) - } - var jwsCompact []byte - if ap.AWSKmsKey == nil { - jwsCompact, err = jws.SignLiteral(payload, - jwa.SignatureAlgorithm(signatureAlg), - signingKey, - jwsHeaders, - rand.Reader) - } else { - jwsCompact, err = ap.SignWithKMS(ctx, payload, jwsHeaders) - } - if err != nil { - return nil, err - } - jwt := string(jwsCompact) - - return &jwt, nil -} - -func (ap *oauth2ClientCredentialsAuthPlugin) mapKMSAlgToSign(alg string) (string, error) { - switch alg { - case "ECDSA_SHA_256": - return "ES256", nil - case "ECDSA_SHA_384": - return "ES384", nil - case "ECDSA_SHA_512": - return "ES512", nil - default: - return "", fmt.Errorf("unsupported sign algorithm %s", alg) - } -} - -// SignWithKMS will sign the JWT in AWS using 
the key stored in the supplied kmsArn -func (ap *oauth2ClientCredentialsAuthPlugin) SignWithKMS(ctx context.Context, payload []byte, hdrBuf []byte) ([]byte, error) { - - encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf) - encodedPayload := base64.RawURLEncoding.EncodeToString(payload) - input := strings.Join( - []string{ - encodedHdr, - encodedPayload, - }, ".", - ) - digest, err := messageDigest([]byte(input), ap.AWSKmsKey.Algorithm) - if err != nil { - return nil, err - } - if ap.AWSSigningPlugin != nil { - signature, err := ap.AWSSigningPlugin.SignDigest(ctx, digest, ap.AWSKmsKey.Name, ap.AWSKmsKey.Algorithm) - if err != nil { - return nil, err - } - der, err := base64.StdEncoding.DecodeString(signature) - if err != nil { - return nil, err - } - signatureData, err := convertSignatureToBase64(ap.AWSKmsKey.Algorithm, der) - if err != nil { - return nil, err - } - - signedAssertion := input + "." + signatureData - - return []byte(signedAssertion), nil - } - return nil, errors.New("missing AWS credentials, failed to sign the assertion with kms") -} - -func (ap *oauth2ClientCredentialsAuthPlugin) parseSigningKey(c Config) (err error) { - if ap.SigningKeyID == "" { - return errors.New("signing_key required for jwt_bearer grant type") - } - - if val, ok := c.keys[ap.SigningKeyID]; ok { - if val.PrivateKey == "" { - return errors.New("referenced signing_key does not include a private key") - } - ap.signingKey = val - } else { - return errors.New("signing_key refers to non-existent key") - } - - alg := jwa.SignatureAlgorithm(ap.signingKey.Algorithm) - ap.signingKeyParsed, err = sign.GetSigningKey(ap.signingKey.PrivateKey, alg) - if err != nil { - return err - } - - return nil -} - -func (ap *oauth2ClientCredentialsAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - if ap.GrantType == "" { - // Use client_credentials as default to not break existing config - ap.GrantType = 
grantTypeClientCredentials - } else if ap.GrantType != grantTypeClientCredentials && ap.GrantType != grantTypeJwtBearer { - return nil, errors.New("grant_type must be either client_credentials or jwt_bearer") - } - - if ap.GrantType == grantTypeJwtBearer || (ap.GrantType == grantTypeClientCredentials && ap.SigningKeyID != "") { - if err = ap.parseSigningKey(c); err != nil { - return nil, err - } - } - - // Inherit skip verify from the "parent" settings. Should this be configurable on the credentials too? - ap.tlsSkipVerify = c.AllowInsecureTLS - - ap.logger = c.logger - - if !strings.HasPrefix(ap.TokenURL, "https://") { - return nil, errors.New("token_url required to use https scheme") - } - if ap.GrantType == grantTypeClientCredentials { - clientCredentialExists := make(map[string]bool) - clientCredentialExists["client_secret"] = ap.ClientSecret != "" - clientCredentialExists["signing_key"] = ap.SigningKeyID != "" - clientCredentialExists["aws_kms"] = ap.AWSKmsKey != nil - clientCredentialExists["client_assertion"] = ap.ClientAssertion != "" - clientCredentialExists["client_assertion_path"] = ap.ClientAssertionPath != "" - - var notEmptyVarCount int - - for _, credentialSet := range clientCredentialExists { - if credentialSet { - notEmptyVarCount++ - } - } - - if notEmptyVarCount == 0 { - return nil, errors.New("please provide one of client_secret, signing_key, aws_kms, client_assertion, or client_assertion_path required") - } - - if notEmptyVarCount > 1 { - return nil, errors.New("can only use one of client_secret, signing_key, aws_kms, client_assertion, or client_assertion_path") - } - - if clientCredentialExists["aws_kms"] { - if ap.AWSSigningPlugin == nil { - return nil, errors.New("aws_kms and aws_signing required") - } - // initialize the awsSigningAuthPlugin - _, err = ap.AWSSigningPlugin.NewClient(c) - if err != nil { - return nil, err - } - } else if clientCredentialExists["client_assertion"] { - if ap.ClientAssertionType == "" { - ap.ClientAssertionType 
= defaultClientAssertionType - } - if ap.ClientID == "" { - return nil, errors.New("client_id and client_assertion required") - } - } else if clientCredentialExists["client_assertion_path"] { - if ap.ClientAssertionType == "" { - ap.ClientAssertionType = defaultClientAssertionType - } - if ap.ClientID == "" { - return nil, errors.New("client_id and client_assertion_path required") - } - } else if clientCredentialExists["client_secret"] { - if ap.ClientID == "" { - return nil, errors.New("client_id and client_secret required") - } - } - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -// requestToken tries to obtain an access token using either the client credentials flow -// https://tools.ietf.org/html/rfc6749#section-4.4 -// or the JWT authorization grant -// https://tools.ietf.org/html/rfc7523 -func (ap *oauth2ClientCredentialsAuthPlugin) requestToken(ctx context.Context) (*oauth2Token, error) { - body := url.Values{} - if ap.GrantType == grantTypeJwtBearer { - authJwt, err := ap.createAuthJWT(ctx, ap.Claims, ap.signingKeyParsed) - if err != nil { - return nil, err - } - body.Add("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer") - body.Add("assertion", *authJwt) - } else { - body.Add("grant_type", grantTypeClientCredentials) - - if ap.SigningKeyID != "" || ap.AWSKmsKey != nil { - authJwt, err := ap.createAuthJWT(ctx, ap.Claims, ap.signingKeyParsed) - if err != nil { - return nil, err - } - body.Add("client_assertion_type", defaultClientAssertionType) - body.Add("client_assertion", *authJwt) - - if ap.ClientID != "" { - body.Add("client_id", ap.ClientID) - } - } else if ap.ClientAssertion != "" { - if ap.ClientAssertionType == "" { - ap.ClientAssertionType = defaultClientAssertionType - } - if ap.ClientID != "" { - body.Add("client_id", ap.ClientID) - } - body.Add("client_assertion_type", ap.ClientAssertionType) - body.Add("client_assertion", ap.ClientAssertion) - } else if ap.ClientAssertionPath != "" { - if 
ap.ClientAssertionType == "" { - ap.ClientAssertionType = defaultClientAssertionType - } - bytes, err := os.ReadFile(ap.ClientAssertionPath) - if err != nil { - return nil, err - } - if ap.ClientID != "" { - body.Add("client_id", ap.ClientID) - } - body.Add("client_assertion_type", ap.ClientAssertionType) - body.Add("client_assertion", strings.TrimSpace(string(bytes))) - } - } - - if len(ap.Scopes) > 0 { - body.Add("scope", strings.Join(ap.Scopes, " ")) - } - - for k, v := range ap.AdditionalParameters { - body.Set(k, v) - } - - r, err := http.NewRequestWithContext(ctx, "POST", ap.TokenURL, strings.NewReader(body.Encode())) - if err != nil { - return nil, err - } - r.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - if ap.GrantType == grantTypeClientCredentials && ap.ClientSecret != "" { - r.SetBasicAuth(ap.ClientID, ap.ClientSecret) - } - - for k, v := range ap.AdditionalHeaders { - r.Header.Add(k, v) - } - - client := DefaultRoundTripperClient(&tls.Config{InsecureSkipVerify: ap.tlsSkipVerify}, 10) - response, err := client.Do(r) - if err != nil { - return nil, err - } - defer response.Body.Close() - - bodyRaw, err := io.ReadAll(response.Body) - if err != nil { - return nil, err - } - - if response.StatusCode != 200 { - return nil, fmt.Errorf("error in response from OAuth2 token endpoint: %v", string(bodyRaw)) - } - - var tokenResponse tokenEndpointResponse - err = json.Unmarshal(bodyRaw, &tokenResponse) - if err != nil { - return nil, err - } - - if strings.ToLower(tokenResponse.TokenType) != "bearer" { - return nil, errors.New("unknown token type returned from token endpoint") - } - - return &oauth2Token{ - Token: strings.TrimSpace(tokenResponse.AccessToken), - ExpiresAt: time.Now().Add(time.Duration(tokenResponse.ExpiresIn) * time.Second), - }, nil -} - -func (ap *oauth2ClientCredentialsAuthPlugin) Prepare(req *http.Request) error { - minTokenLifetime := float64(10) - if ap.tokenCache == nil || time.Until(ap.tokenCache.ExpiresAt).Seconds() < 
minTokenLifetime { - ap.logger.Debug("Requesting token from token_url %v", ap.TokenURL) - token, err := ap.requestToken(req.Context()) - if err != nil { - return err - } - ap.tokenCache = token - } - - req.Header.Add("Authorization", fmt.Sprintf("Bearer %v", ap.tokenCache.Token)) - return nil -} - -// clientTLSAuthPlugin represents authentication via client certificate on a TLS connection -type clientTLSAuthPlugin struct { - Cert string `json:"cert"` - PrivateKey string `json:"private_key"` - PrivateKeyPassphrase string `json:"private_key_passphrase,omitempty"` - CACert string `json:"ca_cert,omitempty"` // Deprecated: Use `services[_].tls.ca_cert` instead - SystemCARequired bool `json:"system_ca_required,omitempty"` // Deprecated: Use `services[_].tls.system_ca_required` instead -} - -func (ap *clientTLSAuthPlugin) NewClient(c Config) (*http.Client, error) { - tlsConfig, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - if ap.Cert == "" { - return nil, errors.New("client certificate is needed when client TLS is enabled") - } - if ap.PrivateKey == "" { - return nil, errors.New("private key is needed when client TLS is enabled") - } - - var keyPEMBlock []byte - data, err := os.ReadFile(ap.PrivateKey) - if err != nil { - return nil, err - } - - block, _ := pem.Decode(data) - if block == nil { - return nil, errors.New("PEM data could not be found") - } - - // nolint: staticcheck // We don't want to forbid users from using this encryption. - if x509.IsEncryptedPEMBlock(block) { - if ap.PrivateKeyPassphrase == "" { - return nil, errors.New("client certificate passphrase is needed, because the certificate is password encrypted") - } - // nolint: staticcheck // We don't want to forbid users from using this encryption. 
- block, err := x509.DecryptPEMBlock(block, []byte(ap.PrivateKeyPassphrase)) - if err != nil { - return nil, err - } - key, err := x509.ParsePKCS8PrivateKey(block) - if err != nil { - key, err = x509.ParsePKCS1PrivateKey(block) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) - } - } - rsa, ok := key.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - keyPEMBlock = pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(rsa), - }, - ) - } else { - keyPEMBlock = data - } - - certPEMBlock, err := os.ReadFile(ap.Cert) - if err != nil { - return nil, err - } - - cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return nil, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - - var client *http.Client - - if c.TLS != nil && c.TLS.CACert != "" { - client = DefaultRoundTripperClient(tlsConfig, *c.ResponseHeaderTimeoutSeconds) - } else { - if ap.CACert != "" { - c.logger.Warn("Deprecated 'services[_].credentials.client_tls.ca_cert' configuration specified. Use 'services[_].tls.ca_cert' instead. 
See https://www.openpolicyagent.org/docs/latest/configuration/#services") - caCert, err := os.ReadFile(ap.CACert) - if err != nil { - return nil, err - } - - var caCertPool *x509.CertPool - if ap.SystemCARequired { - caCertPool, err = x509.SystemCertPool() - if err != nil { - return nil, err - } - } else { - caCertPool = x509.NewCertPool() - } - - ok := caCertPool.AppendCertsFromPEM(caCert) - if !ok { - return nil, errors.New("unable to parse and append CA certificate to certificate pool") - } - tlsConfig.RootCAs = caCertPool - } - - client = DefaultRoundTripperClient(tlsConfig, *c.ResponseHeaderTimeoutSeconds) - } - - return client, nil -} - -func (ap *clientTLSAuthPlugin) Prepare(_ *http.Request) error { - return nil -} - -// awsSigningAuthPlugin represents authentication using AWS V4 HMAC signing in the Authorization header -type awsSigningAuthPlugin struct { - AWSEnvironmentCredentials *awsEnvironmentCredentialService `json:"environment_credentials,omitempty"` - AWSMetadataCredentials *awsMetadataCredentialService `json:"metadata_credentials,omitempty"` - AWSAssumeRoleCredentials *awsAssumeRoleCredentialService `json:"assume_role_credentials,omitempty"` - AWSWebIdentityCredentials *awsWebIdentityCredentialService `json:"web_identity_credentials,omitempty"` - AWSProfileCredentials *awsProfileCredentialService `json:"profile_credentials,omitempty"` - - AWSService string `json:"service,omitempty"` - AWSSignatureVersion string `json:"signature_version,omitempty"` - - host string - ecrAuthPlugin *ecrAuthPlugin - kmsSignPlugin *awsKMSSignPlugin - - logger logging.Logger -} - -type awsCredentialServiceChain struct { - awsCredentialServices []awsCredentialService - logger logging.Logger -} - -func (acs *awsCredentialServiceChain) addService(service awsCredentialService) { - acs.awsCredentialServices = append(acs.awsCredentialServices, service) -} - -type awsCredentialCheckErrors []*awsCredentialCheckError - -func (e awsCredentialCheckErrors) Error() string { - - if 
len(e) == 0 { - return "no error(s)" - } - - if len(e) == 1 { - return fmt.Sprintf("1 error occurred: %v", e[0].Error()) - } - - s := make([]string, len(e)) - for i, err := range e { - s[i] = err.Error() - } - - return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n")) -} - -type awsCredentialCheckError struct { - message string -} - -func newAWSCredentialError(message string) *awsCredentialCheckError { - return &awsCredentialCheckError{ - message: message, - } -} - -func (e *awsCredentialCheckError) Error() string { - return e.message -} - -func (acs *awsCredentialServiceChain) credentials(ctx context.Context) (aws.Credentials, error) { - var errs awsCredentialCheckErrors - - for _, service := range acs.awsCredentialServices { - credential, err := service.credentials(ctx) - if err != nil { - acs.logger.Debug("awsSigningAuthPlugin:%T failed: %v", service, err) - - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return aws.Credentials{}, err - } - - errs = append(errs, newAWSCredentialError(err.Error())) - continue - } - - acs.logger.Debug("awsSigningAuthPlugin:%T successful", service) - return credential, nil - } - - return aws.Credentials{}, fmt.Errorf("all AWS credential providers failed: %v", errs) -} - -func (ap *awsSigningAuthPlugin) awsCredentialService() awsCredentialService { - chain := awsCredentialServiceChain{ - logger: ap.logger, - } - - /* - Here we maintain the order of addition to the chain inline with - the order of credential providers followed by default by the - AWS SDK. 
For example - - https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/DefaultAWSCredentialsProviderChain.html - */ - - if ap.AWSEnvironmentCredentials != nil { - ap.AWSEnvironmentCredentials.logger = ap.logger - chain.addService(ap.AWSEnvironmentCredentials) - } - - if ap.AWSAssumeRoleCredentials != nil { - ap.AWSAssumeRoleCredentials.logger = ap.logger - chain.addService(ap.AWSAssumeRoleCredentials) - } - - if ap.AWSWebIdentityCredentials != nil { - ap.AWSWebIdentityCredentials.logger = ap.logger - chain.addService(ap.AWSWebIdentityCredentials) - } - - if ap.AWSProfileCredentials != nil { - ap.AWSProfileCredentials.logger = ap.logger - chain.addService(ap.AWSProfileCredentials) - } - - if ap.AWSMetadataCredentials != nil { - ap.AWSMetadataCredentials.logger = ap.logger - chain.addService(ap.AWSMetadataCredentials) - } - - return &chain -} - -func (ap *awsSigningAuthPlugin) NewClient(c Config) (*http.Client, error) { - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - url, err := url.Parse(c.URL) - if err != nil { - return nil, err - } - - ap.host = url.Host - - if ap.logger == nil { - ap.logger = c.logger - } - - if err := ap.validateAndSetDefaults(c.Type); err != nil { - return nil, err - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *awsSigningAuthPlugin) Prepare(req *http.Request) error { - if ap.host != req.URL.Host { - // Return early if the host does not match. - // This can happen when the OCI registry responded with a redirect to another host. - // For instance, ECR redirects to S3 and the ECR auth header should not be included in the S3 request. 
- return nil - } - - switch ap.AWSService { - case "ecr": - return ap.ecrAuthPlugin.Prepare(req) - default: - creds, err := ap.awsCredentialService().credentials(req.Context()) - if err != nil { - return fmt.Errorf("failed to get aws credentials: %w", err) - } - - ap.logger.Debug("Signing request with AWS credentials.") - - return aws.SignRequest(req, ap.AWSService, creds, time.Now(), ap.AWSSignatureVersion) - } -} - -func (ap *awsSigningAuthPlugin) validateAndSetDefaults(serviceType string) error { - cfgs := map[bool]int{} - cfgs[ap.AWSEnvironmentCredentials != nil]++ - cfgs[ap.AWSMetadataCredentials != nil]++ - cfgs[ap.AWSAssumeRoleCredentials != nil]++ - cfgs[ap.AWSWebIdentityCredentials != nil]++ - cfgs[ap.AWSProfileCredentials != nil]++ - - if cfgs[true] == 0 { - return errors.New("a AWS credential service must be specified when S3 signing is enabled") - } - - if ap.AWSMetadataCredentials != nil { - if ap.AWSMetadataCredentials.RegionName == "" { - return errors.New("at least aws_region must be specified for AWS metadata credential service") - } - } - - if ap.AWSAssumeRoleCredentials != nil { - if err := ap.AWSAssumeRoleCredentials.populateFromEnv(); err != nil { - return err - } - } - - if ap.AWSWebIdentityCredentials != nil { - if err := ap.AWSWebIdentityCredentials.populateFromEnv(); err != nil { - return err - } - } - - ap.AWSService = strings.ToLower(ap.AWSService) - - // Only allow ECR for OCI service types - if serviceType == "oci" { - if ap.AWSService == "" { - ap.AWSService = "ecr" - } - - if ap.AWSService != "ecr" { - return fmt.Errorf(`cannot use aws service %q with service type "oci"`, ap.AWSService) - } - - // We need to setup a special auth plugin for ECR. 
- ap.ecrAuthPlugin = newECRAuthPlugin(ap) - } else { - // Disallow ECR for non-OCI service types - if ap.AWSService == "ecr" { - return errors.New(`aws service "ecr" must be used with service type "oci"`) - } - if ap.AWSService == "kms" && ap.kmsSignPlugin == nil { - // We need a special plugin for KMS. - ap.kmsSignPlugin = newKMSSignPlugin(ap) - } - if ap.AWSService == "" { - ap.AWSService = awsSigv4SigningDefaultService - } - } - - if ap.AWSSignatureVersion == "" { - ap.AWSSignatureVersion = "4" - } - - return nil -} - -func (ap *awsSigningAuthPlugin) SignDigest(ctx context.Context, digest []byte, keyID string, signingAlgorithm string) (string, error) { - switch ap.AWSService { - case "kms": - return ap.kmsSignPlugin.SignDigest(ctx, digest, keyID, signingAlgorithm) - default: - return "", fmt.Errorf(`cannot use SignDigest with aws service %q`, ap.AWSService) - } -} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go deleted file mode 100644 index cc45dfa9c7..0000000000 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go +++ /dev/null @@ -1,756 +0,0 @@ -// Copyright 2019 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package rest - -import ( - "context" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "github.com/go-ini/ini" - "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/logging" -) - -const ( - // ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - ec2DefaultCredServicePath = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" - - // ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html - ec2DefaultTokenPath = "http://169.254.169.254/latest/api/token" - - // ref. https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-iam-roles.html - ecsDefaultCredServicePath = "http://169.254.170.2" - ecsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" - ecsFullPathEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - ecsAuthorizationTokenEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - ecsAuthorizationTokenFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" - - // ref. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html - stsDefaultDomain = "amazonaws.com" - stsDefaultPath = "https://sts.%s" - stsRegionPath = "https://sts.%s.%s" - - // ref. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html - accessKeyEnvVar = "AWS_ACCESS_KEY_ID" - secretKeyEnvVar = "AWS_SECRET_ACCESS_KEY" - securityTokenEnvVar = "AWS_SECURITY_TOKEN" - sessionTokenEnvVar = "AWS_SESSION_TOKEN" - awsRegionEnvVar = "AWS_REGION" - awsDomainEnvVar = "AWS_DOMAIN" - awsRoleArnEnvVar = "AWS_ROLE_ARN" - awsWebIdentityTokenFileEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE" - awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" - awsProfileEnvVar = "AWS_PROFILE" - - // ref. 
https://docs.aws.amazon.com/sdkref/latest/guide/settings-global.html - accessKeyGlobalSetting = "aws_access_key_id" - secretKeyGlobalSetting = "aws_secret_access_key" - securityTokenGlobalSetting = "aws_session_token" -) - -// awsCredentialService represents the interface for AWS credential providers -type awsCredentialService interface { - credentials(context.Context) (aws.Credentials, error) -} - -// awsEnvironmentCredentialService represents an static environment-variable credential provider for AWS -type awsEnvironmentCredentialService struct { - logger logging.Logger -} - -func (cs *awsEnvironmentCredentialService) credentials(context.Context) (aws.Credentials, error) { - var creds aws.Credentials - creds.AccessKey = os.Getenv(accessKeyEnvVar) - if creds.AccessKey == "" { - return creds, errors.New("no " + accessKeyEnvVar + " set in environment") - } - creds.SecretKey = os.Getenv(secretKeyEnvVar) - if creds.SecretKey == "" { - return creds, errors.New("no " + secretKeyEnvVar + " set in environment") - } - creds.RegionName = os.Getenv(awsRegionEnvVar) - if creds.RegionName == "" { - return creds, errors.New("no " + awsRegionEnvVar + " set in environment") - } - // SessionToken is required if using temporary ENV credentials from assumed IAM role - // Missing SessionToken results with 403 s3 error. - creds.SessionToken = os.Getenv(sessionTokenEnvVar) - if creds.SessionToken == "" { - // In case of missing SessionToken try to get SecurityToken - // AWS switched to use SessionToken, but SecurityToken was left for backward compatibility - creds.SessionToken = os.Getenv(securityTokenEnvVar) - } - - return creds, nil -} - -// awsProfileCredentialService represents a credential provider for AWS that extracts credentials from the AWS -// credentials file -type awsProfileCredentialService struct { - - // Path to the credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. 
If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Path string `json:"path,omitempty"` - - // AWS Profile to extract credentials from the credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string `json:"profile,omitempty"` - - RegionName string `json:"aws_region"` - - logger logging.Logger -} - -func (cs *awsProfileCredentialService) credentials(context.Context) (aws.Credentials, error) { - var creds aws.Credentials - - filename, err := cs.path() - if err != nil { - return creds, err - } - - cfg, err := ini.Load(filename) - if err != nil { - return creds, fmt.Errorf("failed to read credentials file: %v", err) - } - - profile, err := cfg.GetSection(cs.profile()) - if err != nil { - return creds, fmt.Errorf("failed to get profile: %v", err) - } - - creds.AccessKey = profile.Key(accessKeyGlobalSetting).String() - if creds.AccessKey == "" { - return creds, fmt.Errorf("profile \"%v\" in credentials file %v does not contain \"%v\"", cs.Profile, cs.Path, accessKeyGlobalSetting) - } - - creds.SecretKey = profile.Key(secretKeyGlobalSetting).String() - if creds.SecretKey == "" { - return creds, fmt.Errorf("profile \"%v\" in credentials file %v does not contain \"%v\"", cs.Profile, cs.Path, secretKeyGlobalSetting) - } - - creds.SessionToken = profile.Key(securityTokenGlobalSetting).String() // default to empty string - - if cs.RegionName == "" { - if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { - return creds, errors.New("no " + awsRegionEnvVar + " set in environment or configuration") - } - } - creds.RegionName = cs.RegionName - - return creds, nil -} - -func (cs *awsProfileCredentialService) path() (string, error) { - if len(cs.Path) != 0 { - return cs.Path, nil - } - - if cs.Path = os.Getenv(awsCredentialsFileEnvVar); len(cs.Path) != 0 { 
- return cs.Path, nil - } - - homeDir, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("user home directory not found: %w", err) - } - - cs.Path = filepath.Join(homeDir, ".aws", "credentials") - - return cs.Path, nil -} - -func (cs *awsProfileCredentialService) profile() string { - if cs.Profile != "" { - return cs.Profile - } - - cs.Profile = os.Getenv(awsProfileEnvVar) - - if cs.Profile == "" { - cs.Profile = "default" - } - - return cs.Profile -} - -// awsMetadataCredentialService represents an EC2 metadata service credential provider for AWS -type awsMetadataCredentialService struct { - RoleName string `json:"iam_role,omitempty"` - RegionName string `json:"aws_region"` - creds aws.Credentials - expiration time.Time - credServicePath string - tokenPath string - logger logging.Logger -} - -func (cs *awsMetadataCredentialService) urlForMetadataService() (string, error) { - // override default path for testing - if cs.credServicePath != "" { - return cs.credServicePath + cs.RoleName, nil - } - // otherwise, normal flow - // if a role name is provided, look up via the EC2 credential service - if cs.RoleName != "" { - return ec2DefaultCredServicePath + cs.RoleName, nil - } - // otherwise, check environment to see if it looks like we're in an ECS - // container (with implied role association) - if isECS() { - // first check if the relative env var exists; if so we use that otherwise we - // use the "full" var - if _, relativeExists := os.LookupEnv(ecsRelativePathEnvVar); relativeExists { - return ecsDefaultCredServicePath + os.Getenv(ecsRelativePathEnvVar), nil - } - return os.Getenv(ecsFullPathEnvVar), nil - } - // if there's no role name and we don't appear to have a path to the - // ECS container service, then the configuration is invalid - return "", errors.New("metadata endpoint cannot be determined from settings and environment") -} - -func (cs *awsMetadataCredentialService) tokenRequest(ctx context.Context) (*http.Request, error) { - tokenURL 
:= ec2DefaultTokenPath - if cs.tokenPath != "" { - // override for testing - tokenURL = cs.tokenPath - } - req, err := http.NewRequestWithContext(ctx, http.MethodPut, tokenURL, nil) - if err != nil { - return nil, err - } - - // we are going to use the token in the immediate future, so a long TTL is not necessary - req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60") - return req, nil -} - -func (cs *awsMetadataCredentialService) refreshFromService(ctx context.Context) error { - // define the expected JSON payload from the EC2 credential service - // ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - type metadataPayload struct { - Code string - AccessKeyID string `json:"AccessKeyId"` - SecretAccessKey string - Token string - Expiration time.Time - } - - // Short circuit if a reasonable amount of time until credential expiration remains - const tokenExpirationMargin = 5 * time.Minute - - if time.Now().Add(tokenExpirationMargin).Before(cs.expiration) { - cs.logger.Debug("Credentials previously obtained from metadata service still valid.") - return nil - } - - cs.logger.Debug("Obtaining credentials from metadata service.") - metaDataURL, err := cs.urlForMetadataService() - if err != nil { - // configuration issue or missing ECS environment - return err - } - - // construct an HTTP client with a reasonably short timeout - client := &http.Client{Timeout: time.Second * 10} - req, err := http.NewRequestWithContext(ctx, http.MethodGet, metaDataURL, nil) - if err != nil { - return errors.New("unable to construct metadata HTTP request: " + err.Error()) - } - - // if using the AWS_CONTAINER_CREDENTIALS_FULL_URI variable, we need to associate the token - // to the request - if _, useFullPath := os.LookupEnv(ecsFullPathEnvVar); useFullPath { - var token string - tokenFilePath, tokenFilePathExists := os.LookupEnv(ecsAuthorizationTokenFileEnvVar) - - if tokenFilePathExists { - tokenBytes, err := os.ReadFile(tokenFilePath) - if err != 
nil { - return errors.New("failed to read ECS metadata authorization token from file: " + err.Error()) - } - token = string(tokenBytes) - // If token doesn't exist as a file check if it exists as an environment variable - } else { - var tokenExists bool - token, tokenExists = os.LookupEnv(ecsAuthorizationTokenEnvVar) - if !tokenExists { - return errors.New("unable to get ECS metadata authorization token") - } - } - req.Header.Set("Authorization", token) - } - - // if in the EC2 environment, we will use IMDSv2, which requires a session cookie from a - // PUT request on the token endpoint before it will give the credentials, this provides - // protection from SSRF attacks - if !isECS() { - tokenReq, err := cs.tokenRequest(ctx) - if err != nil { - return errors.New("unable to construct metadata token HTTP request: " + err.Error()) - } - body, err := aws.DoRequestWithClient(tokenReq, client, "metadata token", cs.logger) - if err != nil { - return err - } - // token is the body of response; add to header of metadata request - req.Header.Set("X-aws-ec2-metadata-token", string(body)) - } - - body, err := aws.DoRequestWithClient(req, client, "metadata", cs.logger) - if err != nil { - return err - } - - var payload metadataPayload - err = json.Unmarshal(body, &payload) - if err != nil { - return errors.New("failed to parse credential response from metadata service: " + err.Error()) - } - - // Only the EC2 endpoint returns the "Code" element which indicates whether the query was - // successful; the ECS endpoint does not! Some other fields are missing in the ECS payload - // but we do not depend on them. 
- if cs.RoleName != "" && payload.Code != "Success" { - return errors.New("metadata service query did not succeed: " + payload.Code) - } - - cs.expiration = payload.Expiration - cs.creds.AccessKey = payload.AccessKeyID - cs.creds.SecretKey = payload.SecretAccessKey - cs.creds.SessionToken = payload.Token - cs.creds.RegionName = cs.RegionName - - return nil -} - -func (cs *awsMetadataCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { - err := cs.refreshFromService(ctx) - if err != nil { - return cs.creds, err - } - return cs.creds, nil -} - -// awsAssumeRoleCredentialService represents a STS credential service that uses active IAM credentials -// to obtain temporary security credentials generated by AWS STS via AssumeRole API operation -type awsAssumeRoleCredentialService struct { - RegionName string `json:"aws_region"` - RoleArn string `json:"iam_role_arn"` - SessionName string `json:"session_name"` - Domain string `json:"aws_domain"` - AWSSigningPlugin *awsSigningAuthPlugin `json:"aws_signing,omitempty"` - stsURL string - creds aws.Credentials - expiration time.Time - logger logging.Logger -} - -func (cs *awsAssumeRoleCredentialService) populateFromEnv() error { - if cs.AWSSigningPlugin == nil { - return errors.New("a AWS signing plugin must be specified when AssumeRole credential provider is enabled") - } - - switch { - case cs.AWSSigningPlugin.AWSEnvironmentCredentials != nil: - case cs.AWSSigningPlugin.AWSProfileCredentials != nil: - case cs.AWSSigningPlugin.AWSMetadataCredentials != nil: - default: - return errors.New("unsupported AWS signing plugin with AssumeRole credential provider") - } - - if cs.AWSSigningPlugin.AWSMetadataCredentials != nil { - if cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName == "" { - if cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName = os.Getenv(awsRegionEnvVar); cs.AWSSigningPlugin.AWSMetadataCredentials.RegionName == "" { - return errors.New("no " + awsRegionEnvVar + " set in environment or 
configuration") - } - } - } - - if cs.AWSSigningPlugin.AWSSignatureVersion == "" { - cs.AWSSigningPlugin.AWSSignatureVersion = "4" - } - - if cs.Domain == "" { - cs.Domain = os.Getenv(awsDomainEnvVar) - } - - if cs.RegionName == "" { - if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { - return errors.New("no " + awsRegionEnvVar + " set in environment or configuration") - } - } - - if cs.RoleArn == "" { - if cs.RoleArn = os.Getenv(awsRoleArnEnvVar); cs.RoleArn == "" { - return errors.New("no " + awsRoleArnEnvVar + " set in environment or configuration") - } - } - - return nil -} - -func (cs *awsAssumeRoleCredentialService) signingCredentials(ctx context.Context) (aws.Credentials, error) { - if cs.AWSSigningPlugin.AWSEnvironmentCredentials != nil { - cs.AWSSigningPlugin.AWSEnvironmentCredentials.logger = cs.logger - return cs.AWSSigningPlugin.AWSEnvironmentCredentials.credentials(ctx) - } - - if cs.AWSSigningPlugin.AWSProfileCredentials != nil { - cs.AWSSigningPlugin.AWSProfileCredentials.logger = cs.logger - return cs.AWSSigningPlugin.AWSProfileCredentials.credentials(ctx) - } - - cs.AWSSigningPlugin.AWSMetadataCredentials.logger = cs.logger - return cs.AWSSigningPlugin.AWSMetadataCredentials.credentials(ctx) -} - -func (cs *awsAssumeRoleCredentialService) stsPath() string { - return getSTSPath(cs.Domain, cs.stsURL, cs.RegionName) -} - -func (cs *awsAssumeRoleCredentialService) refreshFromService(ctx context.Context) error { - // define the expected JSON payload from the EC2 credential service - // ref. 
https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html - type responsePayload struct { - Result struct { - Credentials struct { - SessionToken string - SecretAccessKey string - Expiration time.Time - AccessKeyID string `xml:"AccessKeyId"` - } - } `xml:"AssumeRoleResult"` - } - - // short circuit if a reasonable amount of time until credential expiration remains - if time.Now().Add(time.Minute * 5).Before(cs.expiration) { - cs.logger.Debug("Credentials previously obtained from sts service still valid.") - return nil - } - - cs.logger.Debug("Obtaining credentials from sts for role %s.", cs.RoleArn) - - var sessionName string - if cs.SessionName == "" { - sessionName = "open-policy-agent" - } else { - sessionName = cs.SessionName - } - - queryVals := url.Values{ - "Action": []string{"AssumeRole"}, - "RoleSessionName": []string{sessionName}, - "RoleArn": []string{cs.RoleArn}, - "Version": []string{"2011-06-15"}, - } - stsRequestURL, _ := url.Parse(cs.stsPath()) - - // construct an HTTP client with a reasonably short timeout - client := &http.Client{Timeout: time.Second * 10} - req, err := http.NewRequestWithContext(ctx, http.MethodPost, stsRequestURL.String(), strings.NewReader(queryVals.Encode())) - if err != nil { - return errors.New("unable to construct STS HTTP request: " + err.Error()) - } - - req.Header.Add("Content-Type", "application/x-www-form-urlencoded") - - // Note: Calls to AWS STS AssumeRole must be signed using the access key ID - // and secret access key - signingCreds, err := cs.signingCredentials(ctx) - if err != nil { - return err - } - - err = aws.SignRequest(req, "sts", signingCreds, time.Now(), cs.AWSSigningPlugin.AWSSignatureVersion) - if err != nil { - return err - } - - body, err := aws.DoRequestWithClient(req, client, "STS", cs.logger) - if err != nil { - return err - } - - var payload responsePayload - err = xml.Unmarshal(body, &payload) - if err != nil { - return errors.New("failed to parse credential response from STS 
service: " + err.Error()) - } - - cs.expiration = payload.Result.Credentials.Expiration - cs.creds.AccessKey = payload.Result.Credentials.AccessKeyID - cs.creds.SecretKey = payload.Result.Credentials.SecretAccessKey - cs.creds.SessionToken = payload.Result.Credentials.SessionToken - cs.creds.RegionName = cs.RegionName - - return nil -} - -func (cs *awsAssumeRoleCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { - err := cs.refreshFromService(ctx) - if err != nil { - return cs.creds, err - } - return cs.creds, nil -} - -// awsWebIdentityCredentialService represents an STS WebIdentity credential services -type awsWebIdentityCredentialService struct { - RoleArn string - WebIdentityTokenFile string - RegionName string `json:"aws_region"` - SessionName string `json:"session_name"` - Domain string `json:"aws_domain"` - stsURL string - creds aws.Credentials - expiration time.Time - logger logging.Logger -} - -func (cs *awsWebIdentityCredentialService) populateFromEnv() error { - cs.RoleArn = os.Getenv(awsRoleArnEnvVar) - if cs.RoleArn == "" { - return errors.New("no " + awsRoleArnEnvVar + " set in environment") - } - cs.WebIdentityTokenFile = os.Getenv(awsWebIdentityTokenFileEnvVar) - if cs.WebIdentityTokenFile == "" { - return errors.New("no " + awsWebIdentityTokenFileEnvVar + " set in environment") - } - - if cs.Domain == "" { - cs.Domain = os.Getenv(awsDomainEnvVar) - } - - if cs.RegionName == "" { - if cs.RegionName = os.Getenv(awsRegionEnvVar); cs.RegionName == "" { - return errors.New("no " + awsRegionEnvVar + " set in environment or configuration") - } - } - return nil -} - -func (cs *awsWebIdentityCredentialService) stsPath() string { - return getSTSPath(cs.Domain, cs.stsURL, cs.RegionName) -} - -func (cs *awsWebIdentityCredentialService) refreshFromService(ctx context.Context) error { - // define the expected JSON payload from the EC2 credential service - // ref. 
https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html - type responsePayload struct { - Result struct { - Credentials struct { - SessionToken string - SecretAccessKey string - Expiration time.Time - AccessKeyID string `xml:"AccessKeyId"` - } - } `xml:"AssumeRoleWithWebIdentityResult"` - } - - // short circuit if a reasonable amount of time until credential expiration remains - if time.Now().Add(time.Minute * 5).Before(cs.expiration) { - cs.logger.Debug("Credentials previously obtained from sts service still valid.") - return nil - } - - cs.logger.Debug("Obtaining credentials from sts for role %s.", cs.RoleArn) - - var sessionName string - if cs.SessionName == "" { - sessionName = "open-policy-agent" - } else { - sessionName = cs.SessionName - } - - tokenData, err := os.ReadFile(cs.WebIdentityTokenFile) - if err != nil { - return errors.New("unable to read web token for sts HTTP request: " + err.Error()) - } - - token := string(tokenData) - - queryVals := url.Values{ - "Action": []string{"AssumeRoleWithWebIdentity"}, - "RoleSessionName": []string{sessionName}, - "RoleArn": []string{cs.RoleArn}, - "WebIdentityToken": []string{token}, - "Version": []string{"2011-06-15"}, - } - stsRequestURL, _ := url.Parse(cs.stsPath()) - - // construct an HTTP client with a reasonably short timeout - client := &http.Client{Timeout: time.Second * 10} - req, err := http.NewRequestWithContext(ctx, http.MethodPost, stsRequestURL.String(), strings.NewReader(queryVals.Encode())) - if err != nil { - return errors.New("unable to construct STS HTTP request: " + err.Error()) - } - - req.Header.Add("Content-Type", "application/x-www-form-urlencoded") - - body, err := aws.DoRequestWithClient(req, client, "STS", cs.logger) - if err != nil { - return err - } - - var payload responsePayload - err = xml.Unmarshal(body, &payload) - if err != nil { - return errors.New("failed to parse credential response from STS service: " + err.Error()) - } - - cs.expiration = 
payload.Result.Credentials.Expiration - cs.creds.AccessKey = payload.Result.Credentials.AccessKeyID - cs.creds.SecretKey = payload.Result.Credentials.SecretAccessKey - cs.creds.SessionToken = payload.Result.Credentials.SessionToken - cs.creds.RegionName = cs.RegionName - - return nil -} - -func (cs *awsWebIdentityCredentialService) credentials(ctx context.Context) (aws.Credentials, error) { - err := cs.refreshFromService(ctx) - if err != nil { - return cs.creds, err - } - return cs.creds, nil -} - -func isECS() bool { - // the special relative path URI is set by the container agent in the ECS environment only - _, isECSRelative := os.LookupEnv(ecsRelativePathEnvVar) - _, isECSFull := os.LookupEnv(ecsFullPathEnvVar) - return isECSRelative || isECSFull -} - -// ecrAuthPlugin authorizes requests to AWS ECR. -type ecrAuthPlugin struct { - token aws.ECRAuthorizationToken - - // awsAuthPlugin is used to sign ecr authorization token requests. - awsAuthPlugin *awsSigningAuthPlugin - - // ecr represents the service we request tokens from. - ecr ecr - - logger logging.Logger -} - -type ecr interface { - GetAuthorizationToken(context.Context, aws.Credentials, string) (aws.ECRAuthorizationToken, error) -} - -func newECRAuthPlugin(ap *awsSigningAuthPlugin) *ecrAuthPlugin { - return &ecrAuthPlugin{ - awsAuthPlugin: ap, - ecr: aws.NewECR(ap.logger), - logger: ap.logger, - } -} - -// Prepare should be called with any request to AWS ECR. -// It takes care of retrieving an ECR authorization token to sign -// the request with. 
-func (ap *ecrAuthPlugin) Prepare(r *http.Request) error { - if !ap.token.IsValid() { - ap.logger.Debug("Refreshing ECR auth token") - if err := ap.refreshAuthorizationToken(r.Context()); err != nil { - return err - } - } - - ap.logger.Debug("Signing request with ECR authorization token") - - r.Header.Set("Authorization", fmt.Sprintf("Basic %s", ap.token.AuthorizationToken)) - return nil -} - -func (ap *ecrAuthPlugin) refreshAuthorizationToken(ctx context.Context) error { - creds, err := ap.awsAuthPlugin.awsCredentialService().credentials(ctx) - if err != nil { - return fmt.Errorf("failed to get aws credentials: %w", err) - } - - token, err := ap.ecr.GetAuthorizationToken(ctx, creds, ap.awsAuthPlugin.AWSSignatureVersion) - if err != nil { - return fmt.Errorf("ecr: failed to get authorization token: %w", err) - } - - ap.token = token - return nil -} - -// awsKMSSignPlugin signs digests using AWS KMS. -type awsKMSSignPlugin struct { - - // awsAuthPlugin is used to sign kms sign requests. - awsAuthPlugin *awsSigningAuthPlugin - - // kms represents the service for signing digests. 
- kms awskms - - logger logging.Logger -} - -type awskms interface { - SignDigest(ctx context.Context, digest []byte, keyID string, signingAlgorithm string, creds aws.Credentials, signatureVersion string) (string, error) -} - -func newKMSSignPlugin(ap *awsSigningAuthPlugin) *awsKMSSignPlugin { - return &awsKMSSignPlugin{ - awsAuthPlugin: ap, - kms: aws.NewKMS(ap.logger), - logger: ap.logger, - } -} - -func (ap *awsKMSSignPlugin) SignDigest(ctx context.Context, digest []byte, keyID string, signingAlgorithm string) (string, error) { - creds, err := ap.awsAuthPlugin.awsCredentialService().credentials(ctx) - if err != nil { - return "", fmt.Errorf("failed to get aws credentials: %w", err) - } - - signature, err := ap.kms.SignDigest(ctx, digest, keyID, signingAlgorithm, creds, ap.awsAuthPlugin.AWSSignatureVersion) - if err != nil { - return "", fmt.Errorf("kms: failed to sign digest: %w", err) - } - - return signature, nil -} - -func getSTSPath(stsDomain, stsURL, regionName string) string { - var domain string - if stsDomain != "" { - domain = strings.ToLower(stsDomain) - } else { - domain = stsDefaultDomain - } - - var stsPath string - switch { - case stsURL != "": - stsPath = stsURL - case regionName != "": - stsPath = fmt.Sprintf(stsRegionPath, strings.ToLower(regionName), domain) - default: - stsPath = fmt.Sprintf(stsDefaultPath, domain) - } - return stsPath -} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go deleted file mode 100644 index ae00d48a7c..0000000000 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go +++ /dev/null @@ -1,180 +0,0 @@ -package rest - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "time" -) - -var ( - azureIMDSEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - defaultAPIVersion = "2018-02-01" - defaultResource = "https://storage.azure.com/" - timeout = 5 * time.Second - 
defaultAPIVersionForAppServiceMsi = "2019-08-01" -) - -// azureManagedIdentitiesToken holds a token for managed identities for Azure resources -type azureManagedIdentitiesToken struct { - AccessToken string `json:"access_token"` - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` - Resource string `json:"resource"` - TokenType string `json:"token_type"` -} - -// azureManagedIdentitiesError represents an error fetching an azureManagedIdentitiesToken -type azureManagedIdentitiesError struct { - Err string `json:"error"` - Description string `json:"error_description"` - Endpoint string - StatusCode int -} - -func (e *azureManagedIdentitiesError) Error() string { - return fmt.Sprintf("%v %s retrieving azure token from %s: %s", e.StatusCode, e.Err, e.Endpoint, e.Description) -} - -// azureManagedIdentitiesAuthPlugin uses an azureManagedIdentitiesToken.AccessToken for bearer authorization -type azureManagedIdentitiesAuthPlugin struct { - Endpoint string `json:"endpoint"` - APIVersion string `json:"api_version"` - Resource string `json:"resource"` - ObjectID string `json:"object_id"` - ClientID string `json:"client_id"` - MiResID string `json:"mi_res_id"` - UseAppServiceMsi bool `json:"use_app_service_msi,omitempty"` -} - -func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, error) { - if c.Type == "oci" { - return nil, errors.New("azure managed identities auth: OCI service not supported") - } - - if ap.Endpoint == "" { - identityEndpoint := os.Getenv("IDENTITY_ENDPOINT") - if identityEndpoint != "" { - ap.UseAppServiceMsi = true - ap.Endpoint = identityEndpoint - } else { - ap.Endpoint = azureIMDSEndpoint - } - } - - if ap.Resource == "" { - ap.Resource = defaultResource - } - - if ap.APIVersion == "" { - if ap.UseAppServiceMsi { - ap.APIVersion = defaultAPIVersionForAppServiceMsi - } else { - ap.APIVersion = defaultAPIVersion - } - } - - t, err := DefaultTLSConfig(c) - if err != 
nil { - return nil, err - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *azureManagedIdentitiesAuthPlugin) Prepare(req *http.Request) error { - token, err := azureManagedIdentitiesTokenRequest( - ap.Endpoint, ap.APIVersion, ap.Resource, - ap.ObjectID, ap.ClientID, ap.MiResID, - ap.UseAppServiceMsi, - ) - if err != nil { - return err - } - - req.Header.Add("Authorization", "Bearer "+token.AccessToken) - return nil -} - -// azureManagedIdentitiesTokenRequest fetches an azureManagedIdentitiesToken -func azureManagedIdentitiesTokenRequest( - endpoint, apiVersion, resource, objectID, clientID, miResID string, - useAppServiceMsi bool, -) (azureManagedIdentitiesToken, error) { - var token azureManagedIdentitiesToken - e := buildAzureManagedIdentitiesRequestPath(endpoint, apiVersion, resource, objectID, clientID, miResID) - - request, err := http.NewRequest("GET", e, nil) - if err != nil { - return token, err - } - if useAppServiceMsi { - identityHeader := os.Getenv("IDENTITY_HEADER") - if identityHeader == "" { - return token, errors.New("azure managed identities auth: IDENTITY_HEADER env var not found") - } - request.Header.Add("x-identity-header", identityHeader) - } else { - request.Header.Add("Metadata", "true") - } - - httpClient := http.Client{Timeout: timeout} - response, err := httpClient.Do(request) - if err != nil { - return token, err - } - defer response.Body.Close() - - data, err := io.ReadAll(response.Body) - if err != nil { - return token, err - } - - if s := response.StatusCode; s != http.StatusOK { - var azureError azureManagedIdentitiesError - err = json.Unmarshal(data, &azureError) - if err != nil { - return token, err - } - - azureError.Endpoint = e - azureError.StatusCode = s - return token, &azureError - } - - err = json.Unmarshal(data, &token) - if err != nil { - return token, err - } - - return token, nil -} - -// buildAzureManagedIdentitiesRequestPath constructs the request URL for an Azure managed 
identities token request -func buildAzureManagedIdentitiesRequestPath( - endpoint, apiVersion, resource, objectID, clientID, miResID string, -) string { - params := url.Values{ - "api-version": []string{apiVersion}, - "resource": []string{resource}, - } - - if objectID != "" { - params.Add("object_id", objectID) - } - - if clientID != "" { - params.Add("client_id", clientID) - } - - if miResID != "" { - params.Add("mi_res_id", miResID) - } - - return endpoint + "?" + params.Encode() -} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go deleted file mode 100644 index c717105c7f..0000000000 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package rest - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - "time" -) - -var ( - defaultGCPMetadataEndpoint = "http://metadata.google.internal" - defaultAccessTokenPath = "/computeMetadata/v1/instance/service-accounts/default/token" - defaultIdentityTokenPath = "/computeMetadata/v1/instance/service-accounts/default/identity" -) - -// AccessToken holds a GCP access token. 
-type AccessToken struct { - AccessToken string `json:"access_token"` - ExpiresIn int64 `json:"expires_in"` - TokenType string `json:"token_type"` -} - -type gcpMetadataError struct { - err error - endpoint string - statusCode int -} - -func (e *gcpMetadataError) Error() string { - return fmt.Sprintf("error retrieving gcp ID token from %s %d: %v", e.endpoint, e.statusCode, e.err) -} - -func (e *gcpMetadataError) Unwrap() error { return e.err } - -var ( - errGCPMetadataNotFound = errors.New("not found") - errGCPMetadataInvalidRequest = errors.New("invalid request") - errGCPMetadataUnexpected = errors.New("unexpected error") -) - -// gcpMetadataAuthPlugin represents authentication via GCP metadata service. -type gcpMetadataAuthPlugin struct { - AccessTokenPath string `json:"access_token_path"` - Audience string `json:"audience"` - Endpoint string `json:"endpoint"` - IdentityTokenPath string `json:"identity_token_path"` - Scopes []string `json:"scopes"` -} - -func (ap *gcpMetadataAuthPlugin) NewClient(c Config) (*http.Client, error) { - if ap.Audience == "" && len(ap.Scopes) == 0 { - return nil, errors.New("audience or scopes is required when gcp metadata is enabled") - } - - if ap.Audience != "" && len(ap.Scopes) > 0 { - return nil, errors.New("either audience or scopes can be set, not both, when gcp metadata is enabled") - } - - if ap.Endpoint == "" { - ap.Endpoint = defaultGCPMetadataEndpoint - } - - if ap.AccessTokenPath == "" { - ap.AccessTokenPath = defaultAccessTokenPath - } - - if ap.IdentityTokenPath == "" { - ap.IdentityTokenPath = defaultIdentityTokenPath - } - - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil -} - -func (ap *gcpMetadataAuthPlugin) Prepare(req *http.Request) error { - var err error - var token string - - if ap.Audience != "" { - token, err = identityTokenFromMetadataService(ap.Endpoint, ap.IdentityTokenPath, ap.Audience) - if err != nil { - 
return fmt.Errorf("error retrieving identity token from gcp metadata service: %w", err) - } - } - - if len(ap.Scopes) != 0 { - token, err = accessTokenFromMetadataService(ap.Endpoint, ap.AccessTokenPath, ap.Scopes) - if err != nil { - return fmt.Errorf("error retrieving access token from gcp metadata service: %w", err) - } - } - - req.Header.Add("Authorization", fmt.Sprintf("Bearer %v", token)) - return nil -} - -// accessTokenFromMetadataService returns an access token based on the scopes. -func accessTokenFromMetadataService(endpoint, path string, scopes []string) (string, error) { - s := strings.Join(scopes, ",") - - e := fmt.Sprintf("%s%s?scopes=%s", endpoint, path, s) - - data, err := gcpMetadataServiceRequest(e) - if err != nil { - return "", err - } - - var accessToken AccessToken - err = json.Unmarshal(data, &accessToken) - if err != nil { - return "", err - } - - return accessToken.AccessToken, nil -} - -// identityTokenFromMetadataService returns an identity token based on the audience. 
-func identityTokenFromMetadataService(endpoint, path, audience string) (string, error) { - e := fmt.Sprintf("%s%s?audience=%s", endpoint, path, audience) - - data, err := gcpMetadataServiceRequest(e) - if err != nil { - return "", err - } - return string(data), nil -} - -func gcpMetadataServiceRequest(endpoint string) ([]byte, error) { - request, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return nil, err - } - - request.Header.Add("Metadata-Flavor", "Google") - - timeout := time.Duration(5) * time.Second - httpClient := http.Client{Timeout: timeout} - - response, err := httpClient.Do(request) - if err != nil { - return nil, err - } - defer response.Body.Close() - - switch s := response.StatusCode; s { - case 200: - break - case 400: - return nil, &gcpMetadataError{errGCPMetadataInvalidRequest, endpoint, s} - case 404: - return nil, &gcpMetadataError{errGCPMetadataNotFound, endpoint, s} - default: - return nil, &gcpMetadataError{errGCPMetadataUnexpected, endpoint, s} - } - - data, err := io.ReadAll(response.Body) - if err != nil { - return nil, err - } - - return data, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go b/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go deleted file mode 100644 index fd59058ca1..0000000000 --- a/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package rest implements a REST client for communicating with remote services. 
-package rest - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/http/httputil" - "reflect" - "strings" - - "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultResponseHeaderTimeoutSeconds = int64(10) - defaultResponseSizeLimitBytes = 1024 - - grantTypeClientCredentials = "client_credentials" - grantTypeJwtBearer = "jwt_bearer" -) - -var maskedHeaderKeys = map[string]struct{}{ - "Authorization": {}, - "X-Amz-Security-Token": {}, -} - -// An HTTPAuthPlugin represents a mechanism to construct and configure HTTP authentication for a REST service -type HTTPAuthPlugin interface { - // implementations can assume NewClient will be called before Prepare - NewClient(Config) (*http.Client, error) - Prepare(*http.Request) error -} - -// Config represents configuration for a REST client. 
-type Config struct { - Name string `json:"name"` - URL string `json:"url"` - Headers map[string]string `json:"headers"` - AllowInsecureTLS bool `json:"allow_insecure_tls,omitempty"` - ResponseHeaderTimeoutSeconds *int64 `json:"response_header_timeout_seconds,omitempty"` - TLS *serverTLSConfig `json:"tls,omitempty"` - Credentials struct { - Bearer *bearerAuthPlugin `json:"bearer,omitempty"` - OAuth2 *oauth2ClientCredentialsAuthPlugin `json:"oauth2,omitempty"` - ClientTLS *clientTLSAuthPlugin `json:"client_tls,omitempty"` - S3Signing *awsSigningAuthPlugin `json:"s3_signing,omitempty"` - GCPMetadata *gcpMetadataAuthPlugin `json:"gcp_metadata,omitempty"` - AzureManagedIdentity *azureManagedIdentitiesAuthPlugin `json:"azure_managed_identity,omitempty"` - Plugin *string `json:"plugin,omitempty"` - } `json:"credentials"` - Type string `json:"type,omitempty"` - keys map[string]*keys.Config - logger logging.Logger -} - -// Equal returns true if this client config is equal to the other. -func (c *Config) Equal(other *Config) bool { - otherWithoutLogger := *other - otherWithoutLogger.logger = c.logger - return reflect.DeepEqual(c, &otherWithoutLogger) -} - -// An AuthPluginLookupFunc can lookup auth plugins by their name. -type AuthPluginLookupFunc func(name string) HTTPAuthPlugin - -// AuthPlugin should be used to get an authentication method from the config. 
-func (c *Config) AuthPlugin(lookup AuthPluginLookupFunc) (HTTPAuthPlugin, error) { - var candidate HTTPAuthPlugin - if c.Credentials.Plugin != nil { - if lookup == nil { - // if no authPluginLookup function is passed we can't resolve the plugin - return nil, errors.New("missing auth plugin lookup function") - } - - candidate := lookup(*c.Credentials.Plugin) - if candidate == nil { - return nil, fmt.Errorf("auth plugin %q not found", *c.Credentials.Plugin) - } - - return candidate, nil - } - // reflection avoids need for this code to change as auth plugins are added - s := reflect.ValueOf(c.Credentials) - for i := 0; i < s.NumField(); i++ { - if s.Field(i).IsNil() { - continue - } - - if candidate != nil { - return nil, errors.New("a maximum one credential method must be specified") - } - - candidate = s.Field(i).Interface().(HTTPAuthPlugin) - } - - if candidate == nil { - return &defaultAuthPlugin{}, nil - } - return candidate, nil -} - -func (c *Config) authHTTPClient(lookup AuthPluginLookupFunc) (*http.Client, error) { - plugin, err := c.AuthPlugin(lookup) - if err != nil { - return nil, err - } - return plugin.NewClient(*c) -} - -func (c *Config) authPrepare(req *http.Request, lookup AuthPluginLookupFunc) error { - plugin, err := c.AuthPlugin(lookup) - if err != nil { - return err - } - return plugin.Prepare(req) -} - -// Client implements an HTTP/REST client for communicating with remote -// services. -type Client struct { - bytes *[]byte - json *interface{} - config Config - headers map[string]string - authPluginLookup AuthPluginLookupFunc - logger logging.Logger - loggerFields map[string]interface{} - distributedTacingOpts tracing.Options -} - -// Name returns an option that overrides the service name on the client. -func Name(s string) func(*Client) { - return func(c *Client) { - c.config.Name = s - } -} - -// AuthPluginLookup assigns a function to lookup an HTTPAuthPlugin to a new Client. -// It's intended to be used when creating a Client using New(). 
Usually this is passed -// the plugins.AuthPlugin func, which retrieves a registered HTTPAuthPlugin from the -// plugin manager. -func AuthPluginLookup(l AuthPluginLookupFunc) func(*Client) { - return func(c *Client) { - c.authPluginLookup = l - } -} - -// Logger assigns a logger to the client -func Logger(l logging.Logger) func(*Client) { - return func(c *Client) { - c.logger = l - } -} - -// DistributedTracingOpts sets the options to be used by distributed tracing. -func DistributedTracingOpts(tr tracing.Options) func(*Client) { - return func(c *Client) { - c.distributedTacingOpts = tr - } -} - -// New returns a new Client for config. -func New(config []byte, keys map[string]*keys.Config, opts ...func(*Client)) (Client, error) { - var parsedConfig Config - if err := util.Unmarshal(config, &parsedConfig); err != nil { - return Client{}, err - } - - parsedConfig.URL = strings.TrimRight(parsedConfig.URL, "/") - - if parsedConfig.ResponseHeaderTimeoutSeconds == nil { - timeout := defaultResponseHeaderTimeoutSeconds - parsedConfig.ResponseHeaderTimeoutSeconds = &timeout - } - - parsedConfig.keys = keys - - client := Client{ - config: parsedConfig, - } - - for _, f := range opts { - f(&client) - } - - if client.logger == nil { - client.logger = logging.Get() - } - - client.config.logger = client.logger - - return client, nil -} - -// AuthPluginLookup returns the lookup function to find a custom registered -// auth plugin by its name. -func (c Client) AuthPluginLookup() AuthPluginLookupFunc { - return c.authPluginLookup -} - -// Service returns the name of the service this Client is configured for. 
-func (c Client) Service() string { - return c.config.Name -} - -// Config returns this Client's configuration -func (c Client) Config() *Config { - return &c.config -} - -// SetResponseHeaderTimeout sets the "ResponseHeaderTimeout" in the http client's Transport -func (c Client) SetResponseHeaderTimeout(timeout *int64) Client { - c.config.ResponseHeaderTimeoutSeconds = timeout - return c -} - -// Logger returns the logger assigned to the Client -func (c Client) Logger() logging.Logger { - return c.logger -} - -// LoggerFields returns the fields used for log statements used by Client -func (c Client) LoggerFields() map[string]interface{} { - return c.loggerFields -} - -// WithHeader returns a shallow copy of the client with a header to include the -// requests. -func (c Client) WithHeader(k, v string) Client { - if v == "" { - return c - } - if c.headers == nil { - c.headers = map[string]string{} - } - c.headers[k] = v - return c -} - -// WithJSON returns a shallow copy of the client with the JSON value set as the -// message body to include the requests. This function sets the Content-Type -// header. -func (c Client) WithJSON(body interface{}) Client { - c = c.WithHeader("Content-Type", "application/json") - c.json = &body - return c -} - -// WithBytes returns a shallow copy of the client with the bytes set as the -// message body to include in the requests. -func (c Client) WithBytes(body []byte) Client { - c.bytes = &body - return c -} - -// Do executes a request using the client. 
-func (c Client) Do(ctx context.Context, method, path string) (*http.Response, error) { - - httpClient, err := c.config.authHTTPClient(c.authPluginLookup) - if err != nil { - return nil, err - } - - if len(c.distributedTacingOpts) > 0 { - httpClient.Transport = tracing.NewTransport(httpClient.Transport, c.distributedTacingOpts) - } - - path = strings.Trim(path, "/") - - var body io.Reader - - if c.bytes != nil { - body = bytes.NewReader(*c.bytes) - } else if c.json != nil { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(*c.json); err != nil { - return nil, err - } - body = &buf - } - - url := c.config.URL + "/" + path - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - - headers := map[string]string{ - "User-Agent": version.UserAgent, - } - - // Copy custom headers from config. - for key, value := range c.config.Headers { - headers[key] = value - } - - // Overwrite with headers set directly on client. - for key, value := range c.headers { - headers[key] = value - } - - for key, value := range headers { - req.Header.Add(key, value) - } - - req = req.WithContext(ctx) - - err = c.config.authPrepare(req, c.authPluginLookup) - if err != nil { - return nil, err - } - - if c.logger.GetLevel() >= logging.Debug { - c.loggerFields = map[string]interface{}{ - "method": method, - "url": url, - "headers": withMaskedHeaders(req.Header), - } - - c.logger.WithFields(c.loggerFields).Debug("Sending request.") - } - - resp, err := httpClient.Do(req) - - if resp != nil && c.logger.GetLevel() >= logging.Debug { - // Only log for debug purposes. If an error occurred, the caller should handle - // that. In the non-error case, the caller may not do anything. 
- c.loggerFields["status"] = resp.Status - c.loggerFields["headers"] = resp.Header - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - dump, err := httputil.DumpResponse(resp, true) - if err != nil { - return nil, err - } - - if len(string(dump)) < defaultResponseSizeLimitBytes { - c.loggerFields["response"] = string(dump) - } else { - c.loggerFields["response"] = fmt.Sprintf("%v...", string(dump[:defaultResponseSizeLimitBytes])) - } - } - c.logger.WithFields(c.loggerFields).Debug("Received response.") - } - - return resp, err -} - -func withMaskedHeaders(headers http.Header) http.Header { - masked := make(http.Header) - for k, v := range headers { - if _, ok := maskedHeaderKeys[k]; ok { - masked.Set(k, "REDACTED") - } else { - masked[k] = v - } - } - return masked -} diff --git a/vendor/github.com/open-policy-agent/opa/rego/doc.go b/vendor/github.com/open-policy-agent/opa/rego/doc.go new file mode 100644 index 0000000000..febe75696c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/rego/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
+package rego diff --git a/vendor/github.com/open-policy-agent/opa/rego/errors.go b/vendor/github.com/open-policy-agent/opa/rego/errors.go index dcc5e2679d..bcbd2efedd 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/errors.go +++ b/vendor/github.com/open-policy-agent/opa/rego/errors.go @@ -1,24 +1,17 @@ package rego +import v1 "github.com/open-policy-agent/opa/v1/rego" + // HaltError is an error type to return from a custom function implementation // that will abort the evaluation process (analogous to topdown.Halt). -type HaltError struct { - err error -} - -// Error delegates to the wrapped error -func (h *HaltError) Error() string { - return h.err.Error() -} +type HaltError = v1.HaltError // NewHaltError wraps an error such that the evaluation process will stop // when it occurs. func NewHaltError(err error) error { - return &HaltError{err: err} + return v1.NewHaltError(err) } // ErrorDetails interface is satisfied by an error that provides further // details. -type ErrorDetails interface { - Lines() []string -} +type ErrorDetails = v1.ErrorDetails diff --git a/vendor/github.com/open-policy-agent/opa/rego/plugins.go b/vendor/github.com/open-policy-agent/opa/rego/plugins.go index abaa910341..38ef84416f 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/plugins.go +++ b/vendor/github.com/open-policy-agent/opa/rego/plugins.go @@ -5,39 +5,13 @@ package rego import ( - "context" - "sync" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/ir" + v1 "github.com/open-policy-agent/opa/v1/rego" ) -var targetPlugins = map[string]TargetPlugin{} -var pluginMtx sync.Mutex - -type TargetPlugin interface { - IsTarget(string) bool - PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error) -} - -type TargetPluginEval interface { - Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error) -} +type TargetPlugin = v1.TargetPlugin -func (r *Rego) targetPlugin(tgt string) TargetPlugin { - for _, p := 
range targetPlugins { - if p.IsTarget(tgt) { - return p - } - } - return nil -} +type TargetPluginEval = v1.TargetPluginEval func RegisterPlugin(name string, p TargetPlugin) { - pluginMtx.Lock() - defer pluginMtx.Unlock() - if _, ok := targetPlugins[name]; ok { - panic("plugin already registered " + name) - } - targetPlugins[name] = p + v1.RegisterPlugin(name, p) } diff --git a/vendor/github.com/open-policy-agent/opa/rego/rego.go b/vendor/github.com/open-policy-agent/opa/rego/rego.go index 64b4b9b93e..c9caf9f8cb 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/rego.go +++ b/vendor/github.com/open-policy-agent/opa/rego/rego.go @@ -6,958 +6,368 @@ package rego import ( - "bytes" - "context" - "errors" - "fmt" "io" - "strings" "time" "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/bundle" - bundleUtils "github.com/open-policy-agent/opa/internal/bundle" - "github.com/open-policy-agent/opa/internal/compiler/wasm" - "github.com/open-policy-agent/opa/internal/future" - "github.com/open-policy-agent/opa/internal/planner" - "github.com/open-policy-agent/opa/internal/rego/opa" - "github.com/open-policy-agent/opa/internal/wasm/encoding" - "github.com/open-policy-agent/opa/ir" "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/plugins" - "github.com/open-policy-agent/opa/resolver" "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/inmem" - "github.com/open-policy-agent/opa/topdown" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultPartialNamespace = "partial" - wasmVarPrefix = "^" -) - -// nolint: deadcode,varcheck -const ( - targetWasm = "wasm" - targetRego = "rego" + 
"github.com/open-policy-agent/opa/v1/metrics" + v1 "github.com/open-policy-agent/opa/v1/rego" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/topdown" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" ) // CompileResult represents the result of compiling a Rego query, zero or more // Rego modules, and arbitrary contextual data into an executable. -type CompileResult struct { - Bytes []byte `json:"bytes"` -} +type CompileResult = v1.CompileResult // PartialQueries contains the queries and support modules produced by partial // evaluation. -type PartialQueries struct { - Queries []ast.Body `json:"queries,omitempty"` - Support []*ast.Module `json:"modules,omitempty"` -} +type PartialQueries = v1.PartialQueries // PartialResult represents the result of partial evaluation. The result can be // used to generate a new query that can be run when inputs are known. -type PartialResult struct { - compiler *ast.Compiler - store storage.Store - body ast.Body - builtinDecls map[string]*ast.Builtin - builtinFuncs map[string]*topdown.Builtin -} - -// Rego returns an object that can be evaluated to produce a query result. -func (pr PartialResult) Rego(options ...func(*Rego)) *Rego { - options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body)) - r := New(options...) - - // Propagate any custom builtins. - for k, v := range pr.builtinDecls { - r.builtinDecls[k] = v - } - for k, v := range pr.builtinFuncs { - r.builtinFuncs[k] = v - } - return r -} - -// preparedQuery is a wrapper around a Rego object which has pre-processed -// state stored on it. Once prepared there are a more limited number of actions -// that can be taken with it. It will, however, be able to evaluate faster since -// it will not have to re-parse or compile as much. 
-type preparedQuery struct { - r *Rego - cfg *PrepareConfig -} +type PartialResult = v1.PartialResult // EvalContext defines the set of options allowed to be set at evaluation // time. Any other options will need to be set on a new Rego object. -type EvalContext struct { - hasInput bool - time time.Time - seed io.Reader - rawInput *interface{} - parsedInput ast.Value - metrics metrics.Metrics - txn storage.Transaction - instrument bool - instrumentation *topdown.Instrumentation - partialNamespace string - queryTracers []topdown.QueryTracer - compiledQuery compiledQuery - unknowns []string - disableInlining []ast.Ref - parsedUnknowns []*ast.Term - indexing bool - earlyExit bool - interQueryBuiltinCache cache.InterQueryCache - interQueryBuiltinValueCache cache.InterQueryValueCache - ndBuiltinCache builtins.NDBCache - resolvers []refResolver - sortSets bool - copyMaps bool - printHook print.Hook - capabilities *ast.Capabilities - strictBuiltinErrors bool - virtualCache topdown.VirtualCache -} - -func (e *EvalContext) RawInput() *interface{} { - return e.rawInput -} - -func (e *EvalContext) ParsedInput() ast.Value { - return e.parsedInput -} - -func (e *EvalContext) Time() time.Time { - return e.time -} - -func (e *EvalContext) Seed() io.Reader { - return e.seed -} - -func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache { - return e.interQueryBuiltinCache -} - -func (e *EvalContext) InterQueryBuiltinValueCache() cache.InterQueryValueCache { - return e.interQueryBuiltinValueCache -} - -func (e *EvalContext) PrintHook() print.Hook { - return e.printHook -} - -func (e *EvalContext) Metrics() metrics.Metrics { - return e.metrics -} - -func (e *EvalContext) StrictBuiltinErrors() bool { - return e.strictBuiltinErrors -} - -func (e *EvalContext) NDBCache() builtins.NDBCache { - return e.ndBuiltinCache -} - -func (e *EvalContext) CompiledQuery() ast.Body { - return e.compiledQuery.query -} - -func (e *EvalContext) Capabilities() *ast.Capabilities { - return 
e.capabilities -} - -func (e *EvalContext) Transaction() storage.Transaction { - return e.txn -} +type EvalContext = v1.EvalContext // EvalOption defines a function to set an option on an EvalConfig -type EvalOption func(*EvalContext) +type EvalOption = v1.EvalOption // EvalInput configures the input for a Prepared Query's evaluation -func EvalInput(input interface{}) EvalOption { - return func(e *EvalContext) { - e.rawInput = &input - e.hasInput = true - } +func EvalInput(input any) EvalOption { + return v1.EvalInput(input) } // EvalParsedInput configures the input for a Prepared Query's evaluation func EvalParsedInput(input ast.Value) EvalOption { - return func(e *EvalContext) { - e.parsedInput = input - e.hasInput = true - } + return v1.EvalParsedInput(input) } // EvalMetrics configures the metrics for a Prepared Query's evaluation func EvalMetrics(metric metrics.Metrics) EvalOption { - return func(e *EvalContext) { - e.metrics = metric - } + return v1.EvalMetrics(metric) } // EvalTransaction configures the Transaction for a Prepared Query's evaluation func EvalTransaction(txn storage.Transaction) EvalOption { - return func(e *EvalContext) { - e.txn = txn - } + return v1.EvalTransaction(txn) } // EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation func EvalInstrument(instrument bool) EvalOption { - return func(e *EvalContext) { - e.instrument = instrument - } + return v1.EvalInstrument(instrument) } // EvalTracer configures a tracer for a Prepared Query's evaluation +// // Deprecated: Use EvalQueryTracer instead. 
func EvalTracer(tracer topdown.Tracer) EvalOption { - return func(e *EvalContext) { - if tracer != nil { - e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer)) - } - } + return v1.EvalTracer(tracer) } // EvalQueryTracer configures a tracer for a Prepared Query's evaluation func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption { - return func(e *EvalContext) { - if tracer != nil { - e.queryTracers = append(e.queryTracers, tracer) - } - } + return v1.EvalQueryTracer(tracer) } // EvalPartialNamespace returns an argument that sets the namespace to use for // partial evaluation results. The namespace must be a valid package path // component. func EvalPartialNamespace(ns string) EvalOption { - return func(e *EvalContext) { - e.partialNamespace = ns - } + return v1.EvalPartialNamespace(ns) } // EvalUnknowns returns an argument that sets the values to treat as // unknown during partial evaluation. func EvalUnknowns(unknowns []string) EvalOption { - return func(e *EvalContext) { - e.unknowns = unknowns - } + return v1.EvalUnknowns(unknowns) } // EvalDisableInlining returns an argument that adds a set of paths to exclude from // partial evaluation inlining. func EvalDisableInlining(paths []ast.Ref) EvalOption { - return func(e *EvalContext) { - e.disableInlining = paths - } + return v1.EvalDisableInlining(paths) } // EvalParsedUnknowns returns an argument that sets the values to treat // as unknown during partial evaluation. func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption { - return func(e *EvalContext) { - e.parsedUnknowns = unknowns - } + return v1.EvalParsedUnknowns(unknowns) } // EvalRuleIndexing will disable indexing optimizations for the // evaluation. This should only be used when tracing in debug mode. func EvalRuleIndexing(enabled bool) EvalOption { - return func(e *EvalContext) { - e.indexing = enabled - } + return v1.EvalRuleIndexing(enabled) } // EvalEarlyExit will disable 'early exit' optimizations for the // evaluation. 
This should only be used when tracing in debug mode. func EvalEarlyExit(enabled bool) EvalOption { - return func(e *EvalContext) { - e.earlyExit = enabled - } + return v1.EvalEarlyExit(enabled) } // EvalTime sets the wall clock time to use during policy evaluation. // time.now_ns() calls will return this value. func EvalTime(x time.Time) EvalOption { - return func(e *EvalContext) { - e.time = x - } + return v1.EvalTime(x) } // EvalSeed sets a reader that will seed randomization required by built-in functions. // If a seed is not provided crypto/rand.Reader is used. func EvalSeed(r io.Reader) EvalOption { - return func(e *EvalContext) { - e.seed = r - } + return v1.EvalSeed(r) } // EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize // during evaluation. func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption { - return func(e *EvalContext) { - e.interQueryBuiltinCache = c - } + return v1.EvalInterQueryBuiltinCache(c) } // EvalInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize // during evaluation. func EvalInterQueryBuiltinValueCache(c cache.InterQueryValueCache) EvalOption { - return func(e *EvalContext) { - e.interQueryBuiltinValueCache = c - } + return v1.EvalInterQueryBuiltinValueCache(c) } // EvalNDBuiltinCache sets the non-deterministic builtin cache that built-in functions can // use during evaluation. func EvalNDBuiltinCache(c builtins.NDBCache) EvalOption { - return func(e *EvalContext) { - e.ndBuiltinCache = c - } + return v1.EvalNDBuiltinCache(c) } // EvalResolver sets a Resolver for a specified ref path for this evaluation. func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption { - return func(e *EvalContext) { - e.resolvers = append(e.resolvers, refResolver{ref, r}) - } + return v1.EvalResolver(ref, r) } // EvalSortSets causes the evaluator to sort sets before returning them as JSON arrays. 
func EvalSortSets(yes bool) EvalOption { - return func(e *EvalContext) { - e.sortSets = yes - } + return v1.EvalSortSets(yes) } -// EvalCopyMaps causes the evaluator to copy `map[string]interface{}`s before returning them. +// EvalCopyMaps causes the evaluator to copy `map[string]any`s before returning them. func EvalCopyMaps(yes bool) EvalOption { - return func(e *EvalContext) { - e.copyMaps = yes - } + return v1.EvalCopyMaps(yes) } // EvalPrintHook sets the object to use for handling print statement outputs. func EvalPrintHook(ph print.Hook) EvalOption { - return func(e *EvalContext) { - e.printHook = ph - } + return v1.EvalPrintHook(ph) } // EvalVirtualCache sets the topdown.VirtualCache to use for evaluation. This is // optional, and if not set, the default cache is used. func EvalVirtualCache(vc topdown.VirtualCache) EvalOption { - return func(e *EvalContext) { - e.virtualCache = vc - } -} - -func (pq preparedQuery) Modules() map[string]*ast.Module { - mods := make(map[string]*ast.Module) - - for name, mod := range pq.r.parsedModules { - mods[name] = mod - } - - for _, b := range pq.r.bundles { - for _, mod := range b.Modules { - mods[mod.Path] = mod.Parsed - } - } - - return mods -} - -// newEvalContext creates a new EvalContext overlaying any EvalOptions over top -// the Rego object on the preparedQuery. The returned function should be called -// once the evaluation is complete to close any transactions that might have -// been opened. 
-func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) { - ectx := &EvalContext{ - hasInput: false, - rawInput: nil, - parsedInput: nil, - metrics: nil, - txn: nil, - instrument: false, - instrumentation: nil, - partialNamespace: pq.r.partialNamespace, - queryTracers: nil, - unknowns: pq.r.unknowns, - parsedUnknowns: pq.r.parsedUnknowns, - compiledQuery: compiledQuery{}, - indexing: true, - earlyExit: true, - resolvers: pq.r.resolvers, - printHook: pq.r.printHook, - capabilities: pq.r.capabilities, - strictBuiltinErrors: pq.r.strictBuiltinErrors, - } - - for _, o := range options { - o(ectx) - } - - if ectx.metrics == nil { - ectx.metrics = metrics.New() - } - - if ectx.instrument { - ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics) - } - - // Default to an empty "finish" function - finishFunc := func(context.Context) {} - - var err error - ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining) - if err != nil { - return nil, finishFunc, err - } - - if ectx.txn == nil { - ectx.txn, err = pq.r.store.NewTransaction(ctx) - if err != nil { - return nil, finishFunc, err - } - finishFunc = func(ctx context.Context) { - pq.r.store.Abort(ctx, ectx.txn) - } - } - - // If we didn't get an input specified in the Eval options - // then fall back to the Rego object's input fields. 
- if !ectx.hasInput { - ectx.rawInput = pq.r.rawInput - ectx.parsedInput = pq.r.parsedInput - } - - if ectx.parsedInput == nil { - if ectx.rawInput == nil { - // Fall back to the original Rego objects input if none was specified - // Note that it could still be nil - ectx.rawInput = pq.r.rawInput - } - - if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target - pq.r.target != targetWasm { - ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics) - if err != nil { - return nil, finishFunc, err - } - } - } - - return ectx, finishFunc, nil + return v1.EvalVirtualCache(vc) } // PreparedEvalQuery holds the prepared Rego state that has been pre-processed // for subsequent evaluations. -type PreparedEvalQuery struct { - preparedQuery -} - -// Eval evaluates this PartialResult's Rego object with additional eval options -// and returns a ResultSet. -// If options are provided they will override the original Rego options respective value. -// The original Rego object transaction will *not* be re-used. A new transaction will be opened -// if one is not provided with an EvalOption. -func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) { - ectx, finish, err := pq.newEvalContext(ctx, options) - if err != nil { - return nil, err - } - defer finish(ctx) - - ectx.compiledQuery = pq.r.compiledQueries[evalQueryType] - - return pq.r.eval(ctx, ectx) -} +type PreparedEvalQuery = v1.PreparedEvalQuery // PreparedPartialQuery holds the prepared Rego state that has been pre-processed // for partial evaluations. -type PreparedPartialQuery struct { - preparedQuery -} - -// Partial runs partial evaluation on the prepared query and returns the result. -// The original Rego object transaction will *not* be re-used. A new transaction will be opened -// if one is not provided with an EvalOption. 
-func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) { - ectx, finish, err := pq.newEvalContext(ctx, options) - if err != nil { - return nil, err - } - defer finish(ctx) - - ectx.compiledQuery = pq.r.compiledQueries[partialQueryType] - - return pq.r.partial(ctx, ectx) -} +type PreparedPartialQuery = v1.PreparedPartialQuery // Errors represents a collection of errors returned when evaluating Rego. -type Errors []error - -func (errs Errors) Error() string { - if len(errs) == 0 { - return "no error" - } - if len(errs) == 1 { - return fmt.Sprintf("1 error occurred: %v", errs[0].Error()) - } - buf := []string{fmt.Sprintf("%v errors occurred", len(errs))} - for _, err := range errs { - buf = append(buf, err.Error()) - } - return strings.Join(buf, "\n") -} - -var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective") +type Errors = v1.Errors // IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by // this package to indicate that partial evaluation was ineffective. func IsPartialEvaluationNotEffectiveErr(err error) bool { - errs, ok := err.(Errors) - if !ok { - return false - } - return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective -} - -type compiledQuery struct { - query ast.Body - compiler ast.QueryCompiler -} - -type queryType int - -// Define a query type for each of the top level Rego -// API's that compile queries differently. -const ( - evalQueryType queryType = iota - partialResultQueryType - partialQueryType - compileQueryType -) - -type loadPaths struct { - paths []string - filter loader.Filter + return v1.IsPartialEvaluationNotEffectiveErr(err) } // Rego constructs a query and can be evaluated to obtain results. 
-type Rego struct { - query string - parsedQuery ast.Body - compiledQueries map[queryType]compiledQuery - pkg string - parsedPackage *ast.Package - imports []string - parsedImports []*ast.Import - rawInput *interface{} - parsedInput ast.Value - unknowns []string - parsedUnknowns []*ast.Term - disableInlining []string - shallowInlining bool - skipPartialNamespace bool - partialNamespace string - modules []rawModule - parsedModules map[string]*ast.Module - compiler *ast.Compiler - store storage.Store - ownStore bool - ownStoreReadAst bool - txn storage.Transaction - metrics metrics.Metrics - queryTracers []topdown.QueryTracer - tracebuf *topdown.BufferTracer - trace bool - instrumentation *topdown.Instrumentation - instrument bool - capture map[*ast.Expr]ast.Var // map exprs to generated capture vars - termVarID int - dump io.Writer - runtime *ast.Term - time time.Time - seed io.Reader - capabilities *ast.Capabilities - builtinDecls map[string]*ast.Builtin - builtinFuncs map[string]*topdown.Builtin - unsafeBuiltins map[string]struct{} - loadPaths loadPaths - bundlePaths []string - bundles map[string]*bundle.Bundle - skipBundleVerification bool - interQueryBuiltinCache cache.InterQueryCache - interQueryBuiltinValueCache cache.InterQueryValueCache - ndBuiltinCache builtins.NDBCache - strictBuiltinErrors bool - builtinErrorList *[]topdown.Error - resolvers []refResolver - schemaSet *ast.SchemaSet - target string // target type (wasm, rego, etc.) - opa opa.EvalEngine - generateJSON func(*ast.Term, *EvalContext) (interface{}, error) - printHook print.Hook - enablePrintStatements bool - distributedTacingOpts tracing.Options - strict bool - pluginMgr *plugins.Manager - plugins []TargetPlugin - targetPrepState TargetPluginEval - regoVersion ast.RegoVersion -} +type Rego = v1.Rego // Function represents a built-in function that is callable in Rego. 
-type Function struct { - Name string - Description string - Decl *types.Function - Memoize bool - Nondeterministic bool -} +type Function = v1.Function // BuiltinContext contains additional attributes from the evaluator that // built-in functions can use, e.g., the request context.Context, caches, etc. -type BuiltinContext = topdown.BuiltinContext +type BuiltinContext = v1.BuiltinContext type ( // Builtin1 defines a built-in function that accepts 1 argument. - Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error) + Builtin1 = v1.Builtin1 // Builtin2 defines a built-in function that accepts 2 arguments. - Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error) + Builtin2 = v1.Builtin2 // Builtin3 defines a built-in function that accepts 3 argument. - Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error) + Builtin3 = v1.Builtin3 // Builtin4 defines a built-in function that accepts 4 argument. - Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error) + Builtin4 = v1.Builtin4 // BuiltinDyn defines a built-in function that accepts a list of arguments. - BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error) + BuiltinDyn = v1.BuiltinDyn ) // RegisterBuiltin1 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin1(decl *Function, impl Builtin1) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin1(decl, impl) } // RegisterBuiltin2 adds a built-in function globally inside the OPA runtime. 
func RegisterBuiltin2(decl *Function, impl Builtin2) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin2(decl, impl) } // RegisterBuiltin3 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin3(decl *Function, impl Builtin3) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin3(decl, impl) } // RegisterBuiltin4 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin4(decl *Function, impl Builtin4) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin4(decl, impl) } // RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime. 
func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltinDyn(decl, impl) } // Function1 returns an option that adds a built-in function to the Rego object. func Function1(decl *Function, f Builtin1) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function1(decl, f) } // Function2 returns an option that adds a built-in function to the Rego object. func Function2(decl *Function, f Builtin2) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function2(decl, f) } // Function3 returns an option that adds a built-in function to the Rego object. func Function3(decl *Function, f Builtin3) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function3(decl, f) } // Function4 returns an option that adds a built-in function to the Rego object. 
func Function4(decl *Function, f Builtin4) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function4(decl, f) } // FunctionDyn returns an option that adds a built-in function to the Rego object. func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.FunctionDyn(decl, f) } // FunctionDecl returns an option that adds a custom-built-in function // __declaration__. NO implementation is provided. This is used for // non-interpreter execution envs (e.g., Wasm). func FunctionDecl(decl *Function) func(*Rego) { - return newDecl(decl) -} - -func newDecl(decl *Function) func(*Rego) { - return func(r *Rego) { - r.builtinDecls[decl.Name] = &ast.Builtin{ - Name: decl.Name, - Decl: decl.Decl, - } - } -} - -type memo struct { - term *ast.Term - err error -} - -type memokey string - -func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) { - - if !decl.Memoize { - return ifEmpty() - } - - // NOTE(tsandall): we assume memoization is applied to infrequent built-in - // calls that do things like fetch data from remote locations. As such, - // converting the terms to strings is acceptable for now. - var b strings.Builder - if _, err := b.WriteString(decl.Name); err != nil { - return nil, err - } - - // The term slice _may_ include an output term depending on how the caller - // referred to the built-in function. Only use the arguments as the cache - // key. 
Unification ensures we don't get false positive matches. - for i := 0; i < len(decl.Decl.Args()); i++ { - if _, err := b.WriteString(terms[i].String()); err != nil { - return nil, err - } - } - - key := memokey(b.String()) - hit, ok := bctx.Cache.Get(key) - var m memo - if ok { - m = hit.(memo) - } else { - m.term, m.err = ifEmpty() - bctx.Cache.Put(key, m) - } - - return m.term, m.err + return v1.FunctionDecl(decl) } // Dump returns an argument that sets the writer to dump debugging information to. func Dump(w io.Writer) func(r *Rego) { - return func(r *Rego) { - r.dump = w - } + return v1.Dump(w) } // Query returns an argument that sets the Rego query. func Query(q string) func(r *Rego) { - return func(r *Rego) { - r.query = q - } + return v1.Query(q) } // ParsedQuery returns an argument that sets the Rego query. func ParsedQuery(q ast.Body) func(r *Rego) { - return func(r *Rego) { - r.parsedQuery = q - } + return v1.ParsedQuery(q) } // Package returns an argument that sets the Rego package on the query's // context. func Package(p string) func(r *Rego) { - return func(r *Rego) { - r.pkg = p - } + return v1.Package(p) } // ParsedPackage returns an argument that sets the Rego package on the query's // context. func ParsedPackage(pkg *ast.Package) func(r *Rego) { - return func(r *Rego) { - r.parsedPackage = pkg - } + return v1.ParsedPackage(pkg) } // Imports returns an argument that adds a Rego import to the query's context. func Imports(p []string) func(r *Rego) { - return func(r *Rego) { - r.imports = append(r.imports, p...) - } + return v1.Imports(p) } // ParsedImports returns an argument that adds Rego imports to the query's // context. func ParsedImports(imp []*ast.Import) func(r *Rego) { - return func(r *Rego) { - r.parsedImports = append(r.parsedImports, imp...) - } + return v1.ParsedImports(imp) } // Input returns an argument that sets the Rego input document. Input should be // a native Go value representing the input document. 
-func Input(x interface{}) func(r *Rego) { - return func(r *Rego) { - r.rawInput = &x - } +func Input(x any) func(r *Rego) { + return v1.Input(x) } // ParsedInput returns an argument that sets the Rego input document. func ParsedInput(x ast.Value) func(r *Rego) { - return func(r *Rego) { - r.parsedInput = x - } + return v1.ParsedInput(x) } // Unknowns returns an argument that sets the values to treat as unknown during // partial evaluation. func Unknowns(unknowns []string) func(r *Rego) { - return func(r *Rego) { - r.unknowns = unknowns - } + return v1.Unknowns(unknowns) } // ParsedUnknowns returns an argument that sets the values to treat as unknown // during partial evaluation. func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) { - return func(r *Rego) { - r.parsedUnknowns = unknowns - } + return v1.ParsedUnknowns(unknowns) } // DisableInlining adds a set of paths to exclude from partial evaluation inlining. func DisableInlining(paths []string) func(r *Rego) { - return func(r *Rego) { - r.disableInlining = paths - } + return v1.DisableInlining(paths) } // ShallowInlining prevents rules that depend on unknown values from being inlined. // Rules that only depend on known values are inlined. func ShallowInlining(yes bool) func(r *Rego) { - return func(r *Rego) { - r.shallowInlining = yes - } + return v1.ShallowInlining(yes) } // SkipPartialNamespace disables namespacing of partial evalution results for support // rules generated from policy. Synthetic support rules are still namespaced. func SkipPartialNamespace(yes bool) func(r *Rego) { - return func(r *Rego) { - r.skipPartialNamespace = yes - } + return v1.SkipPartialNamespace(yes) } // PartialNamespace returns an argument that sets the namespace to use for // partial evaluation results. The namespace must be a valid package path // component. 
func PartialNamespace(ns string) func(r *Rego) { - return func(r *Rego) { - r.partialNamespace = ns - } + return v1.PartialNamespace(ns) } // Module returns an argument that adds a Rego module. func Module(filename, input string) func(r *Rego) { - return func(r *Rego) { - r.modules = append(r.modules, rawModule{ - filename: filename, - module: input, - }) - } + return v1.Module(filename, input) } // ParsedModule returns an argument that adds a parsed Rego module. If a string // module with the same filename name is added, it will override the parsed // module. func ParsedModule(module *ast.Module) func(*Rego) { - return func(r *Rego) { - var filename string - if module.Package.Location != nil { - filename = module.Package.Location.File - } else { - filename = fmt.Sprintf("module_%p.rego", module) - } - r.parsedModules[filename] = module - } + return v1.ParsedModule(module) } // Load returns an argument that adds a filesystem path to load data @@ -968,9 +378,7 @@ func ParsedModule(module *ast.Module) func(*Rego) { // The Load option can only be used once. // Note: Loading files will require a write transaction on the store. func Load(paths []string, filter loader.Filter) func(r *Rego) { - return func(r *Rego) { - r.loadPaths = loadPaths{paths, filter} - } + return v1.Load(paths, filter) } // LoadBundle returns an argument that adds a filesystem path to load @@ -978,23 +386,17 @@ func Load(paths []string, filter loader.Filter) func(r *Rego) { // to be loaded as a bundle. // Note: Loading bundles will require a write transaction on the store. func LoadBundle(path string) func(r *Rego) { - return func(r *Rego) { - r.bundlePaths = append(r.bundlePaths, path) - } + return v1.LoadBundle(path) } // ParsedBundle returns an argument that adds a bundle to be loaded. func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) { - return func(r *Rego) { - r.bundles[name] = b - } + return v1.ParsedBundle(name, b) } // Compiler returns an argument that sets the Rego compiler. 
func Compiler(c *ast.Compiler) func(r *Rego) { - return func(r *Rego) { - r.compiler = c - } + return v1.Compiler(c) } // Store returns an argument that sets the policy engine's data storage layer. @@ -1003,18 +405,22 @@ func Compiler(c *ast.Compiler) func(r *Rego) { // must also be provided via the Transaction() option. After loading files // or bundles the transaction should be aborted or committed. func Store(s storage.Store) func(r *Rego) { - return func(r *Rego) { - r.store = s - } + return v1.Store(s) +} + +// Data returns an argument that sets the Rego data document. Data should be +// a map representing the data document. This is a simpler alternative to +// using Store with inmem.NewFromObject for cases where an in-memory store +// with static data is sufficient. +func Data(x map[string]any) func(r *Rego) { + return v1.Data(x) } // StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values. // // Only applicable when no store has been set on the Rego object through the Store option. func StoreReadAST(enabled bool) func(r *Rego) { - return func(r *Rego) { - r.ownStoreReadAst = enabled - } + return v1.StoreReadAST(enabled) } // Transaction returns an argument that sets the transaction to use for storage @@ -1024,93 +430,66 @@ func StoreReadAST(enabled bool) func(r *Rego) { // Store() option. If using Load(), LoadBundle(), or ParsedBundle() options // the transaction will likely require write params. func Transaction(txn storage.Transaction) func(r *Rego) { - return func(r *Rego) { - r.txn = txn - } + return v1.Transaction(txn) } // Metrics returns an argument that sets the metrics collection. func Metrics(m metrics.Metrics) func(r *Rego) { - return func(r *Rego) { - r.metrics = m - } + return v1.Metrics(m) } // Instrument returns an argument that enables instrumentation for diagnosing // performance issues. 
func Instrument(yes bool) func(r *Rego) { - return func(r *Rego) { - r.instrument = yes - } + return v1.Instrument(yes) } // Trace returns an argument that enables tracing on r. func Trace(yes bool) func(r *Rego) { - return func(r *Rego) { - r.trace = yes - } + return v1.Trace(yes) } // Tracer returns an argument that adds a query tracer to r. +// // Deprecated: Use QueryTracer instead. func Tracer(t topdown.Tracer) func(r *Rego) { - return func(r *Rego) { - if t != nil { - r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t)) - } - } + return v1.Tracer(t) } // QueryTracer returns an argument that adds a query tracer to r. func QueryTracer(t topdown.QueryTracer) func(r *Rego) { - return func(r *Rego) { - if t != nil { - r.queryTracers = append(r.queryTracers, t) - } - } + return v1.QueryTracer(t) } // Runtime returns an argument that sets the runtime data to provide to the // evaluation engine. func Runtime(term *ast.Term) func(r *Rego) { - return func(r *Rego) { - r.runtime = term - } + return v1.Runtime(term) } // Time sets the wall clock time to use during policy evaluation. Prepared queries // do not inherit this parameter. Use EvalTime to set the wall clock time when // executing a prepared query. func Time(x time.Time) func(r *Rego) { - return func(r *Rego) { - r.time = x - } + return v1.Time(x) } // Seed sets a reader that will seed randomization required by built-in functions. // If a seed is not provided crypto/rand.Reader is used. func Seed(r io.Reader) func(*Rego) { - return func(e *Rego) { - e.seed = r - } + return v1.Seed(r) } // PrintTrace is a helper function to write a human-readable version of the // trace to the writer w. func PrintTrace(w io.Writer, r *Rego) { - if r == nil || r.tracebuf == nil { - return - } - topdown.PrettyTrace(w, *r.tracebuf) + v1.PrintTrace(w, r) } // PrintTraceWithLocation is a helper function to write a human-readable version of the // trace to the writer w. 
func PrintTraceWithLocation(w io.Writer, r *Rego) { - if r == nil || r.tracebuf == nil { - return - } - topdown.PrettyTraceWithLocation(w, *r.tracebuf) + v1.PrintTraceWithLocation(w, r) } // UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow. @@ -1118,104 +497,76 @@ func PrintTraceWithLocation(w io.Writer, r *Rego) { // compiler. This option is always honored for query compilation. Provide an // empty (non-nil) map to disable checks on queries. func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) { - return func(r *Rego) { - r.unsafeBuiltins = unsafeBuiltins - } + return v1.UnsafeBuiltins(unsafeBuiltins) } // SkipBundleVerification skips verification of a signed bundle. func SkipBundleVerification(yes bool) func(r *Rego) { - return func(r *Rego) { - r.skipBundleVerification = yes - } + return v1.SkipBundleVerification(yes) } // InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize // during evaluation. func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) { - return func(r *Rego) { - r.interQueryBuiltinCache = c - } + return v1.InterQueryBuiltinCache(c) } // InterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize // during evaluation. func InterQueryBuiltinValueCache(c cache.InterQueryValueCache) func(r *Rego) { - return func(r *Rego) { - r.interQueryBuiltinValueCache = c - } + return v1.InterQueryBuiltinValueCache(c) } // NDBuiltinCache sets the non-deterministic builtins cache. func NDBuiltinCache(c builtins.NDBCache) func(r *Rego) { - return func(r *Rego) { - r.ndBuiltinCache = c - } + return v1.NDBuiltinCache(c) } // StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors. func StrictBuiltinErrors(yes bool) func(r *Rego) { - return func(r *Rego) { - r.strictBuiltinErrors = yes - } + return v1.StrictBuiltinErrors(yes) } // BuiltinErrorList supplies an error slice to store built-in function errors. 
func BuiltinErrorList(list *[]topdown.Error) func(r *Rego) { - return func(r *Rego) { - r.builtinErrorList = list - } + return v1.BuiltinErrorList(list) } // Resolver sets a Resolver for a specified ref path. func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) { - return func(rego *Rego) { - rego.resolvers = append(rego.resolvers, refResolver{ref, r}) - } + return v1.Resolver(ref, r) } // Schemas sets the schemaSet func Schemas(x *ast.SchemaSet) func(r *Rego) { - return func(r *Rego) { - r.schemaSet = x - } + return v1.Schemas(x) } // Capabilities configures the underlying compiler's capabilities. // This option is ignored for module compilation if the caller supplies the // compiler. func Capabilities(c *ast.Capabilities) func(r *Rego) { - return func(r *Rego) { - r.capabilities = c - } + return v1.Capabilities(c) } // Target sets the runtime to exercise. func Target(t string) func(r *Rego) { - return func(r *Rego) { - r.target = t - } + return v1.Target(t) } // GenerateJSON sets the AST to JSON converter for the results. -func GenerateJSON(f func(*ast.Term, *EvalContext) (interface{}, error)) func(r *Rego) { - return func(r *Rego) { - r.generateJSON = f - } +func GenerateJSON(f func(*ast.Term, *EvalContext) (any, error)) func(r *Rego) { + return v1.GenerateJSON(f) } // PrintHook sets the object to use for handling print statement outputs. func PrintHook(h print.Hook) func(r *Rego) { - return func(r *Rego) { - r.printHook = h - } + return v1.PrintHook(h) } // DistributedTracingOpts sets the options to be used by distributed tracing. func DistributedTracingOpts(tr tracing.Options) func(r *Rego) { - return func(r *Rego) { - r.distributedTacingOpts = tr - } + return v1.DistributedTracingOpts(tr) } // EnablePrintStatements enables print() calls. 
If this option is not provided, @@ -1223,1667 +574,65 @@ func DistributedTracingOpts(tr tracing.Options) func(r *Rego) { // queries and policies that passed as raw strings, i.e., this function will not // have any affect if the caller supplies the ast.Compiler instance. func EnablePrintStatements(yes bool) func(r *Rego) { - return func(r *Rego) { - r.enablePrintStatements = yes - } + return v1.EnablePrintStatements(yes) } // Strict enables or disables strict-mode in the compiler func Strict(yes bool) func(r *Rego) { - return func(r *Rego) { - r.strict = yes - } + return v1.Strict(yes) } func SetRegoVersion(version ast.RegoVersion) func(r *Rego) { - return func(r *Rego) { - r.regoVersion = version - } + return v1.SetRegoVersion(version) } // New returns a new Rego object. func New(options ...func(r *Rego)) *Rego { - - r := &Rego{ - parsedModules: map[string]*ast.Module{}, - capture: map[*ast.Expr]ast.Var{}, - compiledQueries: map[queryType]compiledQuery{}, - builtinDecls: map[string]*ast.Builtin{}, - builtinFuncs: map[string]*topdown.Builtin{}, - bundles: map[string]*bundle.Bundle{}, - } - - for _, option := range options { - option(r) - } - - if r.compiler == nil { - r.compiler = ast.NewCompiler(). - WithUnsafeBuiltins(r.unsafeBuiltins). - WithBuiltins(r.builtinDecls). - WithDebug(r.dump). - WithSchemas(r.schemaSet). - WithCapabilities(r.capabilities). - WithEnablePrintStatements(r.enablePrintStatements). - WithStrict(r.strict). 
- WithUseTypeCheckAnnotations(true) - - // topdown could be target "" or "rego", but both could be overridden by - // a target plugin (checked below) - if r.target == targetWasm { - r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) - } - } - - if r.store == nil { - r.store = inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(r.ownStoreReadAst)) - r.ownStore = true - } else { - r.ownStore = false - } - - if r.metrics == nil { - r.metrics = metrics.New() - } - - if r.instrument { - r.instrumentation = topdown.NewInstrumentation(r.metrics) - r.compiler.WithMetrics(r.metrics) - } - - if r.trace { - r.tracebuf = topdown.NewBufferTracer() - r.queryTracers = append(r.queryTracers, r.tracebuf) - } - - if r.partialNamespace == "" { - r.partialNamespace = defaultPartialNamespace - } - - if r.generateJSON == nil { - r.generateJSON = generateJSON - } - - if r.pluginMgr != nil { - for _, name := range r.pluginMgr.Plugins() { - p := r.pluginMgr.Plugin(name) - if p0, ok := p.(TargetPlugin); ok { - r.plugins = append(r.plugins, p0) - } + opts := make([]func(r *Rego), 0, len(options)+1) + opts = append(opts, options...) + opts = append(opts, func(r *Rego) { + if r.RegoVersion() == ast.RegoUndefined { + SetRegoVersion(ast.DefaultRegoVersion)(r) } - } - - if t := r.targetPlugin(r.target); t != nil { - r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) - } - - return r -} - -// Eval evaluates this Rego object and returns a ResultSet. 
-func (r *Rego) Eval(ctx context.Context) (ResultSet, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - pq, err := r.PrepareForEval(ctx) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return nil, err - } - - evalArgs := []EvalOption{ - EvalTransaction(r.txn), - EvalMetrics(r.metrics), - EvalInstrument(r.instrument), - EvalTime(r.time), - EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), - EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache), - EvalSeed(r.seed), - } - - if r.ndBuiltinCache != nil { - evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) - } - - for _, qt := range r.queryTracers { - evalArgs = append(evalArgs, EvalQueryTracer(qt)) - } - - for i := range r.resolvers { - evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) - } - - rs, err := pq.Eval(ctx, evalArgs...) - txnErr := txnClose(ctx, err) // Always call closer - if err == nil { - err = txnErr - } - return rs, err -} - -// PartialEval has been deprecated and renamed to PartialResult. -func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) { - return r.PartialResult(ctx) -} - -// PartialResult partially evaluates this Rego object and returns a PartialResult. 
-func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PartialResult{}, err - } - - pq, err := r.PrepareForEval(ctx, WithPartialEval()) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PartialResult{}, err - } - if txnErr != nil { - return PartialResult{}, txnErr - } - - pr := PartialResult{ - compiler: pq.r.compiler, - store: pq.r.store, - body: pq.r.parsedQuery, - builtinDecls: pq.r.builtinDecls, - builtinFuncs: pq.r.builtinFuncs, - } - - return pr, nil -} - -// Partial runs partial evaluation on r and returns the result. -func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - pq, err := r.PrepareForPartial(ctx) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return nil, err - } - - evalArgs := []EvalOption{ - EvalTransaction(r.txn), - EvalMetrics(r.metrics), - EvalInstrument(r.instrument), - EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), - EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache), - } - - if r.ndBuiltinCache != nil { - evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) - } - - for _, t := range r.queryTracers { - evalArgs = append(evalArgs, EvalQueryTracer(t)) - } - - for i := range r.resolvers { - evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) - } - - pqs, err := pq.Partial(ctx, evalArgs...) - txnErr := txnClose(ctx, err) // Always call closer - if err == nil { - err = txnErr - } - return pqs, err + }) + + return v1.New(opts...) } // CompileOption defines a function to set options on Compile calls. -type CompileOption func(*CompileContext) +type CompileOption = v1.CompileOption // CompileContext contains options for Compile calls. 
-type CompileContext struct { - partial bool -} +type CompileContext = v1.CompileContext // CompilePartial defines an option to control whether partial evaluation is run // before the query is planned and compiled. func CompilePartial(yes bool) CompileOption { - return func(cfg *CompileContext) { - cfg.partial = yes - } -} - -// Compile returns a compiled policy query. -func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) { - - var cfg CompileContext - - for _, opt := range opts { - opt(&cfg) - } - - var queries []ast.Body - modules := make([]*ast.Module, 0, len(r.compiler.Modules)) - - if cfg.partial { - - pq, err := r.Partial(ctx) - if err != nil { - return nil, err - } - if r.dump != nil { - if len(pq.Queries) != 0 { - msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries)) - fmt.Fprintln(r.dump, msg) - fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) - for i := range pq.Queries { - fmt.Println(pq.Queries[i]) - } - fmt.Fprintln(r.dump) - } - if len(pq.Support) != 0 { - msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support)) - fmt.Fprintln(r.dump, msg) - fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) - for i := range pq.Support { - fmt.Println(pq.Support[i]) - } - fmt.Fprintln(r.dump) - } - } - - queries = pq.Queries - modules = pq.Support - - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - } else { - var err error - // If creating a new transaction it should be closed before calling the - // planner to avoid holding open the transaction longer than needed. - // - // TODO(tsandall): in future, planner could make use of store, in which - // case this will need to change. 
- var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - err = r.prepare(ctx, compileQueryType, nil) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return nil, err - } - if txnErr != nil { - return nil, err - } - - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - queries = []ast.Body{r.compiledQueries[compileQueryType].query} - } - - if tgt := r.targetPlugin(r.target); tgt != nil { - return nil, fmt.Errorf("unsupported for rego target plugins") - } - - return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here -} - -func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { - policy, err := r.planQuery(queries, qType) - if err != nil { - return nil, err - } - - m, err := wasm.New().WithPolicy(policy).Compile() - if err != nil { - return nil, err - } - - var out bytes.Buffer - if err := encoding.WriteModule(&out, m); err != nil { - return nil, err - } - - return &CompileResult{ - Bytes: out.Bytes(), - }, nil + return v1.CompilePartial(yes) } // PrepareOption defines a function to set an option to control // the behavior of the Prepare call. -type PrepareOption func(*PrepareConfig) +type PrepareOption = v1.PrepareOption // PrepareConfig holds settings to control the behavior of the // Prepare call. -type PrepareConfig struct { - doPartialEval bool - disableInlining *[]string - builtinFuncs map[string]*topdown.Builtin -} +type PrepareConfig = v1.PrepareConfig // WithPartialEval configures an option for PrepareForEval // which will have it perform partial evaluation while preparing // the query (similar to rego.Rego#PartialResult) func WithPartialEval() PrepareOption { - return func(p *PrepareConfig) { - p.doPartialEval = true - } + return v1.WithPartialEval() } // WithNoInline adds a set of paths to exclude from partial evaluation inlining. 
func WithNoInline(paths []string) PrepareOption { - return func(p *PrepareConfig) { - p.disableInlining = &paths - } + return v1.WithNoInline(paths) } // WithBuiltinFuncs carries the rego.Function{1,2,3} per-query function definitions // to the target plugins. func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption { - return func(p *PrepareConfig) { - if p.builtinFuncs == nil { - p.builtinFuncs = make(map[string]*topdown.Builtin, len(bis)) - } - for k, v := range bis { - p.builtinFuncs[k] = v - } - } -} - -// BuiltinFuncs allows retrieving the builtin funcs set via PrepareOption -// WithBuiltinFuncs. -func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin { - return p.builtinFuncs -} - -// PrepareForEval will parse inputs, modules, and query arguments in preparation -// of evaluating them. -func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) { - if !r.hasQuery() { - return PreparedEvalQuery{}, fmt.Errorf("cannot evaluate empty query") - } - - pCfg := &PrepareConfig{} - for _, o := range opts { - o(pCfg) - } - - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PreparedEvalQuery{}, err - } - - // If the caller wanted to do partial evaluation as part of preparation - // do it now and use the new Rego object. 
- if pCfg.doPartialEval { - - pr, err := r.partialResult(ctx, pCfg) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - // Prepare the new query using the result of partial evaluation - pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx) - txnErr := txnClose(ctx, err) - if err != nil { - return pq, err - } - return pq, txnErr - } - - err = r.prepare(ctx, evalQueryType, []extraStage{ - { - after: "ResolveRefs", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteToCaptureValue", - MetricName: "query_compile_stage_rewrite_to_capture_value", - Stage: r.rewriteQueryToCaptureValue, - }, - }, - }) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - switch r.target { - case targetWasm: // TODO(sr): make wasm a target plugin, too - - if r.hasWasmModule() { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, fmt.Errorf("wasm target not supported") - } - - var modules []*ast.Module - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - queries := []ast.Body{r.compiledQueries[evalQueryType].query} - - e, err := opa.LookupEngine(targetWasm) - if err != nil { - return PreparedEvalQuery{}, err - } - - // nolint: staticcheck // SA4006 false positive - cr, err := r.compileWasm(modules, queries, evalQueryType) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - // nolint: staticcheck // SA4006 false positive - data, err := r.store.Read(ctx, r.txn, storage.Path{}) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - o, err := e.New().WithPolicyBytes(cr.Bytes).WithDataJSON(data).Init() - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - r.opa = o - - case targetRego: // do nothing, don't lookup default plugin - default: // either a specific plugin target, or one that is default - 
if tgt := r.targetPlugin(r.target); tgt != nil { - queries := []ast.Body{r.compiledQueries[evalQueryType].query} - pol, err := r.planQuery(queries, evalQueryType) - if err != nil { - return PreparedEvalQuery{}, err - } - // always add the builtins provided via rego.FunctionN options - opts = append(opts, WithBuiltinFuncs(r.builtinFuncs)) - r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...) - if err != nil { - return PreparedEvalQuery{}, err - } - } - } - - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PreparedEvalQuery{}, err - } - if txnErr != nil { - return PreparedEvalQuery{}, txnErr - } - - return PreparedEvalQuery{preparedQuery{r, pCfg}}, err -} - -// PrepareForPartial will parse inputs, modules, and query arguments in preparation -// of partially evaluating them. -func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) { - if !r.hasQuery() { - return PreparedPartialQuery{}, fmt.Errorf("cannot evaluate empty query") - } - - pCfg := &PrepareConfig{} - for _, o := range opts { - o(pCfg) - } - - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PreparedPartialQuery{}, err - } - - err = r.prepare(ctx, partialQueryType, []extraStage{ - { - after: "CheckSafety", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteEquals", - MetricName: "query_compile_stage_rewrite_equals", - Stage: r.rewriteEqualsForPartialQueryCompile, - }, - }, - }) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PreparedPartialQuery{}, err - } - if txnErr != nil { - return PreparedPartialQuery{}, txnErr - } - - return PreparedPartialQuery{preparedQuery{r, pCfg}}, err -} - -func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error { - var err error - - r.parsedInput, err = r.parseInput() - if err != nil { - return err - } - - err = r.loadFiles(ctx, r.txn, r.metrics) - if err 
!= nil { - return err - } - - err = r.loadBundles(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - err = r.parseModules(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - // Compile the modules *before* the query, else functions - // defined in the module won't be found... - err = r.compileModules(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - imports, err := r.prepareImports() - if err != nil { - return err - } - - queryImports := []*ast.Import{} - for _, imp := range imports { - path := imp.Path.Value.(ast.Ref) - if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) { - queryImports = append(queryImports, imp) - } - } - - r.parsedQuery, err = r.parseQuery(queryImports, r.metrics) - if err != nil { - return err - } - - err = r.compileAndCacheQuery(qType, r.parsedQuery, imports, r.metrics, extras) - if err != nil { - return err - } - - return nil -} - -func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - if len(r.modules) == 0 { - return nil - } - - ids, err := r.store.ListPolicies(ctx, txn) - if err != nil { - return err - } - - m.Timer(metrics.RegoModuleParse).Start() - defer m.Timer(metrics.RegoModuleParse).Stop() - var errs Errors - - // Parse any modules that are saved to the store, but only if - // another compile step is going to occur (ie. we have parsed modules - // that need to be compiled). 
- for _, id := range ids { - // if it is already on the compiler we're using - // then don't bother to re-parse it from source - if _, haveMod := r.compiler.Modules[id]; haveMod { - continue - } - - bs, err := r.store.GetPolicy(ctx, txn, id) - if err != nil { - return err - } - - parsed, err := ast.ParseModuleWithOpts(id, string(bs), ast.ParserOptions{RegoVersion: r.regoVersion}) - if err != nil { - errs = append(errs, err) - } - - r.parsedModules[id] = parsed - } - - // Parse any passed in as arguments to the Rego object - for _, module := range r.modules { - p, err := module.ParseWithOpts(ast.ParserOptions{RegoVersion: r.regoVersion}) - if err != nil { - switch errorWithType := err.(type) { - case ast.Errors: - for _, e := range errorWithType { - errs = append(errs, e) - } - default: - errs = append(errs, errorWithType) - } - } - r.parsedModules[module.filename] = p - } - - if len(errs) > 0 { - return errs - } - - return nil -} - -func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - if len(r.loadPaths.paths) == 0 { - return nil - } - - m.Timer(metrics.RegoLoadFiles).Start() - defer m.Timer(metrics.RegoLoadFiles).Stop() - - result, err := loader.NewFileLoader(). - WithMetrics(m). - WithProcessAnnotation(true). - WithRegoVersion(r.regoVersion). - Filtered(r.loadPaths.paths, r.loadPaths.filter) - if err != nil { - return err - } - for name, mod := range result.Modules { - r.parsedModules[name] = mod.Parsed - } - - if len(result.Documents) > 0 { - err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents) - if err != nil { - return err - } - } - return nil -} - -func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error { - if len(r.bundlePaths) == 0 { - return nil - } - - m.Timer(metrics.RegoLoadBundles).Start() - defer m.Timer(metrics.RegoLoadBundles).Stop() - - for _, path := range r.bundlePaths { - bndl, err := loader.NewFileLoader(). - WithMetrics(m). 
- WithProcessAnnotation(true). - WithSkipBundleVerification(r.skipBundleVerification). - WithRegoVersion(r.regoVersion). - AsBundle(path) - if err != nil { - return fmt.Errorf("loading error: %s", err) - } - r.bundles[path] = bndl - } - return nil -} - -func (r *Rego) parseInput() (ast.Value, error) { - if r.parsedInput != nil { - return r.parsedInput, nil - } - return r.parseRawInput(r.rawInput, r.metrics) -} - -func (r *Rego) parseRawInput(rawInput *interface{}, m metrics.Metrics) (ast.Value, error) { - var input ast.Value - - if rawInput == nil { - return input, nil - } - - m.Timer(metrics.RegoInputParse).Start() - defer m.Timer(metrics.RegoInputParse).Stop() - - rawPtr := util.Reference(rawInput) - - // roundtrip through json: this turns slices (e.g. []string, []bool) into - // []interface{}, the only array type ast.InterfaceToValue can work with - if err := util.RoundTrip(rawPtr); err != nil { - return nil, err - } - - return ast.InterfaceToValue(*rawPtr) -} - -func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) { - if r.parsedQuery != nil { - return r.parsedQuery, nil - } - - m.Timer(metrics.RegoQueryParse).Start() - defer m.Timer(metrics.RegoQueryParse).Stop() - - popts, err := future.ParserOptionsFromFutureImports(queryImports) - if err != nil { - return nil, err - } - popts.RegoVersion = r.regoVersion - popts, err = parserOptionsFromRegoVersionImport(queryImports, popts) - if err != nil { - return nil, err - } - popts.SkipRules = true - return ast.ParseBodyWithOpts(r.query, popts) -} - -func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) { - for _, imp := range imports { - path := imp.Path.Value.(ast.Ref) - if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 { - popts.RegoVersion = ast.RegoV1 - return popts, nil - } - } - return popts, nil -} - -func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - - // 
Only compile again if there are new modules. - if len(r.bundles) > 0 || len(r.parsedModules) > 0 { - - // The bundle.Activate call will activate any bundles passed in - // (ie compile + handle data store changes), and include any of - // the additional modules passed in. If no bundles are provided - // it will only compile the passed in modules. - // Use this as the single-point of compiling everything only a - // single time. - opts := &bundle.ActivateOpts{ - Ctx: ctx, - Store: r.store, - Txn: txn, - Compiler: r.compilerForTxn(ctx, r.store, txn), - Metrics: m, - Bundles: r.bundles, - ExtraModules: r.parsedModules, - ParserOptions: ast.ParserOptions{RegoVersion: r.regoVersion}, - } - err := bundle.Activate(opts) - if err != nil { - return err - } - } - - // Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided. - if len(r.resolvers) == 0 { - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles) - if err != nil { - return err - } - - for _, rslvr := range resolvers { - for _, ep := range rslvr.Entrypoints() { - r.resolvers = append(r.resolvers, refResolver{ep, rslvr}) - } - } - } - return nil -} - -func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) error { - m.Timer(metrics.RegoQueryCompile).Start() - defer m.Timer(metrics.RegoQueryCompile).Stop() - - cachedQuery, ok := r.compiledQueries[qType] - if ok && cachedQuery.query != nil && cachedQuery.compiler != nil { - return nil - } - - qc, compiled, err := r.compileQuery(query, imports, m, extras) - if err != nil { - return err - } - - // cache the query for future use - r.compiledQueries[qType] = compiledQuery{ - query: compiled, - compiler: qc, - } - return nil -} - -func (r *Rego) prepareImports() ([]*ast.Import, error) { - imports := r.parsedImports - - if len(r.imports) > 0 { - s := make([]string, len(r.imports)) - for i := range r.imports { - s[i] = 
fmt.Sprintf("import %v", r.imports[i]) - } - parsed, err := ast.ParseImports(strings.Join(s, "\n")) - if err != nil { - return nil, err - } - imports = append(imports, parsed...) - } - return imports, nil -} - -func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) { - var pkg *ast.Package - - if r.pkg != "" { - var err error - pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg)) - if err != nil { - return nil, nil, err - } - } else { - pkg = r.parsedPackage - } - - qctx := ast.NewQueryContext(). - WithPackage(pkg). - WithImports(imports) - - qc := r.compiler.QueryCompiler(). - WithContext(qctx). - WithUnsafeBuiltins(r.unsafeBuiltins). - WithEnablePrintStatements(r.enablePrintStatements). - WithStrict(false) - - for _, extra := range extras { - qc = qc.WithStageAfter(extra.after, extra.stage) - } - - compiled, err := qc.Compile(query) - - return qc, compiled, err - -} - -func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) { - switch { - case r.targetPrepState != nil: // target plugin flow - var val ast.Value - if r.runtime != nil { - val = r.runtime.Value - } - s, err := r.targetPrepState.Eval(ctx, ectx, val) - if err != nil { - return nil, err - } - return r.valueToQueryResult(s, ectx) - case r.target == targetWasm: - return r.evalWasm(ctx, ectx) - case r.target == targetRego: // continue - } - - q := topdown.NewQuery(ectx.compiledQuery.query). - WithQueryCompiler(ectx.compiledQuery.compiler). - WithCompiler(r.compiler). - WithStore(r.store). - WithTransaction(ectx.txn). - WithBuiltins(r.builtinFuncs). - WithMetrics(ectx.metrics). - WithInstrumentation(ectx.instrumentation). - WithRuntime(r.runtime). - WithIndexing(ectx.indexing). - WithEarlyExit(ectx.earlyExit). - WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). - WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). - WithStrictBuiltinErrors(r.strictBuiltinErrors). 
- WithBuiltinErrorList(r.builtinErrorList). - WithSeed(ectx.seed). - WithPrintHook(ectx.printHook). - WithDistributedTracingOpts(r.distributedTacingOpts). - WithVirtualCache(ectx.virtualCache) - - if !ectx.time.IsZero() { - q = q.WithTime(ectx.time) - } - - if ectx.ndBuiltinCache != nil { - q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) - } - - for i := range ectx.queryTracers { - q = q.WithQueryTracer(ectx.queryTracers[i]) - } - - if ectx.parsedInput != nil { - q = q.WithInput(ast.NewTerm(ectx.parsedInput)) - } - - for i := range ectx.resolvers { - q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) - } - - // Cancel query if context is cancelled or deadline is reached. - c := topdown.NewCancel() - q = q.WithCancel(c) - exit := make(chan struct{}) - defer close(exit) - go waitForDone(ctx, exit, func() { - c.Cancel() - }) - - var rs ResultSet - err := q.Iter(ctx, func(qr topdown.QueryResult) error { - result, err := r.generateResult(qr, ectx) - if err != nil { - return err - } - rs = append(rs, result) - return nil - }) - - if err != nil { - return nil, err - } - - if len(rs) == 0 { - return nil, nil - } - - return rs, nil -} - -func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, error) { - input := ectx.rawInput - if ectx.parsedInput != nil { - i := interface{}(ectx.parsedInput) - input = &i - } - result, err := r.opa.Eval(ctx, opa.EvalOpts{ - Metrics: r.metrics, - Input: input, - Time: ectx.time, - Seed: ectx.seed, - InterQueryBuiltinCache: ectx.interQueryBuiltinCache, - NDBuiltinCache: ectx.ndBuiltinCache, - PrintHook: ectx.printHook, - Capabilities: ectx.capabilities, - }) - if err != nil { - return nil, err - } - - parsed, err := ast.ParseTerm(string(result.Result)) - if err != nil { - return nil, err - } - - return r.valueToQueryResult(parsed.Value, ectx) -} - -func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) { - resultSet, ok := res.(ast.Set) - if !ok { - return nil, fmt.Errorf("illegal 
result type") - } - - if resultSet.Len() == 0 { - return nil, nil - } - - var rs ResultSet - err := resultSet.Iter(func(term *ast.Term) error { - obj, ok := term.Value.(ast.Object) - if !ok { - return fmt.Errorf("illegal result type") - } - qr := topdown.QueryResult{} - obj.Foreach(func(k, v *ast.Term) { - kvt := ast.VarTerm(string(k.Value.(ast.String))) - qr[kvt.Value.(ast.Var)] = v - }) - result, err := r.generateResult(qr, ectx) - if err != nil { - return err - } - rs = append(rs, result) - return nil - }) - - return rs, err -} - -func (r *Rego) generateResult(qr topdown.QueryResult, ectx *EvalContext) (Result, error) { - - rewritten := ectx.compiledQuery.compiler.RewrittenVars() - - result := newResult() - for k, term := range qr { - v, err := r.generateJSON(term, ectx) - if err != nil { - return result, err - } - - if rw, ok := rewritten[k]; ok { - k = rw - } - if isTermVar(k) || isTermWasmVar(k) || k.IsGenerated() || k.IsWildcard() { - continue - } - result.Bindings[string(k)] = v - } - - for _, expr := range ectx.compiledQuery.query { - if expr.Generated { - continue - } - - if k, ok := r.capture[expr]; ok { - v, err := r.generateJSON(qr[k], ectx) - if err != nil { - return result, err - } - result.Expressions = append(result.Expressions, newExpressionValue(expr, v)) - } else { - result.Expressions = append(result.Expressions, newExpressionValue(expr, true)) - } - - } - return result, nil -} - -func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) { - - err := r.prepare(ctx, partialResultQueryType, []extraStage{ - { - after: "ResolveRefs", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteForPartialEval", - MetricName: "query_compile_stage_rewrite_for_partial_eval", - Stage: r.rewriteQueryForPartialEval, - }, - }, - }) - if err != nil { - return PartialResult{}, err - } - - ectx := &EvalContext{ - parsedInput: r.parsedInput, - metrics: r.metrics, - txn: r.txn, - partialNamespace: r.partialNamespace, - 
queryTracers: r.queryTracers, - compiledQuery: r.compiledQueries[partialResultQueryType], - instrumentation: r.instrumentation, - indexing: true, - resolvers: r.resolvers, - capabilities: r.capabilities, - strictBuiltinErrors: r.strictBuiltinErrors, - } - - disableInlining := r.disableInlining - - if pCfg.disableInlining != nil { - disableInlining = *pCfg.disableInlining - } - - ectx.disableInlining, err = parseStringsToRefs(disableInlining) - if err != nil { - return PartialResult{}, err - } - - pq, err := r.partial(ctx, ectx) - if err != nil { - return PartialResult{}, err - } - - // Construct module for queries. - id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace) - - module, err := ast.ParseModule(id, "package "+ectx.partialNamespace) - if err != nil { - return PartialResult{}, fmt.Errorf("bad partial namespace") - } - - module.Rules = make([]*ast.Rule, len(pq.Queries)) - for i, body := range pq.Queries { - rule := &ast.Rule{ - Head: ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard), - Body: body, - Module: module, - } - module.Rules[i] = rule - if checkPartialResultForRecursiveRefs(body, rule.Path()) { - return PartialResult{}, Errors{errPartialEvaluationNotEffective} - } - } - - // Update compiler with partial evaluation output. 
- r.compiler.Modules[id] = module - for i, module := range pq.Support { - r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module - } - - r.metrics.Timer(metrics.RegoModuleCompile).Start() - r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules) - r.metrics.Timer(metrics.RegoModuleCompile).Stop() - - if r.compiler.Failed() { - return PartialResult{}, r.compiler.Errors - } - - result := PartialResult{ - compiler: r.compiler, - store: r.store, - body: ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)), - builtinDecls: r.builtinDecls, - builtinFuncs: r.builtinFuncs, - } - - return result, nil -} - -func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) { - - var unknowns []*ast.Term - - switch { - case ectx.parsedUnknowns != nil: - unknowns = ectx.parsedUnknowns - case ectx.unknowns != nil: - unknowns = make([]*ast.Term, len(ectx.unknowns)) - for i := range ectx.unknowns { - var err error - unknowns[i], err = ast.ParseTerm(ectx.unknowns[i]) - if err != nil { - return nil, err - } - } - default: - // Use input document as unknown if caller has not specified any. - unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)} - } - - q := topdown.NewQuery(ectx.compiledQuery.query). - WithQueryCompiler(ectx.compiledQuery.compiler). - WithCompiler(r.compiler). - WithStore(r.store). - WithTransaction(ectx.txn). - WithBuiltins(r.builtinFuncs). - WithMetrics(ectx.metrics). - WithInstrumentation(ectx.instrumentation). - WithUnknowns(unknowns). - WithDisableInlining(ectx.disableInlining). - WithRuntime(r.runtime). - WithIndexing(ectx.indexing). - WithEarlyExit(ectx.earlyExit). - WithPartialNamespace(ectx.partialNamespace). - WithSkipPartialNamespace(r.skipPartialNamespace). - WithShallowInlining(r.shallowInlining). - WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). - WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). 
- WithStrictBuiltinErrors(ectx.strictBuiltinErrors). - WithSeed(ectx.seed). - WithPrintHook(ectx.printHook) - - if !ectx.time.IsZero() { - q = q.WithTime(ectx.time) - } - - if ectx.ndBuiltinCache != nil { - q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) - } - - for i := range ectx.queryTracers { - q = q.WithQueryTracer(ectx.queryTracers[i]) - } - - if ectx.parsedInput != nil { - q = q.WithInput(ast.NewTerm(ectx.parsedInput)) - } - - for i := range ectx.resolvers { - q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) - } - - // Cancel query if context is cancelled or deadline is reached. - c := topdown.NewCancel() - q = q.WithCancel(c) - exit := make(chan struct{}) - defer close(exit) - go waitForDone(ctx, exit, func() { - c.Cancel() - }) - - queries, support, err := q.PartialRun(ctx) - if err != nil { - return nil, err - } - - // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules. - if r.regoVersion == ast.RegoV0 && (r.capabilities == nil || r.capabilities.ContainsFeature(ast.FeatureRegoV1Import)) { - - for i, mod := range support { - // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that - // conflict with future keywords. - applyRegoVersion := true - - ast.WalkRules(mod, func(r *ast.Rule) bool { - name := r.Head.Name - if name == "" && len(r.Head.Reference) > 0 { - name = r.Head.Reference[0].Value.(ast.Var) - } - if ast.IsFutureKeyword(name.String()) { - applyRegoVersion = false - return true - } - return false - }) - - if applyRegoVersion { - ast.WalkVars(mod, func(v ast.Var) bool { - if ast.IsFutureKeyword(v.String()) { - applyRegoVersion = false - return true - } - return false - }) - } - - if applyRegoVersion { - support[i].SetRegoVersion(ast.RegoV0CompatV1) - } else { - support[i].SetRegoVersion(r.regoVersion) - } - } - } else { - // If the target rego-version is not v0, then we apply the target rego-version to the support modules. 
- for i := range support { - support[i].SetRegoVersion(r.regoVersion) - } - } - - pq := &PartialQueries{ - Queries: queries, - Support: support, - } - - return pq, nil -} - -func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - - checkCapture := iteration(query) || len(query) > 1 - - for _, expr := range query { - - if expr.Negated { - continue - } - - if expr.IsAssignment() || expr.IsEquality() { - continue - } - - var capture *ast.Term - - // If the expression can be evaluated as a function, rewrite it to - // capture the return value. E.g., neq(1,2) becomes neq(1,2,x) but - // plus(1,2,x) does not get rewritten. - switch terms := expr.Terms.(type) { - case *ast.Term: - capture = r.generateTermVar() - expr.Terms = ast.Equality.Expr(terms, capture).Terms - r.capture[expr] = capture.Value.(ast.Var) - case []*ast.Term: - tpe := r.compiler.TypeEnv.Get(terms[0]) - if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 { - capture = r.generateTermVar() - expr.Terms = append(terms, capture) - r.capture[expr] = capture.Value.(ast.Var) - } - } - - if capture != nil && checkCapture { - cpy := expr.Copy() - cpy.Terms = capture - cpy.Generated = true - cpy.With = nil - query.Append(cpy) - } - } - - return query, nil -} - -func (r *Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - if len(query) != 1 { - return nil, fmt.Errorf("partial evaluation requires single ref (not multiple expressions)") - } - - term, ok := query[0].Terms.(*ast.Term) - if !ok { - return nil, fmt.Errorf("partial evaluation requires ref (not expression)") - } - - ref, ok := term.Value.(ast.Ref) - if !ok { - return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.TypeName(term.Value)) - } - - if !ref.IsGround() { - return nil, fmt.Errorf("partial evaluation requires ground ref") - } - - return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil -} - -// rewriteEqualsForPartialQueryCompile will 
rewrite == to = in queries. Normally -// this wouldn't be done, except for handling queries with the `Partial` API -// where rewriting them can substantially simplify the result, and it is unlikely -// that the caller would need expression values. -func (r *Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - doubleEq := ast.Equal.Ref() - unifyOp := ast.Equality.Ref() - ast.WalkExprs(query, func(x *ast.Expr) bool { - if x.IsCall() { - operator := x.Operator() - if operator.Equal(doubleEq) && len(x.Operands()) == 2 { - x.SetOperator(ast.NewTerm(unifyOp)) - } - } - return false - }) - return query, nil -} - -func (r *Rego) generateTermVar() *ast.Term { - r.termVarID++ - prefix := ast.WildcardPrefix - if p := r.targetPlugin(r.target); p != nil { - prefix = wasmVarPrefix - } else if r.target == targetWasm { - prefix = wasmVarPrefix - } - return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID)) -} - -func (r Rego) hasQuery() bool { - return len(r.query) != 0 || len(r.parsedQuery) != 0 -} - -func (r Rego) hasWasmModule() bool { - for _, b := range r.bundles { - if len(b.WasmModules) > 0 { - return true - } - } - return false -} - -type transactionCloser func(ctx context.Context, err error) error - -// getTxn will conditionally create a read or write transaction suitable for -// the configured Rego object. The returned function should be used to close the txn -// regardless of status. -func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) { - - noopCloser := func(_ context.Context, _ error) error { - return nil // no-op default - } - - if r.txn != nil { - // Externally provided txn - return r.txn, noopCloser, nil - } - - // Create a new transaction.. 
- params := storage.TransactionParams{} - - // Bundles and data paths may require writing data files or manifests to storage - if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 { - - // If we were given a store we will *not* write to it, only do that on one - // which was created automatically on behalf of the user. - if !r.ownStore { - return nil, noopCloser, errors.New("unable to start write transaction when store was provided") - } - - params.Write = true - } - - txn, err := r.store.NewTransaction(ctx, params) - if err != nil { - return nil, noopCloser, err - } - - // Setup a closer function that will abort or commit as needed. - closer := func(ctx context.Context, txnErr error) error { - var err error - - if txnErr == nil && params.Write { - err = r.store.Commit(ctx, txn) - } else { - r.store.Abort(ctx, txn) - } - - // Clear the auto created transaction now that it is closed. - r.txn = nil - - return err - } - - return txn, closer, nil -} - -func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler { - // Update the compiler to have a valid path conflict check - // for the current context and transaction. 
- return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn)) -} - -func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool { - var stop bool - ast.WalkRefs(body, func(x ast.Ref) bool { - if !stop { - if path.HasPrefix(x) { - stop = true - } - } - return stop - }) - return stop -} - -func isTermVar(v ast.Var) bool { - return strings.HasPrefix(string(v), ast.WildcardPrefix+"term") -} - -func isTermWasmVar(v ast.Var) bool { - return strings.HasPrefix(string(v), wasmVarPrefix+"term") -} - -func waitForDone(ctx context.Context, exit chan struct{}, f func()) { - select { - case <-exit: - return - case <-ctx.Done(): - f() - return - } -} - -type rawModule struct { - filename string - module string -} - -func (m rawModule) Parse() (*ast.Module, error) { - return ast.ParseModule(m.filename, m.module) -} - -func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) { - return ast.ParseModuleWithOpts(m.filename, m.module, opts) -} - -type extraStage struct { - after string - stage ast.QueryCompilerStageDefinition -} - -type refResolver struct { - ref ast.Ref - r resolver.Resolver -} - -func iteration(x interface{}) bool { - - var stopped bool - - vis := ast.NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case *ast.Term: - if ast.IsComprehension(x.Value) { - return true - } - case ast.Ref: - if !stopped { - if bi := ast.BuiltinMap[x.String()]; bi != nil { - if bi.Relation { - stopped = true - return stopped - } - } - for i := 1; i < len(x); i++ { - if _, ok := x[i].Value.(ast.Var); ok { - stopped = true - return stopped - } - } - } - return stopped - } - return stopped - }) - - vis.Walk(x) - - return stopped -} - -func parseStringsToRefs(s []string) ([]ast.Ref, error) { - - refs := make([]ast.Ref, len(s)) - for i := range refs { - var err error - refs[i], err = ast.ParseRef(s[i]) - if err != nil { - return nil, err - } - } - - return refs, nil -} - -// helper function to finish a built-in function 
call. If an error occurred, -// wrap the error and return it. Otherwise, invoke the iterator if the result -// was defined. -func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error { - if err != nil { - var e *HaltError - if errors.As(err, &e) { - tdErr := &topdown.Error{ - Code: topdown.BuiltinErr, - Message: fmt.Sprintf("%v: %v", name, e.Error()), - Location: bctx.Location, - } - return topdown.Halt{Err: tdErr.Wrap(e)} - } - tdErr := &topdown.Error{ - Code: topdown.BuiltinErr, - Message: fmt.Sprintf("%v: %v", name, err.Error()), - Location: bctx.Location, - } - return tdErr.Wrap(err) - } - if result == nil { - return nil - } - return iter(result) -} - -// helper function to return an option that sets a custom built-in function. -func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) { - return func(r *Rego) { - r.builtinDecls[decl.Name] = &ast.Builtin{ - Name: decl.Name, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - } - r.builtinFuncs[decl.Name] = &topdown.Builtin{ - Decl: r.builtinDecls[decl.Name], - Func: f, - } - } -} - -func generateJSON(term *ast.Term, ectx *EvalContext) (interface{}, error) { - return ast.JSONWithOpt(term.Value, - ast.JSONOpt{ - SortSets: ectx.sortSets, - CopyMaps: ectx.copyMaps, - }) -} - -func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) { - modules := make([]*ast.Module, 0, len(r.compiler.Modules)) - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap)) - - for k, v := range ast.BuiltinMap { - decls[k] = v - } - - for k, v := range r.builtinDecls { - decls[k] = v - } - - const queryName = "eval" // NOTE(tsandall): the query name is arbitrary - - p := planner.New(). 
- WithQueries([]planner.QuerySet{ - { - Name: queryName, - Queries: queries, - RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(), - }, - }). - WithModules(modules). - WithBuiltinDecls(decls). - WithDebug(r.dump) - - policy, err := p.Plan() - if err != nil { - return nil, err - } - if r.dump != nil { - fmt.Fprintln(r.dump, "PLAN:") - fmt.Fprintln(r.dump, "-----") - err = ir.Pretty(r.dump, policy) - if err != nil { - return nil, err - } - fmt.Fprintln(r.dump) - } - return policy, nil + return v1.WithBuiltinFuncs(bis) } diff --git a/vendor/github.com/open-policy-agent/opa/rego/resultset.go b/vendor/github.com/open-policy-agent/opa/rego/resultset.go index e60fa6fbe4..5c03360dfa 100644 --- a/vendor/github.com/open-policy-agent/opa/rego/resultset.go +++ b/vendor/github.com/open-policy-agent/opa/rego/resultset.go @@ -1,90 +1,22 @@ package rego import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/rego" ) // ResultSet represents a collection of output from Rego evaluation. An empty // result set represents an undefined query. -type ResultSet []Result +type ResultSet = v1.ResultSet // Vars represents a collection of variable bindings. The keys are the variable // names and the values are the binding values. -type Vars map[string]interface{} - -// WithoutWildcards returns a copy of v with wildcard variables removed. -func (v Vars) WithoutWildcards() Vars { - n := Vars{} - for k, v := range v { - if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() { - continue - } - n[k] = v - } - return n -} +type Vars = v1.Vars // Result defines the output of Rego evaluation. -type Result struct { - Expressions []*ExpressionValue `json:"expressions"` - Bindings Vars `json:"bindings,omitempty"` -} - -func newResult() Result { - return Result{ - Bindings: Vars{}, - } -} +type Result = v1.Result // Location defines a position in a Rego query or module. 
-type Location struct { - Row int `json:"row"` - Col int `json:"col"` -} +type Location = v1.Location // ExpressionValue defines the value of an expression in a Rego query. -type ExpressionValue struct { - Value interface{} `json:"value"` - Text string `json:"text"` - Location *Location `json:"location"` -} - -func newExpressionValue(expr *ast.Expr, value interface{}) *ExpressionValue { - result := &ExpressionValue{ - Value: value, - } - if expr.Location != nil { - result.Text = string(expr.Location.Text) - result.Location = &Location{ - Row: expr.Location.Row, - Col: expr.Location.Col, - } - } - return result -} - -func (ev *ExpressionValue) String() string { - return fmt.Sprint(ev.Value) -} - -// Allowed is a helper method that'll return true if all of these conditions hold: -// - the result set only has one element -// - there is only one expression in the result set's only element -// - that expression has the value `true` -// - there are no bindings. -// -// If bindings are present, this will yield `false`: it would be a pitfall to -// return `true` for a query like `data.authz.allow = x`, which always has result -// set element with value true, but could also have a binding `x: false`. -func (rs ResultSet) Allowed() bool { - if len(rs) == 1 && len(rs[0].Bindings) == 0 { - if exprs := rs[0].Expressions; len(exprs) == 1 { - if b, ok := exprs[0].Value.(bool); ok { - return b - } - } - } - return false -} +type ExpressionValue = v1.ExpressionValue diff --git a/vendor/github.com/open-policy-agent/opa/resolver/interface.go b/vendor/github.com/open-policy-agent/opa/resolver/interface.go deleted file mode 100644 index fc02329f57..0000000000 --- a/vendor/github.com/open-policy-agent/opa/resolver/interface.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package resolver - -import ( - "context" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" -) - -// Resolver defines an external value resolver for OPA evaluations. -type Resolver interface { - Eval(context.Context, Input) (Result, error) -} - -// Input as provided to a Resolver instance when evaluating. -type Input struct { - Ref ast.Ref - Input *ast.Term - Metrics metrics.Metrics -} - -// Result of resolving a ref. -type Result struct { - Value ast.Value -} diff --git a/vendor/github.com/open-policy-agent/opa/storage/doc.go b/vendor/github.com/open-policy-agent/opa/storage/doc.go index 6fa2f86d98..c33db689ed 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/doc.go +++ b/vendor/github.com/open-policy-agent/opa/storage/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package storage exposes the policy engine's storage layer. +// +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. package storage diff --git a/vendor/github.com/open-policy-agent/opa/storage/errors.go b/vendor/github.com/open-policy-agent/opa/storage/errors.go index 8c789052ed..1403b3a988 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/errors.go +++ b/vendor/github.com/open-policy-agent/opa/storage/errors.go @@ -5,118 +5,69 @@ package storage import ( - "fmt" + v1 "github.com/open-policy-agent/opa/v1/storage" ) const ( // InternalErr indicates an unknown, internal error has occurred. 
- InternalErr = "storage_internal_error" + InternalErr = v1.InternalErr // NotFoundErr indicates the path used in the storage operation does not // locate a document. - NotFoundErr = "storage_not_found_error" + NotFoundErr = v1.NotFoundErr // WriteConflictErr indicates a write on the path enocuntered a conflicting // value inside the transaction. - WriteConflictErr = "storage_write_conflict_error" + WriteConflictErr = v1.WriteConflictErr // InvalidPatchErr indicates an invalid patch/write was issued. The patch // was rejected. - InvalidPatchErr = "storage_invalid_patch_error" + InvalidPatchErr = v1.InvalidPatchErr // InvalidTransactionErr indicates an invalid operation was performed // inside of the transaction. - InvalidTransactionErr = "storage_invalid_txn_error" + InvalidTransactionErr = v1.InvalidTransactionErr // TriggersNotSupportedErr indicates the caller attempted to register a // trigger against a store that does not support them. - TriggersNotSupportedErr = "storage_triggers_not_supported_error" + TriggersNotSupportedErr = v1.TriggersNotSupportedErr // WritesNotSupportedErr indicate the caller attempted to perform a write // against a store that does not support them. - WritesNotSupportedErr = "storage_writes_not_supported_error" + WritesNotSupportedErr = v1.WritesNotSupportedErr // PolicyNotSupportedErr indicate the caller attempted to perform a policy // management operation against a store that does not support them. - PolicyNotSupportedErr = "storage_policy_not_supported_error" + PolicyNotSupportedErr = v1.PolicyNotSupportedErr ) // Error is the error type returned by the storage layer. -type Error struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (err *Error) Error() string { - if err.Message != "" { - return fmt.Sprintf("%v: %v", err.Code, err.Message) - } - return err.Code -} +type Error = v1.Error // IsNotFound returns true if this error is a NotFoundErr. 
func IsNotFound(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == NotFoundErr - } - return false + return v1.IsNotFound(err) } // IsWriteConflictError returns true if this error a WriteConflictErr. func IsWriteConflictError(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == WriteConflictErr - } - return false + return v1.IsWriteConflictError(err) } // IsInvalidPatch returns true if this error is a InvalidPatchErr. func IsInvalidPatch(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == InvalidPatchErr - } - return false + return v1.IsInvalidPatch(err) } // IsInvalidTransaction returns true if this error is a InvalidTransactionErr. func IsInvalidTransaction(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == InvalidTransactionErr - } - return false + return v1.IsInvalidTransaction(err) } // IsIndexingNotSupported is a stub for backwards-compatibility. // // Deprecated: We no longer return IndexingNotSupported errors, so it is // unnecessary to check for them. -func IsIndexingNotSupported(error) bool { return false } - -func writeConflictError(path Path) *Error { - return &Error{ - Code: WriteConflictErr, - Message: path.String(), - } -} - -func triggersNotSupportedError() *Error { - return &Error{ - Code: TriggersNotSupportedErr, - } -} - -func writesNotSupportedError() *Error { - return &Error{ - Code: WritesNotSupportedErr, - } -} - -func policyNotSupportedError() *Error { - return &Error{ - Code: PolicyNotSupportedErr, - } +func IsIndexingNotSupported(err error) bool { + return v1.IsIndexingNotSupported(err) } diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go new file mode 100644 index 0000000000..5f536b66dd --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go @@ -0,0 +1,8 @@ +// Copyright 2016 The OPA Authors. 
All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package inmem diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go index 9f5b8ba258..dabedd4ef8 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go +++ b/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go @@ -16,443 +16,41 @@ package inmem import ( - "context" - "fmt" "io" - "path/filepath" - "strings" - "sync" - "sync/atomic" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/merge" "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/storage/inmem" ) // New returns an empty in-memory store. func New() storage.Store { - return NewWithOpts() + return v1.New() } // NewWithOpts returns an empty in-memory store, with extra options passed. func NewWithOpts(opts ...Opt) storage.Store { - s := &store{ - triggers: map[*handle]storage.TriggerConfig{}, - policies: map[string][]byte{}, - roundTripOnWrite: true, - returnASTValuesOnRead: false, - } - - for _, opt := range opts { - opt(s) - } - - if s.returnASTValuesOnRead { - s.data = ast.NewObject() - } else { - s.data = map[string]interface{}{} - } - - return s + return v1.NewWithOpts(opts...) } // NewFromObject returns a new in-memory store from the supplied data object. 
-func NewFromObject(data map[string]interface{}) storage.Store { - return NewFromObjectWithOpts(data) +func NewFromObject(data map[string]any) storage.Store { + return v1.NewFromObject(data) } // NewFromObjectWithOpts returns a new in-memory store from the supplied data object, with the // options passed. -func NewFromObjectWithOpts(data map[string]interface{}, opts ...Opt) storage.Store { - db := NewWithOpts(opts...) - ctx := context.Background() - txn, err := db.NewTransaction(ctx, storage.WriteParams) - if err != nil { - panic(err) - } - if err := db.Write(ctx, txn, storage.AddOp, storage.Path{}, data); err != nil { - panic(err) - } - if err := db.Commit(ctx, txn); err != nil { - panic(err) - } - return db +func NewFromObjectWithOpts(data map[string]any, opts ...Opt) storage.Store { + return v1.NewFromObjectWithOpts(data, opts...) } // NewFromReader returns a new in-memory store from a reader that produces a // JSON serialized object. This function is for test purposes. func NewFromReader(r io.Reader) storage.Store { - return NewFromReaderWithOpts(r) + return v1.NewFromReader(r) } // NewFromReader returns a new in-memory store from a reader that produces a // JSON serialized object, with extra options. This function is for test purposes. func NewFromReaderWithOpts(r io.Reader, opts ...Opt) storage.Store { - d := util.NewJSONDecoder(r) - var data map[string]interface{} - if err := d.Decode(&data); err != nil { - panic(err) - } - return NewFromObjectWithOpts(data, opts...) -} - -type store struct { - rmu sync.RWMutex // reader-writer lock - wmu sync.Mutex // writer lock - xid uint64 // last generated transaction id - data interface{} // raw or AST data - policies map[string][]byte // raw policies - triggers map[*handle]storage.TriggerConfig // registered triggers - - // roundTripOnWrite, if true, means that every call to Write round trips the - // data through JSON before adding the data to the store. Defaults to true. 
- roundTripOnWrite bool - - // returnASTValuesOnRead, if true, means that the store will eagerly convert data to AST values, - // and return them on Read. - // FIXME: naming(?) - returnASTValuesOnRead bool -} - -type handle struct { - db *store -} - -func (db *store) NewTransaction(_ context.Context, params ...storage.TransactionParams) (storage.Transaction, error) { - var write bool - var ctx *storage.Context - if len(params) > 0 { - write = params[0].Write - ctx = params[0].Context - } - xid := atomic.AddUint64(&db.xid, uint64(1)) - if write { - db.wmu.Lock() - } else { - db.rmu.RLock() - } - return newTransaction(xid, write, ctx, db), nil -} - -// Truncate implements the storage.Store interface. This method must be called within a transaction. -func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params storage.TransactionParams, it storage.Iterator) error { - var update *storage.Update - var err error - mergedData := map[string]interface{}{} - - underlying, err := db.underlying(txn) - if err != nil { - return err - } - - for { - update, err = it.Next() - if err != nil { - break - } - - if update.IsPolicy { - err = underlying.UpsertPolicy(strings.TrimLeft(update.Path.String(), "/"), update.Value) - if err != nil { - return err - } - } else { - var value interface{} - err = util.Unmarshal(update.Value, &value) - if err != nil { - return err - } - - var key []string - dirpath := strings.TrimLeft(update.Path.String(), "/") - if len(dirpath) > 0 { - key = strings.Split(dirpath, "/") - } - - if value != nil { - obj, err := mktree(key, value) - if err != nil { - return err - } - - merged, ok := merge.InterfaceMaps(mergedData, obj) - if !ok { - return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) - } - mergedData = merged - } - } - } - - if err != nil && err != io.EOF { - return err - } - - // For backwards compatibility, check if `RootOverwrite` was configured. 
- if params.RootOverwrite { - newPath, ok := storage.ParsePathEscaped("/") - if !ok { - return fmt.Errorf("storage path invalid: %v", newPath) - } - return underlying.Write(storage.AddOp, newPath, mergedData) - } - - for _, root := range params.BasePaths { - newPath, ok := storage.ParsePathEscaped("/" + root) - if !ok { - return fmt.Errorf("storage path invalid: %v", newPath) - } - - if value, ok := lookup(newPath, mergedData); ok { - if len(newPath) > 0 { - if err := storage.MakeDir(ctx, db, txn, newPath[:len(newPath)-1]); err != nil { - return err - } - } - if err := underlying.Write(storage.AddOp, newPath, value); err != nil { - return err - } - } - } - return nil -} - -func (db *store) Commit(ctx context.Context, txn storage.Transaction) error { - underlying, err := db.underlying(txn) - if err != nil { - return err - } - if underlying.write { - db.rmu.Lock() - event := underlying.Commit() - db.runOnCommitTriggers(ctx, txn, event) - // Mark the transaction stale after executing triggers, so they can - // perform store operations if needed. 
- underlying.stale = true - db.rmu.Unlock() - db.wmu.Unlock() - } else { - db.rmu.RUnlock() - } - return nil -} - -func (db *store) Abort(_ context.Context, txn storage.Transaction) { - underlying, err := db.underlying(txn) - if err != nil { - panic(err) - } - underlying.stale = true - if underlying.write { - db.wmu.Unlock() - } else { - db.rmu.RUnlock() - } -} - -func (db *store) ListPolicies(_ context.Context, txn storage.Transaction) ([]string, error) { - underlying, err := db.underlying(txn) - if err != nil { - return nil, err - } - return underlying.ListPolicies(), nil -} - -func (db *store) GetPolicy(_ context.Context, txn storage.Transaction, id string) ([]byte, error) { - underlying, err := db.underlying(txn) - if err != nil { - return nil, err - } - return underlying.GetPolicy(id) -} - -func (db *store) UpsertPolicy(_ context.Context, txn storage.Transaction, id string, bs []byte) error { - underlying, err := db.underlying(txn) - if err != nil { - return err - } - return underlying.UpsertPolicy(id, bs) -} - -func (db *store) DeletePolicy(_ context.Context, txn storage.Transaction, id string) error { - underlying, err := db.underlying(txn) - if err != nil { - return err - } - if _, err := underlying.GetPolicy(id); err != nil { - return err - } - return underlying.DeletePolicy(id) -} - -func (db *store) Register(_ context.Context, txn storage.Transaction, config storage.TriggerConfig) (storage.TriggerHandle, error) { - underlying, err := db.underlying(txn) - if err != nil { - return nil, err - } - if !underlying.write { - return nil, &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "triggers must be registered with a write transaction", - } - } - h := &handle{db} - db.triggers[h] = config - return h, nil -} - -func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.Path) (interface{}, error) { - underlying, err := db.underlying(txn) - if err != nil { - return nil, err - } - - v, err := underlying.Read(path) - if err != 
nil { - return nil, err - } - - return v, nil -} - -func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value interface{}) error { - underlying, err := db.underlying(txn) - if err != nil { - return err - } - val := util.Reference(value) - if db.roundTripOnWrite { - if err := util.RoundTrip(val); err != nil { - return err - } - } - return underlying.Write(op, path, *val) -} - -func (h *handle) Unregister(_ context.Context, txn storage.Transaction) { - underlying, err := h.db.underlying(txn) - if err != nil { - panic(err) - } - if !underlying.write { - panic(&storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "triggers must be unregistered with a write transaction", - }) - } - delete(h.db.triggers, h) -} - -func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { - if db.returnASTValuesOnRead && len(db.triggers) > 0 { - // FIXME: Not very performant for large data. - - dataEvents := make([]storage.DataEvent, 0, len(event.Data)) - - for _, dataEvent := range event.Data { - if astData, ok := dataEvent.Data.(ast.Value); ok { - jsn, err := ast.ValueToInterface(astData, illegalResolver{}) - if err != nil { - panic(err) - } - dataEvents = append(dataEvents, storage.DataEvent{ - Path: dataEvent.Path, - Data: jsn, - Removed: dataEvent.Removed, - }) - } else { - dataEvents = append(dataEvents, dataEvent) - } - } - - event = storage.TriggerEvent{ - Policy: event.Policy, - Data: dataEvents, - Context: event.Context, - } - } - - for _, t := range db.triggers { - t.OnCommit(ctx, txn, event) - } -} - -type illegalResolver struct{} - -func (illegalResolver) Resolve(ref ast.Ref) (interface{}, error) { - return nil, fmt.Errorf("illegal value: %v", ref) -} - -func (db *store) underlying(txn storage.Transaction) (*transaction, error) { - underlying, ok := txn.(*transaction) - if !ok { - return nil, &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: 
fmt.Sprintf("unexpected transaction type %T", txn), - } - } - if underlying.db != db { - return nil, &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "unknown transaction", - } - } - if underlying.stale { - return nil, &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "stale transaction", - } - } - return underlying, nil -} - -const rootMustBeObjectMsg = "root must be object" -const rootCannotBeRemovedMsg = "root cannot be removed" - -func invalidPatchError(f string, a ...interface{}) *storage.Error { - return &storage.Error{ - Code: storage.InvalidPatchErr, - Message: fmt.Sprintf(f, a...), - } -} - -func mktree(path []string, value interface{}) (map[string]interface{}, error) { - if len(path) == 0 { - // For 0 length path the value is the full tree. - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, invalidPatchError(rootMustBeObjectMsg) - } - return obj, nil - } - - dir := map[string]interface{}{} - for i := len(path) - 1; i > 0; i-- { - dir[path[i]] = value - value = dir - dir = map[string]interface{}{} - } - dir[path[0]] = value - - return dir, nil -} - -func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) { - if len(path) == 0 { - return data, true - } - for i := 0; i < len(path)-1; i++ { - value, ok := data[path[i]] - if !ok { - return nil, false - } - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, false - } - data = obj - } - value, ok := data[path[len(path)-1]] - return value, ok + return v1.NewFromReaderWithOpts(r, opts...) 
} diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go index 2239fc73a3..43f03ef27b 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go +++ b/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go @@ -1,7 +1,9 @@ package inmem +import v1 "github.com/open-policy-agent/opa/v1/storage/inmem" + // An Opt modifies store at instantiation. -type Opt func(*store) +type Opt = v1.Opt // OptRoundTripOnWrite sets whether incoming objects written to store are // round-tripped through JSON to ensure they are serializable to JSON. @@ -19,9 +21,7 @@ type Opt func(*store) // and that mutations happening to the objects after they have been passed into // Write() don't affect their logic. func OptRoundTripOnWrite(enabled bool) Opt { - return func(s *store) { - s.roundTripOnWrite = enabled - } + return v1.OptRoundTripOnWrite(enabled) } // OptReturnASTValuesOnRead sets whether data values added to the store should be @@ -31,7 +31,5 @@ func OptRoundTripOnWrite(enabled bool) Opt { // which may result in panics if the data is not valid. Callers should ensure that passed data // can be serialized to AST values; otherwise, it's recommended to also enable OptRoundTripOnWrite. func OptReturnASTValuesOnRead(enabled bool) Opt { - return func(s *store) { - s.returnASTValuesOnRead = enabled - } + return v1.OptReturnASTValuesOnRead(enabled) } diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go deleted file mode 100644 index d3252e8822..0000000000 --- a/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go +++ /dev/null @@ -1,483 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package inmem - -import ( - "container/list" - "encoding/json" - "strconv" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/deepcopy" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/internal/errors" - "github.com/open-policy-agent/opa/storage/internal/ptr" -) - -// transaction implements the low-level read/write operations on the in-memory -// store and contains the state required for pending transactions. -// -// For write transactions, the struct contains a logical set of updates -// performed by write operations in the transaction. Each write operation -// compacts the set such that two updates never overlap: -// -// - If new update path is a prefix of existing update path, existing update is -// removed, new update is added. -// -// - If existing update path is a prefix of new update path, existing update is -// modified. -// -// - Otherwise, new update is added. -// -// Read transactions do not require any special handling and simply passthrough -// to the underlying store. Read transactions do not support upgrade. 
-type transaction struct { - xid uint64 - write bool - stale bool - db *store - updates *list.List - policies map[string]policyUpdate - context *storage.Context -} - -type policyUpdate struct { - value []byte - remove bool -} - -func newTransaction(xid uint64, write bool, context *storage.Context, db *store) *transaction { - return &transaction{ - xid: xid, - write: write, - db: db, - policies: map[string]policyUpdate{}, - updates: list.New(), - context: context, - } -} - -func (txn *transaction) ID() uint64 { - return txn.xid -} - -func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value interface{}) error { - - if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "data write during read transaction", - } - } - - if len(path) == 0 { - return txn.updateRoot(op, value) - } - - for curr := txn.updates.Front(); curr != nil; { - update := curr.Value.(dataUpdate) - - // Check if new update masks existing update exactly. In this case, the - // existing update can be removed and no other updates have to be - // visited (because no two updates overlap.) - if update.Path().Equal(path) { - if update.Remove() { - if op != storage.AddOp { - return errors.NewNotFoundError(path) - } - } - txn.updates.Remove(curr) - break - } - - // Check if new update masks existing update. In this case, the - // existing update has to be removed but other updates may overlap, so - // we must continue. - if update.Path().HasPrefix(path) { - remove := curr - curr = curr.Next() - txn.updates.Remove(remove) - continue - } - - // Check if new update modifies existing update. In this case, the - // existing update is mutated. 
- if path.HasPrefix(update.Path()) { - if update.Remove() { - return errors.NewNotFoundError(path) - } - suffix := path[len(update.Path()):] - newUpdate, err := txn.db.newUpdate(update.Value(), op, suffix, 0, value) - if err != nil { - return err - } - update.Set(newUpdate.Apply(update.Value())) - return nil - } - - curr = curr.Next() - } - - update, err := txn.db.newUpdate(txn.db.data, op, path, 0, value) - if err != nil { - return err - } - - txn.updates.PushFront(update) - return nil -} - -func (txn *transaction) updateRoot(op storage.PatchOp, value interface{}) error { - if op == storage.RemoveOp { - return invalidPatchError(rootCannotBeRemovedMsg) - } - - var update any - if txn.db.returnASTValuesOnRead { - valueAST, err := interfaceToValue(value) - if err != nil { - return err - } - if _, ok := valueAST.(ast.Object); !ok { - return invalidPatchError(rootMustBeObjectMsg) - } - - update = &updateAST{ - path: storage.Path{}, - remove: false, - value: valueAST, - } - } else { - if _, ok := value.(map[string]interface{}); !ok { - return invalidPatchError(rootMustBeObjectMsg) - } - - update = &updateRaw{ - path: storage.Path{}, - remove: false, - value: value, - } - } - - txn.updates.Init() - txn.updates.PushFront(update) - return nil -} - -func (txn *transaction) Commit() (result storage.TriggerEvent) { - result.Context = txn.context - for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { - action := curr.Value.(dataUpdate) - txn.db.data = action.Apply(txn.db.data) - - result.Data = append(result.Data, storage.DataEvent{ - Path: action.Path(), - Data: action.Value(), - Removed: action.Remove(), - }) - } - for id, upd := range txn.policies { - if upd.remove { - delete(txn.db.policies, id) - } else { - txn.db.policies[id] = upd.value - } - - result.Policy = append(result.Policy, storage.PolicyEvent{ - ID: id, - Data: upd.value, - Removed: upd.remove, - }) - } - return result -} - -func pointer(v interface{}, path storage.Path) (interface{}, error) { - 
if v, ok := v.(ast.Value); ok { - return ptr.ValuePtr(v, path) - } - return ptr.Ptr(v, path) -} - -func deepcpy(v interface{}) interface{} { - if v, ok := v.(ast.Value); ok { - var cpy ast.Value - - switch data := v.(type) { - case ast.Object: - cpy = data.Copy() - case *ast.Array: - cpy = data.Copy() - } - - return cpy - } - return deepcopy.DeepCopy(v) -} - -func (txn *transaction) Read(path storage.Path) (interface{}, error) { - - if !txn.write { - return pointer(txn.db.data, path) - } - - var merge []dataUpdate - - for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { - - upd := curr.Value.(dataUpdate) - - if path.HasPrefix(upd.Path()) { - if upd.Remove() { - return nil, errors.NewNotFoundError(path) - } - return pointer(upd.Value(), path[len(upd.Path()):]) - } - - if upd.Path().HasPrefix(path) { - merge = append(merge, upd) - } - } - - data, err := pointer(txn.db.data, path) - - if err != nil { - return nil, err - } - - if len(merge) == 0 { - return data, nil - } - - cpy := deepcpy(data) - - for _, update := range merge { - cpy = update.Relative(path).Apply(cpy) - } - - return cpy, nil -} - -func (txn *transaction) ListPolicies() []string { - var ids []string - for id := range txn.db.policies { - if _, ok := txn.policies[id]; !ok { - ids = append(ids, id) - } - } - for id, update := range txn.policies { - if !update.remove { - ids = append(ids, id) - } - } - return ids -} - -func (txn *transaction) GetPolicy(id string) ([]byte, error) { - if update, ok := txn.policies[id]; ok { - if !update.remove { - return update.value, nil - } - return nil, errors.NewNotFoundErrorf("policy id %q", id) - } - if exist, ok := txn.db.policies[id]; ok { - return exist, nil - } - return nil, errors.NewNotFoundErrorf("policy id %q", id) -} - -func (txn *transaction) UpsertPolicy(id string, bs []byte) error { - if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "policy write during read transaction", - } - } - txn.policies[id] = 
policyUpdate{bs, false} - return nil -} - -func (txn *transaction) DeletePolicy(id string) error { - if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "policy write during read transaction", - } - } - txn.policies[id] = policyUpdate{nil, true} - return nil -} - -type dataUpdate interface { - Path() storage.Path - Remove() bool - Apply(interface{}) interface{} - Relative(path storage.Path) dataUpdate - Set(interface{}) - Value() interface{} -} - -// update contains state associated with an update to be applied to the -// in-memory data store. -type updateRaw struct { - path storage.Path // data path modified by update - remove bool // indicates whether update removes the value at path - value interface{} // value to add/replace at path (ignored if remove is true) -} - -func (db *store) newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) { - if db.returnASTValuesOnRead { - astData, err := interfaceToValue(data) - if err != nil { - return nil, err - } - astValue, err := interfaceToValue(value) - if err != nil { - return nil, err - } - return newUpdateAST(astData, op, path, idx, astValue) - } - return newUpdateRaw(data, op, path, idx, value) -} - -func newUpdateRaw(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) { - - switch data.(type) { - case nil, bool, json.Number, string: - return nil, errors.NewNotFoundError(path) - } - - switch data := data.(type) { - case map[string]interface{}: - return newUpdateObject(data, op, path, idx, value) - - case []interface{}: - return newUpdateArray(data, op, path, idx, value) - } - - return nil, &storage.Error{ - Code: storage.InternalErr, - Message: "invalid data value encountered", - } -} - -func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) { - - if idx == len(path)-1 { - if path[idx] == "-" || 
path[idx] == strconv.Itoa(len(data)) { - if op != storage.AddOp { - return nil, invalidPatchError("%v: invalid patch path", path) - } - cpy := make([]interface{}, len(data)+1) - copy(cpy, data) - cpy[len(data)] = value - return &updateRaw{path[:len(path)-1], false, cpy}, nil - } - - pos, err := ptr.ValidateArrayIndex(data, path[idx], path) - if err != nil { - return nil, err - } - - switch op { - case storage.AddOp: - cpy := make([]interface{}, len(data)+1) - copy(cpy[:pos], data[:pos]) - copy(cpy[pos+1:], data[pos:]) - cpy[pos] = value - return &updateRaw{path[:len(path)-1], false, cpy}, nil - - case storage.RemoveOp: - cpy := make([]interface{}, len(data)-1) - copy(cpy[:pos], data[:pos]) - copy(cpy[pos:], data[pos+1:]) - return &updateRaw{path[:len(path)-1], false, cpy}, nil - - default: - cpy := make([]interface{}, len(data)) - copy(cpy, data) - cpy[pos] = value - return &updateRaw{path[:len(path)-1], false, cpy}, nil - } - } - - pos, err := ptr.ValidateArrayIndex(data, path[idx], path) - if err != nil { - return nil, err - } - - return newUpdateRaw(data[pos], op, path, idx+1, value) -} - -func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) { - - if idx == len(path)-1 { - switch op { - case storage.ReplaceOp, storage.RemoveOp: - if _, ok := data[path[idx]]; !ok { - return nil, errors.NewNotFoundError(path) - } - } - return &updateRaw{path, op == storage.RemoveOp, value}, nil - } - - if data, ok := data[path[idx]]; ok { - return newUpdateRaw(data, op, path, idx+1, value) - } - - return nil, errors.NewNotFoundError(path) -} - -func (u *updateRaw) Remove() bool { - return u.remove -} - -func (u *updateRaw) Path() storage.Path { - return u.path -} - -func (u *updateRaw) Apply(data interface{}) interface{} { - if len(u.path) == 0 { - return u.value - } - parent, err := ptr.Ptr(data, u.path[:len(u.path)-1]) - if err != nil { - panic(err) - } - key := u.path[len(u.path)-1] - if 
u.remove { - obj := parent.(map[string]interface{}) - delete(obj, key) - return data - } - switch parent := parent.(type) { - case map[string]interface{}: - if parent == nil { - parent = make(map[string]interface{}, 1) - } - parent[key] = u.value - case []interface{}: - idx, err := strconv.Atoi(key) - if err != nil { - panic(err) - } - parent[idx] = u.value - } - return data -} - -func (u *updateRaw) Set(v interface{}) { - u.value = v -} - -func (u *updateRaw) Value() interface{} { - return u.value -} - -func (u *updateRaw) Relative(path storage.Path) dataUpdate { - cpy := *u - cpy.path = cpy.path[len(path):] - return &cpy -} diff --git a/vendor/github.com/open-policy-agent/opa/storage/interface.go b/vendor/github.com/open-policy-agent/opa/storage/interface.go index 6baca9a59f..a21b5575e9 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/interface.go +++ b/vendor/github.com/open-policy-agent/opa/storage/interface.go @@ -5,243 +5,85 @@ package storage import ( - "context" - - "github.com/open-policy-agent/opa/metrics" + v1 "github.com/open-policy-agent/opa/v1/storage" ) // Transaction defines the interface that identifies a consistent snapshot over // the policy engine's storage layer. -type Transaction interface { - ID() uint64 -} +type Transaction = v1.Transaction // Store defines the interface for the storage layer's backend. -type Store interface { - Trigger - Policy - - // NewTransaction is called create a new transaction in the store. - NewTransaction(context.Context, ...TransactionParams) (Transaction, error) - - // Read is called to fetch a document referred to by path. - Read(context.Context, Transaction, Path) (interface{}, error) - - // Write is called to modify a document referred to by path. - Write(context.Context, Transaction, PatchOp, Path, interface{}) error - - // Commit is called to finish the transaction. If Commit returns an error, the - // transaction must be automatically aborted by the Store implementation. 
- Commit(context.Context, Transaction) error - - // Truncate is called to make a copy of the underlying store, write documents in the new store - // by creating multiple transactions in the new store as needed and finally swapping - // over to the new storage instance. This method must be called within a transaction on the original store. - Truncate(context.Context, Transaction, TransactionParams, Iterator) error - - // Abort is called to cancel the transaction. - Abort(context.Context, Transaction) -} +type Store = v1.Store // MakeDirer defines the interface a Store could realize to override the // generic MakeDir functionality in storage.MakeDir -type MakeDirer interface { - MakeDir(context.Context, Transaction, Path) error -} - -// TransactionParams describes a new transaction. -type TransactionParams struct { - - // BasePaths indicates the top-level paths where write operations will be performed in this transaction. - BasePaths []string - - // RootOverwrite is deprecated. Use BasePaths instead. - RootOverwrite bool +type MakeDirer = v1.MakeDirer - // Write indicates if this transaction will perform any write operations. - Write bool +// NonEmptyer allows a store implemention to override NonEmpty()) +type NonEmptyer = v1.NonEmptyer - // Context contains key/value pairs passed to triggers. - Context *Context -} +// TransactionParams describes a new transaction. +type TransactionParams = v1.TransactionParams // Context is a simple container for key/value pairs. -type Context struct { - values map[interface{}]interface{} -} +type Context = v1.Context // NewContext returns a new context object. func NewContext() *Context { - return &Context{ - values: map[interface{}]interface{}{}, - } -} - -// Get returns the key value in the context. -func (ctx *Context) Get(key interface{}) interface{} { - if ctx == nil { - return nil - } - return ctx.values[key] -} - -// Put adds a key/value pair to the context. 
-func (ctx *Context) Put(key, value interface{}) { - ctx.values[key] = value -} - -var metricsKey = struct{}{} - -// WithMetrics allows passing metrics via the Context. -// It puts the metrics object in the ctx, and returns the same -// ctx (not a copy) for convenience. -func (ctx *Context) WithMetrics(m metrics.Metrics) *Context { - ctx.values[metricsKey] = m - return ctx -} - -// Metrics() allows using a Context's metrics. Returns nil if metrics -// were not attached to the Context. -func (ctx *Context) Metrics() metrics.Metrics { - if m, ok := ctx.values[metricsKey]; ok { - if met, ok := m.(metrics.Metrics); ok { - return met - } - } - return nil + return v1.NewContext() } // WriteParams specifies the TransactionParams for a write transaction. -var WriteParams = TransactionParams{ - Write: true, -} +var WriteParams = v1.WriteParams // PatchOp is the enumeration of supposed modifications. -type PatchOp int +type PatchOp = v1.PatchOp // Patch supports add, remove, and replace operations. const ( - AddOp PatchOp = iota - RemoveOp = iota - ReplaceOp = iota + AddOp = v1.AddOp + RemoveOp = v1.RemoveOp + ReplaceOp = v1.ReplaceOp ) // WritesNotSupported provides a default implementation of the write // interface which may be used if the backend does not support writes. -type WritesNotSupported struct{} - -func (WritesNotSupported) Write(context.Context, Transaction, PatchOp, Path, interface{}) error { - return writesNotSupportedError() -} +type WritesNotSupported = v1.WritesNotSupported // Policy defines the interface for policy module storage. 
-type Policy interface { - ListPolicies(context.Context, Transaction) ([]string, error) - GetPolicy(context.Context, Transaction, string) ([]byte, error) - UpsertPolicy(context.Context, Transaction, string, []byte) error - DeletePolicy(context.Context, Transaction, string) error -} +type Policy = v1.Policy // PolicyNotSupported provides a default implementation of the policy interface // which may be used if the backend does not support policy storage. -type PolicyNotSupported struct{} - -// ListPolicies always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) { - return nil, policyNotSupportedError() -} - -// GetPolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) { - return nil, policyNotSupportedError() -} - -// UpsertPolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error { - return policyNotSupportedError() -} - -// DeletePolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error { - return policyNotSupportedError() -} +type PolicyNotSupported = v1.PolicyNotSupported // PolicyEvent describes a change to a policy. -type PolicyEvent struct { - ID string - Data []byte - Removed bool -} +type PolicyEvent = v1.PolicyEvent // DataEvent describes a change to a base data document. -type DataEvent struct { - Path Path - Data interface{} - Removed bool -} +type DataEvent = v1.DataEvent // TriggerEvent describes the changes that caused the trigger to be invoked. -type TriggerEvent struct { - Policy []PolicyEvent - Data []DataEvent - Context *Context -} - -// IsZero returns true if the TriggerEvent indicates no changes occurred. This -// function is primarily for test purposes. 
-func (e TriggerEvent) IsZero() bool { - return !e.PolicyChanged() && !e.DataChanged() -} - -// PolicyChanged returns true if the trigger was caused by a policy change. -func (e TriggerEvent) PolicyChanged() bool { - return len(e.Policy) > 0 -} - -// DataChanged returns true if the trigger was caused by a data change. -func (e TriggerEvent) DataChanged() bool { - return len(e.Data) > 0 -} +type TriggerEvent = v1.TriggerEvent // TriggerConfig contains the trigger registration configuration. -type TriggerConfig struct { - - // OnCommit is invoked when a transaction is successfully committed. The - // callback is invoked with a handle to the write transaction that - // successfully committed before other clients see the changes. - OnCommit func(context.Context, Transaction, TriggerEvent) -} +type TriggerConfig = v1.TriggerConfig // Trigger defines the interface that stores implement to register for change // notifications when the store is changed. -type Trigger interface { - Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) -} +type Trigger = v1.Trigger // TriggersNotSupported provides default implementations of the Trigger // interface which may be used if the backend does not support triggers. -type TriggersNotSupported struct{} - -// Register always returns an error indicating triggers are not supported. -func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) { - return nil, triggersNotSupportedError() -} +type TriggersNotSupported = v1.TriggersNotSupported // TriggerHandle defines the interface that can be used to unregister triggers that have // been registered on a Store. -type TriggerHandle interface { - Unregister(context.Context, Transaction) -} +type TriggerHandle = v1.TriggerHandle // Iterator defines the interface that can be used to read files from a directory starting with // files at the base of the directory, then sub-directories etc. 
-type Iterator interface { - Next() (*Update, error) -} +type Iterator = v1.Iterator // Update contains information about a file -type Update struct { - Path Path - Value []byte - IsPolicy bool -} +type Update = v1.Update diff --git a/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go b/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go deleted file mode 100644 index 0bba74b907..0000000000 --- a/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2021 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package errors contains reusable error-related code for the storage layer. -package errors - -import ( - "fmt" - - "github.com/open-policy-agent/opa/storage" -) - -const ArrayIndexTypeMsg = "array index must be integer" -const DoesNotExistMsg = "document does not exist" -const OutOfRangeMsg = "array index out of range" - -func NewNotFoundError(path storage.Path) *storage.Error { - return NewNotFoundErrorWithHint(path, DoesNotExistMsg) -} - -func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error { - return NewNotFoundErrorf("%v: %v", path.String(), hint) -} - -func NewNotFoundErrorf(f string, a ...interface{}) *storage.Error { - msg := fmt.Sprintf(f, a...) - return &storage.Error{ - Code: storage.NotFoundErr, - Message: msg, - } -} - -func NewWriteConflictError(p storage.Path) *storage.Error { - return &storage.Error{ - Code: storage.WriteConflictErr, - Message: p.String(), - } -} diff --git a/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go b/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go deleted file mode 100644 index 14adbd682e..0000000000 --- a/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2021 The OPA Authors. 
All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package ptr provides utilities for pointer operations using storage layer paths. -package ptr - -import ( - "strconv" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/internal/errors" -) - -func Ptr(data interface{}, path storage.Path) (interface{}, error) { - node := data - for i := range path { - key := path[i] - switch curr := node.(type) { - case map[string]interface{}: - var ok bool - if node, ok = curr[key]; !ok { - return nil, errors.NewNotFoundError(path) - } - case []interface{}: - pos, err := ValidateArrayIndex(curr, key, path) - if err != nil { - return nil, err - } - node = curr[pos] - default: - return nil, errors.NewNotFoundError(path) - } - } - - return node, nil -} - -func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) { - node := data - for i := range path { - key := path[i] - switch curr := node.(type) { - case ast.Object: - keyTerm := ast.StringTerm(key) - val := curr.Get(keyTerm) - if val == nil { - return nil, errors.NewNotFoundError(path) - } - node = val.Value - case *ast.Array: - pos, err := ValidateASTArrayIndex(curr, key, path) - if err != nil { - return nil, err - } - node = curr.Elem(pos).Value - default: - return nil, errors.NewNotFoundError(path) - } - } - - return node, nil -} - -func ValidateArrayIndex(arr []interface{}, s string, path storage.Path) (int, error) { - idx, ok := isInt(s) - if !ok { - return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg) - } - return inRange(idx, arr, path) -} - -func ValidateASTArrayIndex(arr *ast.Array, s string, path storage.Path) (int, error) { - idx, ok := isInt(s) - if !ok { - return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg) - } - return inRange(idx, arr, path) -} - -// ValidateArrayIndexForWrite also checks that `s` is a 
valid way to address an -// array element like `ValidateArrayIndex`, but returns a `resource_conflict` error -// if it is not. -func ValidateArrayIndexForWrite(arr []interface{}, s string, i int, path storage.Path) (int, error) { - idx, ok := isInt(s) - if !ok { - return 0, errors.NewWriteConflictError(path[:i-1]) - } - return inRange(idx, arr, path) -} - -func isInt(s string) (int, bool) { - idx, err := strconv.Atoi(s) - return idx, err == nil -} - -func inRange(i int, arr interface{}, path storage.Path) (int, error) { - - var arrLen int - - switch v := arr.(type) { - case []interface{}: - arrLen = len(v) - case *ast.Array: - arrLen = v.Len() - } - - if i < 0 || i >= arrLen { - return 0, errors.NewNotFoundErrorWithHint(path, errors.OutOfRangeMsg) - } - return i, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/storage/path.go b/vendor/github.com/open-policy-agent/opa/storage/path.go index 02ef4cab40..91d4f34f2b 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/path.go +++ b/vendor/github.com/open-policy-agent/opa/storage/path.go @@ -5,150 +5,30 @@ package storage import ( - "fmt" - "net/url" - "strconv" - "strings" - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/storage" ) // Path refers to a document in storage. -type Path []string +type Path = v1.Path // ParsePath returns a new path for the given str. func ParsePath(str string) (path Path, ok bool) { - if len(str) == 0 { - return nil, false - } - if str[0] != '/' { - return nil, false - } - if len(str) == 1 { - return Path{}, true - } - parts := strings.Split(str[1:], "/") - return parts, true + return v1.ParsePath(str) } // ParsePathEscaped returns a new path for the given escaped str. 
func ParsePathEscaped(str string) (path Path, ok bool) { - path, ok = ParsePath(str) - if !ok { - return - } - for i := range path { - segment, err := url.PathUnescape(path[i]) - if err == nil { - path[i] = segment - } - } - return + return v1.ParsePathEscaped(str) } // NewPathForRef returns a new path for the given ref. func NewPathForRef(ref ast.Ref) (path Path, err error) { - - if len(ref) == 0 { - return nil, fmt.Errorf("empty reference (indicates error in caller)") - } - - if len(ref) == 1 { - return Path{}, nil - } - - path = make(Path, 0, len(ref)-1) - - for _, term := range ref[1:] { - switch v := term.Value.(type) { - case ast.String: - path = append(path, string(v)) - case ast.Number: - path = append(path, v.String()) - case ast.Boolean, ast.Null: - return nil, &Error{ - Code: NotFoundErr, - Message: fmt.Sprintf("%v: does not exist", ref), - } - case *ast.Array, ast.Object, ast.Set: - return nil, fmt.Errorf("composites cannot be base document keys: %v", ref) - default: - return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref) - } - } - - return path, nil -} - -// Compare performs lexigraphical comparison on p and other and returns -1 if p -// is less than other, 0 if p is equal to other, or 1 if p is greater than -// other. -func (p Path) Compare(other Path) (cmp int) { - min := len(p) - if len(other) < min { - min = len(other) - } - for i := 0; i < min; i++ { - if cmp := strings.Compare(p[i], other[i]); cmp != 0 { - return cmp - } - } - if len(p) < len(other) { - return -1 - } - if len(p) == len(other) { - return 0 - } - return 1 -} - -// Equal returns true if p is the same as other. -func (p Path) Equal(other Path) bool { - return p.Compare(other) == 0 -} - -// HasPrefix returns true if p starts with other. 
-func (p Path) HasPrefix(other Path) bool { - if len(other) > len(p) { - return false - } - for i := range other { - if p[i] != other[i] { - return false - } - } - return true -} - -// Ref returns a ref that represents p rooted at head. -func (p Path) Ref(head *ast.Term) (ref ast.Ref) { - ref = make(ast.Ref, len(p)+1) - ref[0] = head - for i := range p { - idx, err := strconv.ParseInt(p[i], 10, 64) - if err == nil { - ref[i+1] = ast.UIntNumberTerm(uint64(idx)) - } else { - ref[i+1] = ast.StringTerm(p[i]) - } - } - return ref -} - -func (p Path) String() string { - buf := make([]string, len(p)) - for i := range buf { - buf[i] = url.PathEscape(p[i]) - } - return "/" + strings.Join(buf, "/") + return v1.NewPathForRef(ref) } // MustParsePath returns a new Path for s. If s cannot be parsed, this function // will panic. This is mostly for test purposes. func MustParsePath(s string) Path { - path, ok := ParsePath(s) - if !ok { - panic(s) - } - return path + return v1.MustParsePath(s) } diff --git a/vendor/github.com/open-policy-agent/opa/storage/storage.go b/vendor/github.com/open-policy-agent/opa/storage/storage.go index 2f8a39c597..d1abc1046d 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/storage.go +++ b/vendor/github.com/open-policy-agent/opa/storage/storage.go @@ -7,85 +7,34 @@ package storage import ( "context" - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/storage" ) // NewTransactionOrDie is a helper function to create a new transaction. If the // storage layer cannot create a new transaction, this function will panic. This // function should only be used for tests. func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction { - txn, err := store.NewTransaction(ctx, params...) - if err != nil { - panic(err) - } - return txn + return v1.NewTransactionOrDie(ctx, store, params...) } // ReadOne is a convenience function to read a single value from the provided Store. 
It // will create a new Transaction to perform the read with, and clean up after itself // should an error occur. -func ReadOne(ctx context.Context, store Store, path Path) (interface{}, error) { - txn, err := store.NewTransaction(ctx) - if err != nil { - return nil, err - } - defer store.Abort(ctx, txn) - - return store.Read(ctx, txn, path) +func ReadOne(ctx context.Context, store Store, path Path) (any, error) { + return v1.ReadOne(ctx, store, path) } // WriteOne is a convenience function to write a single value to the provided Store. It // will create a new Transaction to perform the write with, and clean up after itself // should an error occur. -func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value interface{}) error { - txn, err := store.NewTransaction(ctx, WriteParams) - if err != nil { - return err - } - - if err := store.Write(ctx, txn, op, path, value); err != nil { - store.Abort(ctx, txn) - return err - } - - return store.Commit(ctx, txn) +func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value any) error { + return v1.WriteOne(ctx, store, op, path, value) } // MakeDir inserts an empty object at path. If the parent path does not exist, // MakeDir will create it recursively. func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error { - - // Allow the Store implementation to deal with this in its own way. 
- if md, ok := store.(MakeDirer); ok { - return md.MakeDir(ctx, txn, path) - } - - if len(path) == 0 { - return nil - } - - node, err := store.Read(ctx, txn, path) - if err != nil { - if !IsNotFound(err) { - return err - } - - if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - - return store.Write(ctx, txn, AddOp, path, map[string]interface{}{}) - } - - if _, ok := node.(map[string]interface{}); ok { - return nil - } - - if _, ok := node.(ast.Object); ok { - return nil - } - - return writeConflictError(path) + return v1.MakeDir(ctx, store, txn, path) } // Txn is a convenience function that executes f inside a new transaction @@ -93,44 +42,12 @@ func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error // aborted and the error is returned. Otherwise, the transaction is committed // and the result of the commit is returned. func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error { - - txn, err := store.NewTransaction(ctx, params) - if err != nil { - return err - } - - if err := f(txn); err != nil { - store.Abort(ctx, txn) - return err - } - - return store.Commit(ctx, txn) + return v1.Txn(ctx, store, params, f) } // NonEmpty returns a function that tests if a path is non-empty. A // path is non-empty if a Read on the path returns a value or a Read // on any of the path prefixes returns a non-object value. 
func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) { - return func(path []string) (bool, error) { - if _, err := store.Read(ctx, txn, Path(path)); err == nil { - return true, nil - } else if !IsNotFound(err) { - return false, err - } - for i := len(path) - 1; i > 0; i-- { - val, err := store.Read(ctx, txn, Path(path[:i])) - if err != nil && !IsNotFound(err) { - return false, err - } else if err == nil { - if _, ok := val.(map[string]interface{}); ok { - return false, nil - } - if _, ok := val.(ast.Object); ok { - return false, nil - } - return true, nil - } - } - return false, nil - } + return v1.NonEmpty(ctx, store, txn) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go b/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go deleted file mode 100644 index a0f67a7c95..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "math/big" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -func builtinCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch a := operands[0].Value.(type) { - case *ast.Array: - return iter(ast.IntNumberTerm(a.Len())) - case ast.Object: - return iter(ast.IntNumberTerm(a.Len())) - case ast.Set: - return iter(ast.IntNumberTerm(a.Len())) - case ast.String: - return iter(ast.IntNumberTerm(len([]rune(a)))) - } - return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "object", "set", "string") -} - -func builtinSum(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch a := operands[0].Value.(type) { - case *ast.Array: - sum := big.NewFloat(0) - err := a.Iter(func(x *ast.Term) error { - n, ok := x.Value.(ast.Number) - if !ok { - return builtins.NewOperandElementErr(1, a, x.Value, "number") - } - sum = new(big.Float).Add(sum, builtins.NumberToFloat(n)) - return nil - }) - if err != nil { - return err - } - return iter(ast.NewTerm(builtins.FloatToNumber(sum))) - case ast.Set: - sum := big.NewFloat(0) - err := a.Iter(func(x *ast.Term) error { - n, ok := x.Value.(ast.Number) - if !ok { - return builtins.NewOperandElementErr(1, a, x.Value, "number") - } - sum = new(big.Float).Add(sum, builtins.NumberToFloat(n)) - return nil - }) - if err != nil { - return err - } - return iter(ast.NewTerm(builtins.FloatToNumber(sum))) - } - return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") -} - -func builtinProduct(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch a := operands[0].Value.(type) { - case *ast.Array: - product := big.NewFloat(1) - err := a.Iter(func(x *ast.Term) error { - n, ok := x.Value.(ast.Number) - if !ok { - return builtins.NewOperandElementErr(1, a, x.Value, "number") - } - product = new(big.Float).Mul(product, builtins.NumberToFloat(n)) - return nil - 
}) - if err != nil { - return err - } - return iter(ast.NewTerm(builtins.FloatToNumber(product))) - case ast.Set: - product := big.NewFloat(1) - err := a.Iter(func(x *ast.Term) error { - n, ok := x.Value.(ast.Number) - if !ok { - return builtins.NewOperandElementErr(1, a, x.Value, "number") - } - product = new(big.Float).Mul(product, builtins.NumberToFloat(n)) - return nil - }) - if err != nil { - return err - } - return iter(ast.NewTerm(builtins.FloatToNumber(product))) - } - return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") -} - -func builtinMax(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch a := operands[0].Value.(type) { - case *ast.Array: - if a.Len() == 0 { - return nil - } - var max = ast.Value(ast.Null{}) - a.Foreach(func(x *ast.Term) { - if ast.Compare(max, x.Value) <= 0 { - max = x.Value - } - }) - return iter(ast.NewTerm(max)) - case ast.Set: - if a.Len() == 0 { - return nil - } - max, err := a.Reduce(ast.NullTerm(), func(max *ast.Term, elem *ast.Term) (*ast.Term, error) { - if ast.Compare(max, elem) <= 0 { - return elem, nil - } - return max, nil - }) - if err != nil { - return err - } - return iter(max) - } - - return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") -} - -func builtinMin(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch a := operands[0].Value.(type) { - case *ast.Array: - if a.Len() == 0 { - return nil - } - min := a.Elem(0).Value - a.Foreach(func(x *ast.Term) { - if ast.Compare(min, x.Value) >= 0 { - min = x.Value - } - }) - return iter(ast.NewTerm(min)) - case ast.Set: - if a.Len() == 0 { - return nil - } - min, err := a.Reduce(ast.NullTerm(), func(min *ast.Term, elem *ast.Term) (*ast.Term, error) { - // The null term is considered to be less than any other term, - // so in order for min of a set to make sense, we need to check - // for it. 
- if min.Value.Compare(ast.Null{}) == 0 { - return elem, nil - } - - if ast.Compare(min, elem) >= 0 { - return elem, nil - } - return min, nil - }) - if err != nil { - return err - } - return iter(min) - } - - return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") -} - -func builtinSort(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch a := operands[0].Value.(type) { - case *ast.Array: - return iter(ast.NewTerm(a.Sorted())) - case ast.Set: - return iter(ast.NewTerm(a.Sorted())) - } - return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") -} - -func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch val := operands[0].Value.(type) { - case ast.Set: - res := true - match := ast.BooleanTerm(true) - val.Until(func(term *ast.Term) bool { - if !match.Equal(term) { - res = false - return true - } - return false - }) - return iter(ast.BooleanTerm(res)) - case *ast.Array: - res := true - match := ast.BooleanTerm(true) - val.Until(func(term *ast.Term) bool { - if !match.Equal(term) { - res = false - return true - } - return false - }) - return iter(ast.BooleanTerm(res)) - default: - return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set") - } -} - -func builtinAny(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch val := operands[0].Value.(type) { - case ast.Set: - res := val.Len() > 0 && val.Contains(ast.BooleanTerm(true)) - return iter(ast.BooleanTerm(res)) - case *ast.Array: - res := false - match := ast.BooleanTerm(true) - val.Until(func(term *ast.Term) bool { - if match.Equal(term) { - res = true - return true - } - return false - }) - return iter(ast.BooleanTerm(res)) - default: - return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set") - } -} - -func builtinMember(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - containee := operands[0] - switch c := 
operands[1].Value.(type) { - case ast.Set: - return iter(ast.BooleanTerm(c.Contains(containee))) - case *ast.Array: - ret := false - c.Until(func(v *ast.Term) bool { - if v.Value.Compare(containee.Value) == 0 { - ret = true - } - return ret - }) - return iter(ast.BooleanTerm(ret)) - case ast.Object: - ret := false - c.Until(func(_, v *ast.Term) bool { - if v.Value.Compare(containee.Value) == 0 { - ret = true - } - return ret - }) - return iter(ast.BooleanTerm(ret)) - } - return iter(ast.BooleanTerm(false)) -} - -func builtinMemberWithKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - key, val := operands[0], operands[1] - switch c := operands[2].Value.(type) { - case interface{ Get(*ast.Term) *ast.Term }: - ret := false - if act := c.Get(key); act != nil { - ret = act.Value.Compare(val.Value) == 0 - } - return iter(ast.BooleanTerm(ret)) - } - return iter(ast.BooleanTerm(false)) -} - -func init() { - RegisterBuiltinFunc(ast.Count.Name, builtinCount) - RegisterBuiltinFunc(ast.Sum.Name, builtinSum) - RegisterBuiltinFunc(ast.Product.Name, builtinProduct) - RegisterBuiltinFunc(ast.Max.Name, builtinMax) - RegisterBuiltinFunc(ast.Min.Name, builtinMin) - RegisterBuiltinFunc(ast.Sort.Name, builtinSort) - RegisterBuiltinFunc(ast.Any.Name, builtinAny) - RegisterBuiltinFunc(ast.All.Name, builtinAll) - RegisterBuiltinFunc(ast.Member.Name, builtinMember) - RegisterBuiltinFunc(ast.MemberWithKey.Name, builtinMemberWithKey) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/array.go b/vendor/github.com/open-policy-agent/opa/topdown/array.go deleted file mode 100644 index e7fe5be643..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/array.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - arrA, err := builtins.ArrayOperand(operands[0].Value, 1) - if err != nil { - return err - } - - arrB, err := builtins.ArrayOperand(operands[1].Value, 2) - if err != nil { - return err - } - - arrC := make([]*ast.Term, arrA.Len()+arrB.Len()) - - i := 0 - arrA.Foreach(func(elemA *ast.Term) { - arrC[i] = elemA - i++ - }) - - arrB.Foreach(func(elemB *ast.Term) { - arrC[i] = elemB - i++ - }) - - return iter(ast.NewTerm(ast.NewArray(arrC...))) -} - -func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - arr, err := builtins.ArrayOperand(operands[0].Value, 1) - if err != nil { - return err - } - - startIndex, err := builtins.IntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - stopIndex, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - // Clamp stopIndex to avoid out-of-range errors. If negative, clamp to zero. - // Otherwise, clamp to length of array. - if stopIndex < 0 { - stopIndex = 0 - } else if stopIndex > arr.Len() { - stopIndex = arr.Len() - } - - // Clamp startIndex to avoid out-of-range errors. If negative, clamp to zero. - // Otherwise, clamp to stopIndex to avoid to avoid cases like arr[1:0]. 
- if startIndex < 0 { - startIndex = 0 - } else if startIndex > stopIndex { - startIndex = stopIndex - } - - return iter(ast.NewTerm(arr.Slice(startIndex, stopIndex))) -} - -func builtinArrayReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - arr, err := builtins.ArrayOperand(operands[0].Value, 1) - if err != nil { - return err - } - - length := arr.Len() - reversedArr := make([]*ast.Term, length) - - for index := 0; index < length; index++ { - reversedArr[index] = arr.Elem(length - index - 1) - } - - return iter(ast.ArrayTerm(reversedArr...)) -} - -func init() { - RegisterBuiltinFunc(ast.ArrayConcat.Name, builtinArrayConcat) - RegisterBuiltinFunc(ast.ArraySlice.Name, builtinArraySlice) - RegisterBuiltinFunc(ast.ArrayReverse.Name, builtinArrayReverse) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go b/vendor/github.com/open-policy-agent/opa/topdown/bindings.go deleted file mode 100644 index 30a8ac5ec4..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "fmt" - "strings" - - "github.com/open-policy-agent/opa/ast" -) - -type undo struct { - k *ast.Term - u *bindings -} - -func (u *undo) Undo() { - if u == nil { - // Allow call on zero value of Undo for ease-of-use. - return - } - if u.u == nil { - // Call on empty unifier undos a no-op unify operation. 
- return - } - u.u.delete(u.k) -} - -type bindings struct { - id uint64 - values bindingsArrayHashmap - instr *Instrumentation -} - -func newBindings(id uint64, instr *Instrumentation) *bindings { - values := newBindingsArrayHashmap() - return &bindings{id, values, instr} -} - -func (u *bindings) Iter(caller *bindings, iter func(*ast.Term, *ast.Term) error) error { - - var err error - - u.values.Iter(func(k *ast.Term, _ value) bool { - if err != nil { - return true - } - err = iter(k, u.PlugNamespaced(k, caller)) - - return false - }) - - return err -} - -func (u *bindings) Namespace(x ast.Node, caller *bindings) { - vis := namespacingVisitor{ - b: u, - caller: caller, - } - ast.NewGenericVisitor(vis.Visit).Walk(x) -} - -func (u *bindings) Plug(a *ast.Term) *ast.Term { - return u.PlugNamespaced(a, nil) -} - -func (u *bindings) PlugNamespaced(a *ast.Term, caller *bindings) *ast.Term { - if u != nil { - u.instr.startTimer(evalOpPlug) - t := u.plugNamespaced(a, caller) - u.instr.stopTimer(evalOpPlug) - return t - } - - return u.plugNamespaced(a, caller) -} - -func (u *bindings) plugNamespaced(a *ast.Term, caller *bindings) *ast.Term { - switch v := a.Value.(type) { - case ast.Var: - b, next := u.apply(a) - if a != b || u != next { - return next.plugNamespaced(b, caller) - } - return u.namespaceVar(b, caller) - case *ast.Array: - if a.IsGround() { - return a - } - cpy := *a - arr := make([]*ast.Term, v.Len()) - for i := 0; i < len(arr); i++ { - arr[i] = u.plugNamespaced(v.Elem(i), caller) - } - cpy.Value = ast.NewArray(arr...) 
- return &cpy - case ast.Object: - if a.IsGround() { - return a - } - cpy := *a - cpy.Value, _ = v.Map(func(k, v *ast.Term) (*ast.Term, *ast.Term, error) { - return u.plugNamespaced(k, caller), u.plugNamespaced(v, caller), nil - }) - return &cpy - case ast.Set: - if a.IsGround() { - return a - } - cpy := *a - cpy.Value, _ = v.Map(func(x *ast.Term) (*ast.Term, error) { - return u.plugNamespaced(x, caller), nil - }) - return &cpy - case ast.Ref: - cpy := *a - ref := make(ast.Ref, len(v)) - for i := 0; i < len(ref); i++ { - ref[i] = u.plugNamespaced(v[i], caller) - } - cpy.Value = ref - return &cpy - } - return a -} - -func (u *bindings) bind(a *ast.Term, b *ast.Term, other *bindings, und *undo) { - u.values.Put(a, value{ - u: other, - v: b, - }) - und.k = a - und.u = u -} - -func (u *bindings) apply(a *ast.Term) (*ast.Term, *bindings) { - // Early exit for non-var terms. Only vars are bound in the binding list, - // so the lookup below will always fail for non-var terms. In some cases, - // the lookup may be expensive as it has to hash the term (which for large - // inputs can be costly). 
- _, ok := a.Value.(ast.Var) - if !ok { - return a, u - } - val, ok := u.get(a) - if !ok { - return a, u - } - return val.u.apply(val.v) -} - -func (u *bindings) delete(v *ast.Term) { - u.values.Delete(v) -} - -func (u *bindings) get(v *ast.Term) (value, bool) { - if u == nil { - return value{}, false - } - return u.values.Get(v) -} - -func (u *bindings) String() string { - if u == nil { - return "()" - } - var buf []string - u.values.Iter(func(a *ast.Term, b value) bool { - buf = append(buf, fmt.Sprintf("%v: %v", a, b)) - return false - }) - return fmt.Sprintf("({%v}, %v)", strings.Join(buf, ", "), u.id) -} - -func (u *bindings) namespaceVar(v *ast.Term, caller *bindings) *ast.Term { - name, ok := v.Value.(ast.Var) - if !ok { - panic("illegal value") - } - if caller != nil && caller != u { - // Root documents (i.e., data, input) should never be namespaced because they - // are globally unique. - if !ast.RootDocumentNames.Contains(v) { - return ast.NewTerm(ast.Var(string(name) + fmt.Sprint(u.id))) - } - } - return v -} - -type value struct { - u *bindings - v *ast.Term -} - -func (v value) String() string { - return fmt.Sprintf("(%v, %d)", v.v, v.u.id) -} - -func (v value) equal(other *value) bool { - if v.u == other.u { - return v.v.Equal(other.v) - } - return false -} - -type namespacingVisitor struct { - b *bindings - caller *bindings -} - -func (vis namespacingVisitor) Visit(x interface{}) bool { - switch x := x.(type) { - case *ast.ArrayComprehension: - x.Term = vis.namespaceTerm(x.Term) - ast.NewGenericVisitor(vis.Visit).Walk(x.Body) - return true - case *ast.SetComprehension: - x.Term = vis.namespaceTerm(x.Term) - ast.NewGenericVisitor(vis.Visit).Walk(x.Body) - return true - case *ast.ObjectComprehension: - x.Key = vis.namespaceTerm(x.Key) - x.Value = vis.namespaceTerm(x.Value) - ast.NewGenericVisitor(vis.Visit).Walk(x.Body) - return true - case *ast.Expr: - switch terms := x.Terms.(type) { - case []*ast.Term: - for i := 1; i < len(terms); i++ { - terms[i] = 
vis.namespaceTerm(terms[i]) - } - case *ast.Term: - x.Terms = vis.namespaceTerm(terms) - } - for _, w := range x.With { - w.Target = vis.namespaceTerm(w.Target) - w.Value = vis.namespaceTerm(w.Value) - } - } - return false -} - -func (vis namespacingVisitor) namespaceTerm(a *ast.Term) *ast.Term { - switch v := a.Value.(type) { - case ast.Var: - return vis.b.namespaceVar(a, vis.caller) - case *ast.Array: - if a.IsGround() { - return a - } - cpy := *a - arr := make([]*ast.Term, v.Len()) - for i := 0; i < len(arr); i++ { - arr[i] = vis.namespaceTerm(v.Elem(i)) - } - cpy.Value = ast.NewArray(arr...) - return &cpy - case ast.Object: - if a.IsGround() { - return a - } - cpy := *a - cpy.Value, _ = v.Map(func(k, v *ast.Term) (*ast.Term, *ast.Term, error) { - return vis.namespaceTerm(k), vis.namespaceTerm(v), nil - }) - return &cpy - case ast.Set: - if a.IsGround() { - return a - } - cpy := *a - cpy.Value, _ = v.Map(func(x *ast.Term) (*ast.Term, error) { - return vis.namespaceTerm(x), nil - }) - return &cpy - case ast.Ref: - cpy := *a - ref := make(ast.Ref, len(v)) - for i := 0; i < len(ref); i++ { - ref[i] = vis.namespaceTerm(v[i]) - } - cpy.Value = ref - return &cpy - } - return a -} - -const maxLinearScan = 16 - -// bindingsArrayHashMap uses an array with linear scan instead -// of a hash map for smaller # of entries. Hash maps start to -// show off their performance advantage only after 16 keys. -type bindingsArrayHashmap struct { - n int // Entries in the array. 
- a *[maxLinearScan]bindingArrayKeyValue - m map[ast.Var]bindingArrayKeyValue -} - -type bindingArrayKeyValue struct { - key *ast.Term - value value -} - -func newBindingsArrayHashmap() bindingsArrayHashmap { - return bindingsArrayHashmap{} -} - -func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) { - if b.m == nil { - if b.a == nil { - b.a = new([maxLinearScan]bindingArrayKeyValue) - } else if i := b.find(key); i >= 0 { - (*b.a)[i].value = value - return - } - - if b.n < maxLinearScan { - (*b.a)[b.n] = bindingArrayKeyValue{key, value} - b.n++ - return - } - - // Array is full, revert to using the hash map instead. - - b.m = make(map[ast.Var]bindingArrayKeyValue, maxLinearScan+1) - for _, kv := range *b.a { - b.m[kv.key.Value.(ast.Var)] = bindingArrayKeyValue{kv.key, kv.value} - } - b.m[key.Value.(ast.Var)] = bindingArrayKeyValue{key, value} - - b.n = 0 - return - } - - b.m[key.Value.(ast.Var)] = bindingArrayKeyValue{key, value} -} - -func (b *bindingsArrayHashmap) Get(key *ast.Term) (value, bool) { - if b.m == nil { - if i := b.find(key); i >= 0 { - return (*b.a)[i].value, true - } - - return value{}, false - } - - v, ok := b.m[key.Value.(ast.Var)] - if ok { - return v.value, true - } - - return value{}, false -} - -func (b *bindingsArrayHashmap) Delete(key *ast.Term) { - if b.m == nil { - if i := b.find(key); i >= 0 { - n := b.n - 1 - if i < n { - (*b.a)[i] = (*b.a)[n] - } - - b.n = n - } - return - } - - delete(b.m, key.Value.(ast.Var)) -} - -func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) { - if b.m == nil { - for i := 0; i < b.n; i++ { - if f((*b.a)[i].key, (*b.a)[i].value) { - return - } - } - return - } - - for _, v := range b.m { - if f(v.key, v.value) { - return - } - } -} - -func (b *bindingsArrayHashmap) find(key *ast.Term) int { - v := key.Value.(ast.Var) - for i := 0; i < b.n; i++ { - if (*b.a)[i].key.Value.(ast.Var) == v { - return i - } - } - - return -1 -} diff --git 
a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go b/vendor/github.com/open-policy-agent/opa/topdown/builtins.go index cf694d4331..f28c6c795d 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/builtins.go @@ -5,219 +5,63 @@ package topdown import ( - "context" - "encoding/binary" - "fmt" - "io" - "math/rand" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) type ( // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. - FunctionalBuiltin1 func(op1 ast.Value) (output ast.Value, err error) + FunctionalBuiltin1 = v1.FunctionalBuiltin1 //nolint:staticcheck // SA1019: Intentional use of deprecated type. // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. - FunctionalBuiltin2 func(op1, op2 ast.Value) (output ast.Value, err error) + FunctionalBuiltin2 = v1.FunctionalBuiltin2 //nolint:staticcheck // SA1019: Intentional use of deprecated type. // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. - FunctionalBuiltin3 func(op1, op2, op3 ast.Value) (output ast.Value, err error) + FunctionalBuiltin3 = v1.FunctionalBuiltin3 //nolint:staticcheck // SA1019: Intentional use of deprecated type. // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. - FunctionalBuiltin4 func(op1, op2, op3, op4 ast.Value) (output ast.Value, err error) + FunctionalBuiltin4 = v1.FunctionalBuiltin4 //nolint:staticcheck // SA1019: Intentional use of deprecated type. // BuiltinContext contains context from the evaluator that may be used by // built-in functions. 
- BuiltinContext struct { - Context context.Context // request context that was passed when query started - Metrics metrics.Metrics // metrics registry for recording built-in specific metrics - Seed io.Reader // randomization source - Time *ast.Term // wall clock time - Cancel Cancel // atomic value that signals evaluation to halt - Runtime *ast.Term // runtime information on the OPA instance - Cache builtins.Cache // built-in function state cache - InterQueryBuiltinCache cache.InterQueryCache // cross-query built-in function state cache - InterQueryBuiltinValueCache cache.InterQueryValueCache // cross-query built-in function state value cache. this cache is useful for scenarios where the entry size cannot be calculated - NDBuiltinCache builtins.NDBCache // cache for non-deterministic built-in state - Location *ast.Location // location of built-in call - Tracers []Tracer // Deprecated: Use QueryTracers instead - QueryTracers []QueryTracer // tracer objects for trace() built-in function - TraceEnabled bool // indicates whether tracing is enabled for the evaluation - QueryID uint64 // identifies query being evaluated - ParentID uint64 // identifies parent of query being evaluated - PrintHook print.Hook // provides callback function to use for printing - DistributedTracingOpts tracing.Options // options to be used by distributed tracing. - rand *rand.Rand // randomization source for non-security-sensitive operations - Capabilities *ast.Capabilities - } + BuiltinContext = v1.BuiltinContext // BuiltinFunc defines an interface for implementing built-in functions. // The built-in function is called with the plugged operands from the call // (including the output operands.) The implementation should evaluate the // operands and invoke the iterator for each successful/defined output // value. 
- BuiltinFunc func(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error + BuiltinFunc = v1.BuiltinFunc ) -// Rand returns a random number generator based on the Seed for this built-in -// context. The random number will be re-used across multiple calls to this -// function. If a random number generator cannot be created, an error is -// returned. -func (bctx *BuiltinContext) Rand() (*rand.Rand, error) { - - if bctx.rand != nil { - return bctx.rand, nil - } - - seed, err := readInt64(bctx.Seed) - if err != nil { - return nil, err - } - - bctx.rand = rand.New(rand.NewSource(seed)) - return bctx.rand, nil -} - // RegisterBuiltinFunc adds a new built-in function to the evaluation engine. func RegisterBuiltinFunc(name string, f BuiltinFunc) { - builtinFunctions[name] = builtinErrorWrapper(name, f) + v1.RegisterBuiltinFunc(name, f) } // Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. func RegisterFunctionalBuiltin1(name string, fun FunctionalBuiltin1) { - builtinFunctions[name] = functionalWrapper1(name, fun) + v1.RegisterFunctionalBuiltin1(name, fun) } // Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. func RegisterFunctionalBuiltin2(name string, fun FunctionalBuiltin2) { - builtinFunctions[name] = functionalWrapper2(name, fun) + v1.RegisterFunctionalBuiltin2(name, fun) } // Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. func RegisterFunctionalBuiltin3(name string, fun FunctionalBuiltin3) { - builtinFunctions[name] = functionalWrapper3(name, fun) + v1.RegisterFunctionalBuiltin3(name, fun) } // Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. func RegisterFunctionalBuiltin4(name string, fun FunctionalBuiltin4) { - builtinFunctions[name] = functionalWrapper4(name, fun) + v1.RegisterFunctionalBuiltin4(name, fun) } // GetBuiltin returns a built-in function implementation, nil if no built-in found. 
func GetBuiltin(name string) BuiltinFunc { - return builtinFunctions[name] + return v1.GetBuiltin(name) } // Deprecated: The BuiltinEmpty type is no longer needed. Use nil return values instead. -type BuiltinEmpty struct{} - -func (BuiltinEmpty) Error() string { - return "" -} - -var builtinFunctions = map[string]BuiltinFunc{} - -func builtinErrorWrapper(name string, fn BuiltinFunc) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - err := fn(bctx, args, iter) - if err == nil { - return nil - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func functionalWrapper1(name string, fn FunctionalBuiltin1) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - result, err := fn(args[0].Value) - if err == nil { - return iter(ast.NewTerm(result)) - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func functionalWrapper2(name string, fn FunctionalBuiltin2) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - result, err := fn(args[0].Value, args[1].Value) - if err == nil { - return iter(ast.NewTerm(result)) - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func functionalWrapper3(name string, fn FunctionalBuiltin3) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - result, err := fn(args[0].Value, args[1].Value, args[2].Value) - if err == nil { - return iter(ast.NewTerm(result)) - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func functionalWrapper4(name string, fn FunctionalBuiltin4) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - result, err := fn(args[0].Value, args[1].Value, args[2].Value, args[3].Value) - if err == nil { - return iter(ast.NewTerm(result)) - } - if _, empty := err.(BuiltinEmpty); empty { - return nil - } - return 
handleBuiltinErr(name, bctx.Location, err) - } -} - -func handleBuiltinErr(name string, loc *ast.Location, err error) error { - switch err := err.(type) { - case BuiltinEmpty: - return nil - case *Error, Halt: - return err - case builtins.ErrOperand: - e := &Error{ - Code: TypeErr, - Message: fmt.Sprintf("%v: %v", name, err.Error()), - Location: loc, - } - return e.Wrap(err) - default: - e := &Error{ - Code: BuiltinErr, - Message: fmt.Sprintf("%v: %v", name, err.Error()), - Location: loc, - } - return e.Wrap(err) - } -} - -func readInt64(r io.Reader) (int64, error) { - bs := make([]byte, 8) - n, err := io.ReadFull(r, bs) - if n != len(bs) || err != nil { - return 0, err - } - return int64(binary.BigEndian.Uint64(bs)), nil -} - -// Used to get older-style (ast.Term, error) tuples out of newer functions. -func getResult(fn BuiltinFunc, operands ...*ast.Term) (*ast.Term, error) { - var result *ast.Term - extractionFn := func(r *ast.Term) error { - result = r - return nil - } - err := fn(BuiltinContext{}, operands, extractionFn) - if err != nil { - return nil, err - } - return result, nil -} +type BuiltinEmpty = v1.Builtin diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go b/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go deleted file mode 100644 index 353f956840..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package builtins contains utilities for implementing built-in functions. -package builtins - -import ( - "encoding/json" - "fmt" - "math/big" - "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" -) - -// Cache defines the built-in cache used by the top-down evaluation. The keys -// must be comparable and should not be of type string. 
-type Cache map[interface{}]interface{} - -// Put updates the cache for the named built-in. -func (c Cache) Put(k, v interface{}) { - c[k] = v -} - -// Get returns the cached value for k. -func (c Cache) Get(k interface{}) (interface{}, bool) { - v, ok := c[k] - return v, ok -} - -// We use an ast.Object for the cached keys/values because a naive -// map[ast.Value]ast.Value will not correctly detect value equality of -// the member keys. -type NDBCache map[string]ast.Object - -func (c NDBCache) AsValue() ast.Value { - out := ast.NewObject() - for bname, obj := range c { - out.Insert(ast.StringTerm(bname), ast.NewTerm(obj)) - } - return out -} - -// Put updates the cache for the named built-in. -// Automatically creates the 2-level hierarchy as needed. -func (c NDBCache) Put(name string, k, v ast.Value) { - if _, ok := c[name]; !ok { - c[name] = ast.NewObject() - } - c[name].Insert(ast.NewTerm(k), ast.NewTerm(v)) -} - -// Get returns the cached value for k for the named builtin. -func (c NDBCache) Get(name string, k ast.Value) (ast.Value, bool) { - if m, ok := c[name]; ok { - v := m.Get(ast.NewTerm(k)) - if v != nil { - return v.Value, true - } - return nil, false - } - return nil, false -} - -// Convenience functions for serializing the data structure. -func (c NDBCache) MarshalJSON() ([]byte, error) { - v, err := ast.JSON(c.AsValue()) - if err != nil { - return nil, err - } - return json.Marshal(v) -} - -func (c *NDBCache) UnmarshalJSON(data []byte) error { - out := map[string]ast.Object{} - var incoming interface{} - - // Note: We use util.Unmarshal instead of json.Unmarshal to get - // correct deserialization of number types. - err := util.Unmarshal(data, &incoming) - if err != nil { - return err - } - - // Convert interface types back into ast.Value types. - nestedObject, err := ast.InterfaceToValue(incoming) - if err != nil { - return err - } - - // Reconstruct NDBCache from nested ast.Object structure. 
- if source, ok := nestedObject.(ast.Object); ok { - err = source.Iter(func(k, v *ast.Term) error { - if obj, ok := v.Value.(ast.Object); ok { - out[string(k.Value.(ast.String))] = obj - return nil - } - return fmt.Errorf("expected Object, got other Value type in conversion") - }) - if err != nil { - return err - } - } - - *c = out - - return nil -} - -// ErrOperand represents an invalid operand has been passed to a built-in -// function. Built-ins should return ErrOperand to indicate a type error has -// occurred. -type ErrOperand string - -func (err ErrOperand) Error() string { - return string(err) -} - -// NewOperandErr returns a generic operand error. -func NewOperandErr(pos int, f string, a ...interface{}) error { - f = fmt.Sprintf("operand %v ", pos) + f - return ErrOperand(fmt.Sprintf(f, a...)) -} - -// NewOperandTypeErr returns an operand error indicating the operand's type was wrong. -func NewOperandTypeErr(pos int, got ast.Value, expected ...string) error { - - if len(expected) == 1 { - return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.TypeName(got)) - } - - return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.TypeName(got)) -} - -// NewOperandElementErr returns an operand error indicating an element in the -// composite operand was wrong. -func NewOperandElementErr(pos int, composite ast.Value, got ast.Value, expected ...string) error { - - tpe := ast.TypeName(composite) - - if len(expected) == 1 { - return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.TypeName(got)) - } - - return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.TypeName(got)) -} - -// NewOperandEnumErr returns an operand error indicating a value was wrong. 
-func NewOperandEnumErr(pos int, expected ...string) error { - - if len(expected) == 1 { - return NewOperandErr(pos, "must be %v", expected[0]) - } - - return NewOperandErr(pos, "must be one of {%v}", strings.Join(expected, ", ")) -} - -// IntOperand converts x to an int. If the cast fails, a descriptive error is -// returned. -func IntOperand(x ast.Value, pos int) (int, error) { - n, ok := x.(ast.Number) - if !ok { - return 0, NewOperandTypeErr(pos, x, "number") - } - - i, ok := n.Int() - if !ok { - return 0, NewOperandErr(pos, "must be integer number but got floating-point number") - } - - return i, nil -} - -// BigIntOperand converts x to a big int. If the cast fails, a descriptive error -// is returned. -func BigIntOperand(x ast.Value, pos int) (*big.Int, error) { - n, err := NumberOperand(x, 1) - if err != nil { - return nil, NewOperandTypeErr(pos, x, "integer") - } - bi, err := NumberToInt(n) - if err != nil { - return nil, NewOperandErr(pos, "must be integer number but got floating-point number") - } - - return bi, nil -} - -// NumberOperand converts x to a number. If the cast fails, a descriptive error is -// returned. -func NumberOperand(x ast.Value, pos int) (ast.Number, error) { - n, ok := x.(ast.Number) - if !ok { - return ast.Number(""), NewOperandTypeErr(pos, x, "number") - } - return n, nil -} - -// SetOperand converts x to a set. If the cast fails, a descriptive error is -// returned. -func SetOperand(x ast.Value, pos int) (ast.Set, error) { - s, ok := x.(ast.Set) - if !ok { - return nil, NewOperandTypeErr(pos, x, "set") - } - return s, nil -} - -// StringOperand converts x to a string. If the cast fails, a descriptive error is -// returned. -func StringOperand(x ast.Value, pos int) (ast.String, error) { - s, ok := x.(ast.String) - if !ok { - return ast.String(""), NewOperandTypeErr(pos, x, "string") - } - return s, nil -} - -// ObjectOperand converts x to an object. If the cast fails, a descriptive -// error is returned. 
-func ObjectOperand(x ast.Value, pos int) (ast.Object, error) { - o, ok := x.(ast.Object) - if !ok { - return nil, NewOperandTypeErr(pos, x, "object") - } - return o, nil -} - -// ArrayOperand converts x to an array. If the cast fails, a descriptive -// error is returned. -func ArrayOperand(x ast.Value, pos int) (*ast.Array, error) { - a, ok := x.(*ast.Array) - if !ok { - return ast.NewArray(), NewOperandTypeErr(pos, x, "array") - } - return a, nil -} - -// NumberToFloat converts n to a big float. -func NumberToFloat(n ast.Number) *big.Float { - r, ok := new(big.Float).SetString(string(n)) - if !ok { - panic("illegal value") - } - return r -} - -// FloatToNumber converts f to a number. -func FloatToNumber(f *big.Float) ast.Number { - var format byte = 'g' - if f.IsInt() { - format = 'f' - } - return ast.Number(f.Text(format, -1)) -} - -// NumberToInt converts n to a big int. -// If n cannot be converted to an big int, an error is returned. -func NumberToInt(n ast.Number) (*big.Int, error) { - f := NumberToFloat(n) - r, accuracy := f.Int(nil) - if accuracy != big.Exact { - return nil, fmt.Errorf("illegal value") - } - return r, nil -} - -// IntToNumber converts i to a number. -func IntToNumber(i *big.Int) ast.Number { - return ast.Number(i.String()) -} - -// StringSliceOperand converts x to a []string. If the cast fails, a descriptive error is -// returned. -func StringSliceOperand(a ast.Value, pos int) ([]string, error) { - type iterable interface { - Iter(func(*ast.Term) error) error - Len() int - } - - strs, ok := a.(iterable) - if !ok { - return nil, NewOperandTypeErr(pos, a, "array", "set") - } - - var outStrs = make([]string, 0, strs.Len()) - if err := strs.Iter(func(x *ast.Term) error { - s, ok := x.Value.(ast.String) - if !ok { - return NewOperandElementErr(pos, a, x.Value, "string") - } - outStrs = append(outStrs, string(s)) - return nil - }); err != nil { - return nil, err - } - - return outStrs, nil -} - -// RuneSliceOperand converts x to a []rune. 
If the cast fails, a descriptive error is -// returned. -func RuneSliceOperand(x ast.Value, pos int) ([]rune, error) { - a, err := ArrayOperand(x, pos) - if err != nil { - return nil, err - } - - var f = make([]rune, a.Len()) - for k := 0; k < a.Len(); k++ { - b := a.Elem(k) - c, ok := b.Value.(ast.String) - if !ok { - return nil, NewOperandElementErr(pos, x, b.Value, "string") - } - - d := []rune(string(c)) - if len(d) != 1 { - return nil, NewOperandElementErr(pos, x, b.Value, "rune") - } - - f[k] = d[0] - } - - return f, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache.go b/vendor/github.com/open-policy-agent/opa/topdown/cache.go index 265457e02f..bb39df03e0 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/cache.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/cache.go @@ -5,348 +5,15 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // VirtualCache defines the interface for a cache that stores the results of // evaluated virtual documents (rules). // The cache is a stack of frames, where each frame is a mapping from references // to values. -type VirtualCache interface { - // Push pushes a new, empty frame of value mappings onto the stack. - Push() - - // Pop pops the top frame of value mappings from the stack, removing all associated entries. - Pop() - - // Get returns the value associated with the given reference. The second return value - // indicates whether the reference has a recorded 'undefined' result. - Get(ref ast.Ref) (*ast.Term, bool) - - // Put associates the given reference with the given value. If the value is nil, the reference - // is marked as having an 'undefined' result. - Put(ref ast.Ref, value *ast.Term) - - // Keys returns the set of keys that have been cached for the active frame. 
- Keys() []ast.Ref -} - -type virtualCache struct { - stack []*virtualCacheElem -} - -type virtualCacheElem struct { - value *ast.Term - children *util.HashMap - undefined bool -} +type VirtualCache = v1.VirtualCache func NewVirtualCache() VirtualCache { - cache := &virtualCache{} - cache.Push() - return cache -} - -func (c *virtualCache) Push() { - c.stack = append(c.stack, newVirtualCacheElem()) -} - -func (c *virtualCache) Pop() { - c.stack = c.stack[:len(c.stack)-1] -} - -// Returns the resolved value of the AST term and a flag indicating if the value -// should be interpretted as undefined: -// -// nil, true indicates the ref is undefined -// ast.Term, false indicates the ref is defined -// nil, false indicates the ref has not been cached -// ast.Term, true is impossible -func (c *virtualCache) Get(ref ast.Ref) (*ast.Term, bool) { - node := c.stack[len(c.stack)-1] - for i := 0; i < len(ref); i++ { - x, ok := node.children.Get(ref[i]) - if !ok { - return nil, false - } - node = x.(*virtualCacheElem) - } - if node.undefined { - return nil, true - } - - return node.value, false -} - -// If value is a nil pointer, set the 'undefined' flag on the cache element to -// indicate that the Ref has resolved to undefined. 
-func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) { - node := c.stack[len(c.stack)-1] - for i := 0; i < len(ref); i++ { - x, ok := node.children.Get(ref[i]) - if ok { - node = x.(*virtualCacheElem) - } else { - next := newVirtualCacheElem() - node.children.Put(ref[i], next) - node = next - } - } - if value != nil { - node.value = value - } else { - node.undefined = true - } -} - -func (c *virtualCache) Keys() []ast.Ref { - node := c.stack[len(c.stack)-1] - return keysRecursive(nil, node) -} - -func keysRecursive(root ast.Ref, node *virtualCacheElem) []ast.Ref { - var keys []ast.Ref - node.children.Iter(func(k, v util.T) bool { - ref := root.Append(k.(*ast.Term)) - if v.(*virtualCacheElem).value != nil { - keys = append(keys, ref) - } - if v.(*virtualCacheElem).children.Len() > 0 { - keys = append(keys, keysRecursive(ref, v.(*virtualCacheElem))...) - } - return false - }) - return keys -} - -func newVirtualCacheElem() *virtualCacheElem { - return &virtualCacheElem{children: newVirtualCacheHashMap()} -} - -func newVirtualCacheHashMap() *util.HashMap { - return util.NewHashMap(func(a, b util.T) bool { - return a.(*ast.Term).Equal(b.(*ast.Term)) - }, func(x util.T) int { - return x.(*ast.Term).Hash() - }) -} - -// baseCache implements a trie structure to cache base documents read out of -// storage. Values inserted into the cache may contain other values that were -// previously inserted. In this case, the previous values are erased from the -// structure. 
-type baseCache struct { - root *baseCacheElem -} - -func newBaseCache() *baseCache { - return &baseCache{ - root: newBaseCacheElem(), - } -} - -func (c *baseCache) Get(ref ast.Ref) ast.Value { - node := c.root - for i := 0; i < len(ref); i++ { - node = node.children[ref[i].Value] - if node == nil { - return nil - } else if node.value != nil { - result, err := node.value.Find(ref[i+1:]) - if err != nil { - return nil - } - return result - } - } - return nil -} - -func (c *baseCache) Put(ref ast.Ref, value ast.Value) { - node := c.root - for i := 0; i < len(ref); i++ { - if child, ok := node.children[ref[i].Value]; ok { - node = child - } else { - child := newBaseCacheElem() - node.children[ref[i].Value] = child - node = child - } - } - node.set(value) -} - -type baseCacheElem struct { - value ast.Value - children map[ast.Value]*baseCacheElem -} - -func newBaseCacheElem() *baseCacheElem { - return &baseCacheElem{ - children: map[ast.Value]*baseCacheElem{}, - } -} - -func (e *baseCacheElem) set(value ast.Value) { - e.value = value - e.children = map[ast.Value]*baseCacheElem{} -} - -type refStack struct { - sl []refStackElem -} - -type refStackElem struct { - refs []ast.Ref -} - -func newRefStack() *refStack { - return &refStack{} -} - -func (s *refStack) Push(refs []ast.Ref) { - s.sl = append(s.sl, refStackElem{refs: refs}) -} - -func (s *refStack) Pop() { - s.sl = s.sl[:len(s.sl)-1] -} - -func (s *refStack) Prefixed(ref ast.Ref) bool { - if s != nil { - for i := len(s.sl) - 1; i >= 0; i-- { - for j := range s.sl[i].refs { - if ref.HasPrefix(s.sl[i].refs[j]) { - return true - } - } - } - } - return false -} - -type comprehensionCache struct { - stack []map[*ast.Term]*comprehensionCacheElem -} - -type comprehensionCacheElem struct { - value *ast.Term - children *util.HashMap -} - -func newComprehensionCache() *comprehensionCache { - cache := &comprehensionCache{} - cache.Push() - return cache -} - -func (c *comprehensionCache) Push() { - c.stack = append(c.stack, 
map[*ast.Term]*comprehensionCacheElem{}) -} - -func (c *comprehensionCache) Pop() { - c.stack = c.stack[:len(c.stack)-1] -} - -func (c *comprehensionCache) Elem(t *ast.Term) (*comprehensionCacheElem, bool) { - elem, ok := c.stack[len(c.stack)-1][t] - return elem, ok -} - -func (c *comprehensionCache) Set(t *ast.Term, elem *comprehensionCacheElem) { - c.stack[len(c.stack)-1][t] = elem -} - -func newComprehensionCacheElem() *comprehensionCacheElem { - return &comprehensionCacheElem{children: newComprehensionCacheHashMap()} -} - -func (c *comprehensionCacheElem) Get(key []*ast.Term) *ast.Term { - node := c - for i := 0; i < len(key); i++ { - x, ok := node.children.Get(key[i]) - if !ok { - return nil - } - node = x.(*comprehensionCacheElem) - } - return node.value -} - -func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) { - node := c - for i := 0; i < len(key); i++ { - x, ok := node.children.Get(key[i]) - if ok { - node = x.(*comprehensionCacheElem) - } else { - next := newComprehensionCacheElem() - node.children.Put(key[i], next) - node = next - } - } - node.value = value -} - -func newComprehensionCacheHashMap() *util.HashMap { - return util.NewHashMap(func(a, b util.T) bool { - return a.(*ast.Term).Equal(b.(*ast.Term)) - }, func(x util.T) int { - return x.(*ast.Term).Hash() - }) -} - -type functionMocksStack struct { - stack []*functionMocksElem -} - -type functionMocksElem []frame - -type frame map[string]*ast.Term - -func newFunctionMocksStack() *functionMocksStack { - stack := &functionMocksStack{} - stack.Push() - return stack -} - -func newFunctionMocksElem() *functionMocksElem { - return &functionMocksElem{} -} - -func (s *functionMocksStack) Push() { - s.stack = append(s.stack, newFunctionMocksElem()) -} - -func (s *functionMocksStack) Pop() { - s.stack = s.stack[:len(s.stack)-1] -} - -func (s *functionMocksStack) PopPairs() { - current := s.stack[len(s.stack)-1] - *current = (*current)[:len(*current)-1] -} - -func (s *functionMocksStack) 
PutPairs(mocks [][2]*ast.Term) { - el := frame{} - for i := range mocks { - el[mocks[i][0].Value.String()] = mocks[i][1] - } - s.Put(el) -} - -func (s *functionMocksStack) Put(el frame) { - current := s.stack[len(s.stack)-1] - *current = append(*current, el) -} - -func (s *functionMocksStack) Get(f ast.Ref) (*ast.Term, bool) { - current := *s.stack[len(s.stack)-1] - for i := len(current) - 1; i >= 0; i-- { - if r, ok := current[i][f.String()]; ok { - return r, true - } - } - return nil, false + return v1.NewVirtualCache() } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go b/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go deleted file mode 100644 index 55ed340619..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package cache defines the inter-query cache interface that can cache data across queries -package cache - -import ( - "container/list" - "context" - "fmt" - "math" - "sync" - "time" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultInterQueryBuiltinValueCacheSize = int(0) // unlimited - defaultMaxSizeBytes = int64(0) // unlimited - defaultForcedEvictionThresholdPercentage = int64(100) // trigger at max_size_bytes - defaultStaleEntryEvictionPeriodSeconds = int64(0) // never -) - -// Config represents the configuration for the inter-query builtin cache. -type Config struct { - InterQueryBuiltinCache InterQueryBuiltinCacheConfig `json:"inter_query_builtin_cache"` - InterQueryBuiltinValueCache InterQueryBuiltinValueCacheConfig `json:"inter_query_builtin_value_cache"` -} - -// InterQueryBuiltinValueCacheConfig represents the configuration of the inter-query value cache that built-in functions can utilize. 
-// MaxNumEntries - max number of cache entries -type InterQueryBuiltinValueCacheConfig struct { - MaxNumEntries *int `json:"max_num_entries,omitempty"` -} - -// InterQueryBuiltinCacheConfig represents the configuration of the inter-query cache that built-in functions can utilize. -// MaxSizeBytes - max capacity of cache in bytes -// ForcedEvictionThresholdPercentage - capacity usage in percentage after which forced FIFO eviction starts -// StaleEntryEvictionPeriodSeconds - time period between end of previous and start of new stale entry eviction routine -type InterQueryBuiltinCacheConfig struct { - MaxSizeBytes *int64 `json:"max_size_bytes,omitempty"` - ForcedEvictionThresholdPercentage *int64 `json:"forced_eviction_threshold_percentage,omitempty"` - StaleEntryEvictionPeriodSeconds *int64 `json:"stale_entry_eviction_period_seconds,omitempty"` -} - -// ParseCachingConfig returns the config for the inter-query cache. -func ParseCachingConfig(raw []byte) (*Config, error) { - if raw == nil { - maxSize := new(int64) - *maxSize = defaultMaxSizeBytes - threshold := new(int64) - *threshold = defaultForcedEvictionThresholdPercentage - period := new(int64) - *period = defaultStaleEntryEvictionPeriodSeconds - - maxInterQueryBuiltinValueCacheSize := new(int) - *maxInterQueryBuiltinValueCacheSize = defaultInterQueryBuiltinValueCacheSize - - return &Config{InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{MaxSizeBytes: maxSize, ForcedEvictionThresholdPercentage: threshold, StaleEntryEvictionPeriodSeconds: period}, - InterQueryBuiltinValueCache: InterQueryBuiltinValueCacheConfig{MaxNumEntries: maxInterQueryBuiltinValueCacheSize}}, nil - } - - var config Config - - if err := util.Unmarshal(raw, &config); err == nil { - if err = config.validateAndInjectDefaults(); err != nil { - return nil, err - } - } else { - return nil, err - } - - return &config, nil -} - -func (c *Config) validateAndInjectDefaults() error { - if c.InterQueryBuiltinCache.MaxSizeBytes == nil { - maxSize := 
new(int64) - *maxSize = defaultMaxSizeBytes - c.InterQueryBuiltinCache.MaxSizeBytes = maxSize - } - if c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage == nil { - threshold := new(int64) - *threshold = defaultForcedEvictionThresholdPercentage - c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage = threshold - } else { - threshold := *c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage - if threshold < 0 || threshold > 100 { - return fmt.Errorf("invalid forced_eviction_threshold_percentage %v", threshold) - } - } - if c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds == nil { - period := new(int64) - *period = defaultStaleEntryEvictionPeriodSeconds - c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds = period - } else { - period := *c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds - if period < 0 { - return fmt.Errorf("invalid stale_entry_eviction_period_seconds %v", period) - } - } - - if c.InterQueryBuiltinValueCache.MaxNumEntries == nil { - maxSize := new(int) - *maxSize = defaultInterQueryBuiltinValueCacheSize - c.InterQueryBuiltinValueCache.MaxNumEntries = maxSize - } else { - numEntries := *c.InterQueryBuiltinValueCache.MaxNumEntries - if numEntries < 0 { - return fmt.Errorf("invalid max_num_entries %v", numEntries) - } - } - - return nil -} - -// InterQueryCacheValue defines the interface for the data that the inter-query cache holds. -type InterQueryCacheValue interface { - SizeInBytes() int64 - Clone() (InterQueryCacheValue, error) -} - -// InterQueryCache defines the interface for the inter-query cache. 
-type InterQueryCache interface { - Get(key ast.Value) (value InterQueryCacheValue, found bool) - Insert(key ast.Value, value InterQueryCacheValue) int - InsertWithExpiry(key ast.Value, value InterQueryCacheValue, expiresAt time.Time) int - Delete(key ast.Value) - UpdateConfig(config *Config) - Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) -} - -// NewInterQueryCache returns a new inter-query cache. -// The cache uses a FIFO eviction policy when it reaches the forced eviction threshold. -// Parameters: -// -// config - to configure the InterQueryCache -func NewInterQueryCache(config *Config) InterQueryCache { - return newCache(config) -} - -// NewInterQueryCacheWithContext returns a new inter-query cache with context. -// The cache uses a combination of FIFO eviction policy when it reaches the forced eviction threshold -// and a periodic cleanup routine to remove stale entries that exceed their expiration time, if specified. -// If configured with a zero stale_entry_eviction_period_seconds value, the stale entry cleanup routine is disabled. 
-// -// Parameters: -// -// ctx - used to control lifecycle of the stale entry cleanup routine -// config - to configure the InterQueryCache -func NewInterQueryCacheWithContext(ctx context.Context, config *Config) InterQueryCache { - iqCache := newCache(config) - if iqCache.staleEntryEvictionTimePeriodSeconds() > 0 { - cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) - go func() { - for { - select { - case <-cleanupTicker.C: - cleanupTicker.Stop() - iqCache.cleanStaleValues() - cleanupTicker = time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) - case <-ctx.Done(): - cleanupTicker.Stop() - return - } - } - }() - } - - return iqCache -} - -type cacheItem struct { - value InterQueryCacheValue - expiresAt time.Time - keyElement *list.Element -} - -type cache struct { - items map[string]cacheItem - usage int64 - config *Config - l *list.List - mtx sync.Mutex -} - -func newCache(config *Config) *cache { - return &cache{ - items: map[string]cacheItem{}, - usage: 0, - config: config, - l: list.New(), - } -} - -// InsertWithExpiry inserts a key k into the cache with value v with an expiration time expiresAt. -// A zero time value for expiresAt indicates no expiry -func (c *cache) InsertWithExpiry(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) { - c.mtx.Lock() - defer c.mtx.Unlock() - return c.unsafeInsert(k, v, expiresAt) -} - -// Insert inserts a key k into the cache with value v with no expiration time. -func (c *cache) Insert(k ast.Value, v InterQueryCacheValue) (dropped int) { - return c.InsertWithExpiry(k, v, time.Time{}) -} - -// Get returns the value in the cache for k. -func (c *cache) Get(k ast.Value) (InterQueryCacheValue, bool) { - c.mtx.Lock() - defer c.mtx.Unlock() - cacheItem, ok := c.unsafeGet(k) - - if ok { - return cacheItem.value, true - } - return nil, false -} - -// Delete deletes the value in the cache for k. 
-func (c *cache) Delete(k ast.Value) { - c.mtx.Lock() - defer c.mtx.Unlock() - c.unsafeDelete(k) -} - -func (c *cache) UpdateConfig(config *Config) { - if config == nil { - return - } - c.mtx.Lock() - defer c.mtx.Unlock() - c.config = config -} - -func (c *cache) Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) { - c.mtx.Lock() - defer c.mtx.Unlock() - return c.unsafeClone(value) -} - -func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) { - size := v.SizeInBytes() - limit := int64(math.Ceil(float64(c.forcedEvictionThresholdPercentage())/100.0) * (float64(c.maxSizeBytes()))) - if limit > 0 { - if size > limit { - dropped++ - return dropped - } - - for key := c.l.Front(); key != nil && (c.usage+size > limit); key = c.l.Front() { - dropKey := key.Value.(ast.Value) - c.unsafeDelete(dropKey) - dropped++ - } - } - - // By deleting the old value, if it exists, we ensure the usage variable stays correct - c.unsafeDelete(k) - - c.items[k.String()] = cacheItem{ - value: v, - expiresAt: expiresAt, - keyElement: c.l.PushBack(k), - } - c.usage += size - return dropped -} - -func (c *cache) unsafeGet(k ast.Value) (cacheItem, bool) { - value, ok := c.items[k.String()] - return value, ok -} - -func (c *cache) unsafeDelete(k ast.Value) { - cacheItem, ok := c.unsafeGet(k) - if !ok { - return - } - - c.usage -= cacheItem.value.SizeInBytes() - delete(c.items, k.String()) - c.l.Remove(cacheItem.keyElement) -} - -func (c *cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) { - return value.Clone() -} - -func (c *cache) maxSizeBytes() int64 { - if c.config == nil { - return defaultMaxSizeBytes - } - return *c.config.InterQueryBuiltinCache.MaxSizeBytes -} - -func (c *cache) forcedEvictionThresholdPercentage() int64 { - if c.config == nil { - return defaultForcedEvictionThresholdPercentage - } - return *c.config.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage -} - -func (c *cache) 
staleEntryEvictionTimePeriodSeconds() int64 { - if c.config == nil { - return defaultStaleEntryEvictionPeriodSeconds - } - return *c.config.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds -} - -func (c *cache) cleanStaleValues() (dropped int) { - c.mtx.Lock() - defer c.mtx.Unlock() - for key := c.l.Front(); key != nil; { - nextKey := key.Next() - // if expiresAt is zero, the item doesn't have an expiry - if ea := c.items[(key.Value.(ast.Value)).String()].expiresAt; !ea.IsZero() && ea.Before(time.Now()) { - c.unsafeDelete(key.Value.(ast.Value)) - dropped++ - } - key = nextKey - } - return dropped -} - -type InterQueryValueCache interface { - Get(key ast.Value) (value any, found bool) - Insert(key ast.Value, value any) int - Delete(key ast.Value) - UpdateConfig(config *Config) -} - -type interQueryValueCache struct { - items map[string]any - config *Config - mtx sync.RWMutex -} - -// Get returns the value in the cache for k. -func (c *interQueryValueCache) Get(k ast.Value) (any, bool) { - c.mtx.RLock() - defer c.mtx.RUnlock() - value, ok := c.items[k.String()] - return value, ok -} - -// Insert inserts a key k into the cache with value v. -func (c *interQueryValueCache) Insert(k ast.Value, v any) (dropped int) { - c.mtx.Lock() - defer c.mtx.Unlock() - - maxEntries := c.maxNumEntries() - if maxEntries > 0 { - if len(c.items) >= maxEntries { - itemsToRemove := len(c.items) - maxEntries + 1 - - // Delete a (semi-)random key to make room for the new one. - for k := range c.items { - delete(c.items, k) - dropped++ - - if itemsToRemove == dropped { - break - } - } - } - } - - c.items[k.String()] = v - return dropped -} - -// Delete deletes the value in the cache for k. -func (c *interQueryValueCache) Delete(k ast.Value) { - c.mtx.Lock() - defer c.mtx.Unlock() - delete(c.items, k.String()) -} - -// UpdateConfig updates the cache config. 
-func (c *interQueryValueCache) UpdateConfig(config *Config) { - if config == nil { - return - } - c.mtx.Lock() - defer c.mtx.Unlock() - c.config = config -} - -func (c *interQueryValueCache) maxNumEntries() int { - if c.config == nil { - return defaultInterQueryBuiltinValueCacheSize - } - return *c.config.InterQueryBuiltinValueCache.MaxNumEntries -} - -func NewInterQueryValueCache(_ context.Context, config *Config) InterQueryValueCache { - return &interQueryValueCache{ - items: map[string]any{}, - config: config, - } -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cancel.go b/vendor/github.com/open-policy-agent/opa/topdown/cancel.go index 534e0799a1..395a14a80d 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/cancel.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/cancel.go @@ -5,29 +5,14 @@ package topdown import ( - "sync/atomic" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // Cancel defines the interface for cancelling topdown queries. Cancel // operations are thread-safe and idempotent. -type Cancel interface { - Cancel() - Cancelled() bool -} - -type cancel struct { - flag int32 -} +type Cancel = v1.Cancel // NewCancel returns a new Cancel object. func NewCancel() Cancel { - return &cancel{} -} - -func (c *cancel) Cancel() { - atomic.StoreInt32(&c.flag, 1) -} - -func (c *cancel) Cancelled() bool { - return atomic.LoadInt32(&c.flag) != 0 + return v1.NewCancel() } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go b/vendor/github.com/open-policy-agent/opa/topdown/crypto.go deleted file mode 100644 index f24432a264..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go +++ /dev/null @@ -1,790 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "bytes" - "crypto" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "hash" - "os" - "strings" - "time" - - "github.com/open-policy-agent/opa/internal/jwx/jwk" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/util" -) - -const ( - // blockTypeCertificate indicates this PEM block contains the signed certificate. - // Exported for tests. - blockTypeCertificate = "CERTIFICATE" - // blockTypeCertificateRequest indicates this PEM block contains a certificate - // request. Exported for tests. - blockTypeCertificateRequest = "CERTIFICATE REQUEST" - // blockTypeRSAPrivateKey indicates this PEM block contains a RSA private key. - // Exported for tests. - blockTypeRSAPrivateKey = "RSA PRIVATE KEY" - // blockTypeRSAPrivateKey indicates this PEM block contains a RSA private key. - // Exported for tests. - blockTypePrivateKey = "PRIVATE KEY" - blockTypeEcPrivateKey = "EC PRIVATE KEY" -) - -func builtinCryptoX509ParseCertificates(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - input, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - certs, err := getX509CertsFromString(string(input)) - if err != nil { - return err - } - - v, err := ast.InterfaceToValue(extendCertificates(certs)) - if err != nil { - return err - } - - return iter(ast.NewTerm(v)) -} - -// extendedCert is a wrapper around x509.Certificate that adds additional fields for JSON serialization. 
-type extendedCert struct { - x509.Certificate - URIStrings []string -} - -func extendCertificates(certs []*x509.Certificate) []extendedCert { - // add a field to certs containing the URIs as strings - processedCerts := make([]extendedCert, len(certs)) - - for i, cert := range certs { - processedCerts[i].Certificate = *cert - if cert.URIs != nil { - processedCerts[i].URIStrings = make([]string, len(cert.URIs)) - for j, uri := range cert.URIs { - processedCerts[i].URIStrings[j] = uri.String() - } - } - } - return processedCerts -} - -func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - a := operands[0].Value - input, err := builtins.StringOperand(a, 1) - if err != nil { - return err - } - - invalid := ast.ArrayTerm( - ast.BooleanTerm(false), - ast.NewTerm(ast.NewArray()), - ) - - certs, err := getX509CertsFromString(string(input)) - if err != nil { - return iter(invalid) - } - - verified, err := verifyX509CertificateChain(certs, x509.VerifyOptions{}) - if err != nil { - return iter(invalid) - } - - value, err := ast.InterfaceToValue(extendCertificates(verified)) - if err != nil { - return err - } - - valid := ast.ArrayTerm( - ast.BooleanTerm(true), - ast.NewTerm(value), - ) - - return iter(valid) -} - -var allowedKeyUsages = map[string]x509.ExtKeyUsage{ - "KeyUsageAny": x509.ExtKeyUsageAny, - "KeyUsageServerAuth": x509.ExtKeyUsageServerAuth, - "KeyUsageClientAuth": x509.ExtKeyUsageClientAuth, - "KeyUsageCodeSigning": x509.ExtKeyUsageCodeSigning, - "KeyUsageEmailProtection": x509.ExtKeyUsageEmailProtection, - "KeyUsageIPSECEndSystem": x509.ExtKeyUsageIPSECEndSystem, - "KeyUsageIPSECTunnel": x509.ExtKeyUsageIPSECTunnel, - "KeyUsageIPSECUser": x509.ExtKeyUsageIPSECUser, - "KeyUsageTimeStamping": x509.ExtKeyUsageTimeStamping, - "KeyUsageOCSPSigning": x509.ExtKeyUsageOCSPSigning, - "KeyUsageMicrosoftServerGatedCrypto": x509.ExtKeyUsageMicrosoftServerGatedCrypto, - 
"KeyUsageNetscapeServerGatedCrypto": x509.ExtKeyUsageNetscapeServerGatedCrypto, - "KeyUsageMicrosoftCommercialCodeSigning": x509.ExtKeyUsageMicrosoftCommercialCodeSigning, - "KeyUsageMicrosoftKernelCodeSigning": x509.ExtKeyUsageMicrosoftKernelCodeSigning, -} - -func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - input, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - options, err := builtins.ObjectOperand(operands[1].Value, 2) - if err != nil { - return err - } - - invalid := ast.ArrayTerm( - ast.BooleanTerm(false), - ast.NewTerm(ast.NewArray()), - ) - - certs, err := getX509CertsFromString(string(input)) - if err != nil { - return iter(invalid) - } - - // Collect the cert verification options - verifyOpt, err := extractVerifyOpts(options) - if err != nil { - return err - } - - verified, err := verifyX509CertificateChain(certs, verifyOpt) - if err != nil { - return iter(invalid) - } - - value, err := ast.InterfaceToValue(verified) - if err != nil { - return err - } - - valid := ast.ArrayTerm( - ast.BooleanTerm(true), - ast.NewTerm(value), - ) - - return iter(valid) -} - -func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err error) { - - for _, key := range options.Keys() { - k, err := ast.JSON(key.Value) - if err != nil { - return verifyOpt, err - } - k, ok := k.(string) - if !ok { - continue - } - - switch k { - case "DNSName": - dns, ok := options.Get(key).Value.(ast.String) - if ok { - verifyOpt.DNSName = strings.Trim(string(dns), "\"") - } else { - return verifyOpt, fmt.Errorf("'DNSName' should be a string") - } - case "CurrentTime": - c, ok := options.Get(key).Value.(ast.Number) - if ok { - nanosecs, ok := c.Int64() - if ok { - verifyOpt.CurrentTime = time.Unix(0, nanosecs) - } else { - return verifyOpt, fmt.Errorf("'CurrentTime' should be a valid int64 number") - } - } else { - return verifyOpt, 
fmt.Errorf("'CurrentTime' should be a number") - } - case "MaxConstraintComparisons": - c, ok := options.Get(key).Value.(ast.Number) - if ok { - maxComparisons, ok := c.Int() - if ok { - verifyOpt.MaxConstraintComparisions = maxComparisons - } else { - return verifyOpt, fmt.Errorf("'MaxConstraintComparisons' should be a valid number") - } - } else { - return verifyOpt, fmt.Errorf("'MaxConstraintComparisons' should be a number") - } - case "KeyUsages": - type forEach interface { - Foreach(func(*ast.Term)) - } - var ks forEach - switch options.Get(key).Value.(type) { - case *ast.Array: - ks = options.Get(key).Value.(*ast.Array) - case ast.Set: - ks = options.Get(key).Value.(ast.Set) - default: - return verifyOpt, fmt.Errorf("'KeyUsages' should be an Array or Set") - } - - // Collect the x509.ExtKeyUsage values by looking up the - // mapping of key usage strings to x509.ExtKeyUsage - var invalidKUsgs []string - ks.Foreach(func(t *ast.Term) { - u, ok := t.Value.(ast.String) - if ok { - v := strings.Trim(string(u), "\"") - if k, ok := allowedKeyUsages[v]; ok { - verifyOpt.KeyUsages = append(verifyOpt.KeyUsages, k) - } else { - invalidKUsgs = append(invalidKUsgs, v) - } - } - }) - if len(invalidKUsgs) > 0 { - return x509.VerifyOptions{}, fmt.Errorf("invalid entries for 'KeyUsages' found: %s", invalidKUsgs) - } - default: - return verifyOpt, fmt.Errorf("invalid key option") - } - - } - - return verifyOpt, nil -} - -func builtinCryptoX509ParseKeyPair(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - certificate, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - key, err := builtins.StringOperand(operands[1].Value, 1) - if err != nil { - return err - } - - certs, err := getTLSx509KeyPairFromString([]byte(certificate), []byte(key)) - if err != nil { - return err - } - v, err := ast.InterfaceToValue(certs) - if err != nil { - return err - } - - return iter(ast.NewTerm(v)) -} - -func 
builtinCryptoX509ParseCertificateRequest(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - input, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - // data to be passed to x509.ParseCertificateRequest - bytes := []byte(input) - - // if the input is not a PEM string, attempt to decode b64 - if str := string(input); !strings.HasPrefix(str, "-----BEGIN CERTIFICATE REQUEST-----") { - bytes, err = base64.StdEncoding.DecodeString(str) - if err != nil { - return err - } - } - - p, _ := pem.Decode(bytes) - if p != nil && p.Type != blockTypeCertificateRequest { - return fmt.Errorf("invalid PEM-encoded certificate signing request") - } - if p != nil { - bytes = p.Bytes - } - - csr, err := x509.ParseCertificateRequest(bytes) - if err != nil { - return err - } - - bs, err := json.Marshal(csr) - if err != nil { - return err - } - - var x interface{} - if err := util.UnmarshalJSON(bs, &x); err != nil { - return err - } - - v, err := ast.InterfaceToValue(x) - if err != nil { - return err - } - - return iter(ast.NewTerm(v)) -} - -func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - var x interface{} - - a := operands[0].Value - input, err := builtins.StringOperand(a, 1) - if err != nil { - return err - } - - // get the raw private key - pemDataString := string(input) - - if pemDataString == "" { - return fmt.Errorf("input PEM data was empty") - } - - // This built in must be supplied a valid PEM or base64 encoded string. - // If the input is not a PEM string, attempt to decode b64. 
- // If the base64 decode fails - this is an error - if !strings.HasPrefix(pemDataString, "-----BEGIN") { - bs, err := base64.StdEncoding.DecodeString(pemDataString) - if err != nil { - return err - } - pemDataString = string(bs) - } - - rawKeys, err := getPrivateKeysFromPEMData(pemDataString) - if err != nil { - return err - } - - if len(rawKeys) == 0 { - return iter(ast.NullTerm()) - } - - key, err := jwk.New(rawKeys[0]) - if err != nil { - return err - } - - jsonKey, err := json.Marshal(key) - if err != nil { - return err - } - - if err := util.UnmarshalJSON(jsonKey, &x); err != nil { - return err - } - - value, err := ast.InterfaceToValue(x) - if err != nil { - return err - } - - return iter(ast.NewTerm(value)) -} - -func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - a := operands[0].Value - input, err := builtins.StringOperand(a, 1) - if err != nil { - return err - } - - if string(input) == "" { - return iter(ast.NullTerm()) - } - - // get the raw private key - rawKeys, err := getPrivateKeysFromPEMData(string(input)) - if err != nil { - return err - } - - if len(rawKeys) == 0 { - return iter(ast.NewTerm(ast.NewArray())) - } - - bs, err := json.Marshal(rawKeys) - if err != nil { - return err - } - - var x interface{} - if err := util.UnmarshalJSON(bs, &x); err != nil { - return err - } - - value, err := ast.InterfaceToValue(x) - if err != nil { - return err - } - - return iter(ast.NewTerm(value)) -} - -func hashHelper(a ast.Value, h func(ast.String) string) (ast.Value, error) { - s, err := builtins.StringOperand(a, 1) - if err != nil { - return nil, err - } - return ast.String(h(s)), nil -} - -func builtinCryptoMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", md5.Sum([]byte(s))) }) - if err != nil { - return err - } - return iter(ast.NewTerm(res)) -} - -func 
builtinCryptoSha1(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", sha1.Sum([]byte(s))) }) - if err != nil { - return err - } - return iter(ast.NewTerm(res)) -} - -func builtinCryptoSha256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", sha256.Sum256([]byte(s))) }) - if err != nil { - return err - } - return iter(ast.NewTerm(res)) -} - -func hmacHelper(operands []*ast.Term, iter func(*ast.Term) error, h func() hash.Hash) error { - a1 := operands[0].Value - message, err := builtins.StringOperand(a1, 1) - if err != nil { - return err - } - - a2 := operands[1].Value - key, err := builtins.StringOperand(a2, 2) - if err != nil { - return err - } - - mac := hmac.New(h, []byte(key)) - mac.Write([]byte(message)) - messageDigest := mac.Sum(nil) - - return iter(ast.StringTerm(fmt.Sprintf("%x", messageDigest))) -} - -func builtinCryptoHmacMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - return hmacHelper(operands, iter, md5.New) -} - -func builtinCryptoHmacSha1(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - return hmacHelper(operands, iter, sha1.New) -} - -func builtinCryptoHmacSha256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - return hmacHelper(operands, iter, sha256.New) -} - -func builtinCryptoHmacSha512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - return hmacHelper(operands, iter, sha512.New) -} - -func builtinCryptoHmacEqual(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - a1 := operands[0].Value - mac1, err := builtins.StringOperand(a1, 1) - if err != nil { - return err - } - - a2 := operands[1].Value - mac2, err := builtins.StringOperand(a2, 2) - if err != nil { - 
return err - } - - res := hmac.Equal([]byte(mac1), []byte(mac2)) - - return iter(ast.BooleanTerm(res)) -} - -func init() { - RegisterBuiltinFunc(ast.CryptoX509ParseCertificates.Name, builtinCryptoX509ParseCertificates) - RegisterBuiltinFunc(ast.CryptoX509ParseAndVerifyCertificates.Name, builtinCryptoX509ParseAndVerifyCertificates) - RegisterBuiltinFunc(ast.CryptoX509ParseAndVerifyCertificatesWithOptions.Name, builtinCryptoX509ParseAndVerifyCertificatesWithOptions) - RegisterBuiltinFunc(ast.CryptoMd5.Name, builtinCryptoMd5) - RegisterBuiltinFunc(ast.CryptoSha1.Name, builtinCryptoSha1) - RegisterBuiltinFunc(ast.CryptoSha256.Name, builtinCryptoSha256) - RegisterBuiltinFunc(ast.CryptoX509ParseCertificateRequest.Name, builtinCryptoX509ParseCertificateRequest) - RegisterBuiltinFunc(ast.CryptoX509ParseRSAPrivateKey.Name, builtinCryptoJWKFromPrivateKey) - RegisterBuiltinFunc(ast.CryptoParsePrivateKeys.Name, builtinCryptoParsePrivateKeys) - RegisterBuiltinFunc(ast.CryptoX509ParseKeyPair.Name, builtinCryptoX509ParseKeyPair) - RegisterBuiltinFunc(ast.CryptoHmacMd5.Name, builtinCryptoHmacMd5) - RegisterBuiltinFunc(ast.CryptoHmacSha1.Name, builtinCryptoHmacSha1) - RegisterBuiltinFunc(ast.CryptoHmacSha256.Name, builtinCryptoHmacSha256) - RegisterBuiltinFunc(ast.CryptoHmacSha512.Name, builtinCryptoHmacSha512) - RegisterBuiltinFunc(ast.CryptoHmacEqual.Name, builtinCryptoHmacEqual) -} - -func verifyX509CertificateChain(certs []*x509.Certificate, vo x509.VerifyOptions) ([]*x509.Certificate, error) { - if len(certs) < 2 { - return nil, builtins.NewOperandErr(1, "must supply at least two certificates to be able to verify") - } - - // first cert is the root - roots := x509.NewCertPool() - roots.AddCert(certs[0]) - - // all other certs except the last are intermediates - intermediates := x509.NewCertPool() - for i := 1; i < len(certs)-1; i++ { - intermediates.AddCert(certs[i]) - } - - // last cert is the leaf - leaf := certs[len(certs)-1] - - // verify the cert chain back to the root - 
verifyOpts := x509.VerifyOptions{ - Roots: roots, - Intermediates: intermediates, - DNSName: vo.DNSName, - CurrentTime: vo.CurrentTime, - KeyUsages: vo.KeyUsages, - MaxConstraintComparisions: vo.MaxConstraintComparisions, - } - chains, err := leaf.Verify(verifyOpts) - if err != nil { - return nil, err - } - - return chains[0], nil -} - -func getX509CertsFromString(certs string) ([]*x509.Certificate, error) { - // if the input is PEM handle that - if strings.HasPrefix(certs, "-----BEGIN") { - return getX509CertsFromPem([]byte(certs)) - } - - // assume input is base64 if not PEM - b64, err := base64.StdEncoding.DecodeString(certs) - if err != nil { - return nil, err - } - - // handle if the decoded base64 contains PEM rather than the expected DER - if bytes.HasPrefix(b64, []byte("-----BEGIN")) { - return getX509CertsFromPem(b64) - } - - // otherwise assume the contents are DER - return x509.ParseCertificates(b64) -} - -func getX509CertsFromPem(pemBlocks []byte) ([]*x509.Certificate, error) { - var decodedCerts []byte - for len(pemBlocks) > 0 { - p, r := pem.Decode(pemBlocks) - if p != nil && p.Type != blockTypeCertificate { - return nil, fmt.Errorf("PEM block type is '%s', expected %s", p.Type, blockTypeCertificate) - } - - if p == nil { - break - } - - pemBlocks = r - decodedCerts = append(decodedCerts, p.Bytes...) - } - - return x509.ParseCertificates(decodedCerts) -} - -func getPrivateKeysFromPEMData(pemData string) ([]crypto.PrivateKey, error) { - pemBlockString := pemData - - var validPrivateKeys []crypto.PrivateKey - - // if the input is base64, decode it - bs, err := base64.StdEncoding.DecodeString(pemBlockString) - if err == nil { - pemBlockString = string(bs) - } - bs = []byte(pemBlockString) - - for len(bs) > 0 { - inputLen := len(bs) - var block *pem.Block - block, bs = pem.Decode(bs) - if block == nil && len(bs) == 0 { - break - } - // should only happen if end of input is not a valid PEM block. See TestParseRSAPrivateKeyVariedPemInput. 
- if inputLen == len(bs) { - break - } - - if block == nil { - continue - } - - switch block.Type { - case blockTypeRSAPrivateKey: - parsedKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, err - } - validPrivateKeys = append(validPrivateKeys, parsedKey) - case blockTypePrivateKey: - parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) - if err != nil { - return nil, err - } - validPrivateKeys = append(validPrivateKeys, parsedKey) - case blockTypeEcPrivateKey: - parsedKey, err := x509.ParseECPrivateKey(block.Bytes) - if err != nil { - return nil, err - } - validPrivateKeys = append(validPrivateKeys, parsedKey) - } - } - return validPrivateKeys, nil -} - -// addCACertsFromFile adds CA certificates from filePath into the given pool. -// If pool is nil, it creates a new x509.CertPool. pool is returned. -func addCACertsFromFile(pool *x509.CertPool, filePath string) (*x509.CertPool, error) { - if pool == nil { - pool = x509.NewCertPool() - } - - caCert, err := readCertFromFile(filePath) - if err != nil { - return nil, err - } - - if ok := pool.AppendCertsFromPEM(caCert); !ok { - return nil, fmt.Errorf("could not append CA certificates from %q", filePath) - } - - return pool, nil -} - -// addCACertsFromBytes adds CA certificates from pemBytes into the given pool. -// If pool is nil, it creates a new x509.CertPool. pool is returned. -func addCACertsFromBytes(pool *x509.CertPool, pemBytes []byte) (*x509.CertPool, error) { - if pool == nil { - pool = x509.NewCertPool() - } - - if ok := pool.AppendCertsFromPEM(pemBytes); !ok { - return nil, fmt.Errorf("could not append certificates") - } - - return pool, nil -} - -// addCACertsFromEnv adds CA certificates from the environment variable named -// by envName into the given pool. If pool is nil, it creates a new x509.CertPool. -// pool is returned. 
-func addCACertsFromEnv(pool *x509.CertPool, envName string) (*x509.CertPool, error) { - pool, err := addCACertsFromBytes(pool, []byte(os.Getenv(envName))) - if err != nil { - return nil, fmt.Errorf("could not add CA certificates from envvar %q: %w", envName, err) - } - - return pool, err -} - -// ReadCertFromFile reads a cert from file -func readCertFromFile(localCertFile string) ([]byte, error) { - // Read in the cert file - certPEM, err := os.ReadFile(localCertFile) - if err != nil { - return nil, err - } - return certPEM, nil -} - -func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls.Certificate, error) { - - if !strings.HasPrefix(string(certPemBlock), "-----BEGIN") { - s, err := base64.StdEncoding.DecodeString(string(certPemBlock)) - if err != nil { - return nil, err - } - certPemBlock = s - } - - if !strings.HasPrefix(string(keyPemBlock), "-----BEGIN") { - s, err := base64.StdEncoding.DecodeString(string(keyPemBlock)) - if err != nil { - return nil, err - } - keyPemBlock = s - } - - // we assume it a DER certificate and try to convert it to a PEM. - if !bytes.HasPrefix(certPemBlock, []byte("-----BEGIN")) { - - pemBlock := &pem.Block{ - Type: "CERTIFICATE", - Bytes: certPemBlock, - } - - var buf bytes.Buffer - if err := pem.Encode(&buf, pemBlock); err != nil { - return nil, err - } - certPemBlock = buf.Bytes() - - } - // we assume it a DER key and try to convert it to a PEM. 
- if !bytes.HasPrefix(keyPemBlock, []byte("-----BEGIN")) { - pemBlock := &pem.Block{ - Type: "PRIVATE KEY", - Bytes: keyPemBlock, - } - var buf bytes.Buffer - if err := pem.Encode(&buf, pemBlock); err != nil { - return nil, err - } - keyPemBlock = buf.Bytes() - } - - cert, err := tls.X509KeyPair(certPemBlock, keyPemBlock) - if err != nil { - return nil, err - } - - return &cert, nil -} - -// ReadKeyFromFile reads a key from file -func readKeyFromFile(localKeyFile string) ([]byte, error) { - // Read in the cert file - key, err := os.ReadFile(localKeyFile) - if err != nil { - return nil, err - } - return key, nil -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/doc.go b/vendor/github.com/open-policy-agent/opa/topdown/doc.go index 9aa7aa45c5..a303ef7886 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/doc.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/doc.go @@ -7,4 +7,8 @@ // The topdown implementation is a modified version of the standard top-down // evaluation algorithm used in Datalog. References and comprehensions are // evaluated eagerly while all other terms are evaluated lazily. +// +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
package topdown diff --git a/vendor/github.com/open-policy-agent/opa/topdown/errors.go b/vendor/github.com/open-policy-agent/opa/topdown/errors.go index 918df6c853..47853ec6d1 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/errors.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/errors.go @@ -5,145 +5,50 @@ package topdown import ( - "errors" - "fmt" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // Halt is a special error type that built-in function implementations return to indicate // that policy evaluation should stop immediately. -type Halt struct { - Err error -} - -func (h Halt) Error() string { - return h.Err.Error() -} - -func (h Halt) Unwrap() error { return h.Err } +type Halt = v1.Halt // Error is the error type returned by the Eval and Query functions when // an evaluation error occurs. -type Error struct { - Code string `json:"code"` - Message string `json:"message"` - Location *ast.Location `json:"location,omitempty"` - err error `json:"-"` -} +type Error = v1.Error const ( // InternalErr represents an unknown evaluation error. - InternalErr string = "eval_internal_error" + InternalErr = v1.InternalErr // CancelErr indicates the evaluation process was cancelled. - CancelErr string = "eval_cancel_error" + CancelErr = v1.CancelErr // ConflictErr indicates a conflict was encountered during evaluation. For // instance, a conflict occurs if a rule produces multiple, differing values // for the same key in an object. Conflict errors indicate the policy does // not account for the data loaded into the policy engine. - ConflictErr string = "eval_conflict_error" + ConflictErr = v1.ConflictErr // TypeErr indicates evaluation stopped because an expression was applied to // a value of an inappropriate type. 
- TypeErr string = "eval_type_error" + TypeErr = v1.TypeErr // BuiltinErr indicates a built-in function received a semantically invalid // input or encountered some kind of runtime error, e.g., connection // timeout, connection refused, etc. - BuiltinErr string = "eval_builtin_error" + BuiltinErr = v1.BuiltinErr // WithMergeErr indicates that the real and replacement data could not be merged. - WithMergeErr string = "eval_with_merge_error" + WithMergeErr = v1.WithMergeErr ) // IsError returns true if the err is an Error. func IsError(err error) bool { - var e *Error - return errors.As(err, &e) + return v1.IsError(err) } // IsCancel returns true if err was caused by cancellation. func IsCancel(err error) bool { - return errors.Is(err, &Error{Code: CancelErr}) -} - -// Is allows matching topdown errors using errors.Is (see IsCancel). -func (e *Error) Is(target error) bool { - var t *Error - if errors.As(target, &t) { - return (t.Code == "" || e.Code == t.Code) && - (t.Message == "" || e.Message == t.Message) && - (t.Location == nil || t.Location.Compare(e.Location) == 0) - } - return false -} - -func (e *Error) Error() string { - msg := fmt.Sprintf("%v: %v", e.Code, e.Message) - - if e.Location != nil { - msg = e.Location.String() + ": " + msg - } - - return msg -} - -func (e *Error) Wrap(err error) *Error { - e.err = err - return e -} - -func (e *Error) Unwrap() error { - return e.err -} - -func functionConflictErr(loc *ast.Location) error { - return &Error{ - Code: ConflictErr, - Location: loc, - Message: "functions must not produce multiple outputs for same inputs", - } -} - -func completeDocConflictErr(loc *ast.Location) error { - return &Error{ - Code: ConflictErr, - Location: loc, - Message: "complete rules must not produce multiple outputs", - } -} - -func objectDocKeyConflictErr(loc *ast.Location) error { - return &Error{ - Code: ConflictErr, - Location: loc, - Message: "object keys must be unique", - } -} - -func unsupportedBuiltinErr(loc *ast.Location) 
error { - return &Error{ - Code: InternalErr, - Location: loc, - Message: "unsupported built-in", - } -} - -func mergeConflictErr(loc *ast.Location) error { - return &Error{ - Code: WithMergeErr, - Location: loc, - Message: "real and replacement data could not be merged", - } -} - -func internalErr(loc *ast.Location, msg string) error { - return &Error{ - Code: InternalErr, - Location: loc, - Message: msg, - } + return v1.IsCancel(err) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/graphql.go b/vendor/github.com/open-policy-agent/opa/topdown/graphql.go deleted file mode 100644 index 8fb1b58a76..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/graphql.go +++ /dev/null @@ -1,485 +0,0 @@ -// Copyright 2022 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "encoding/json" - "fmt" - "strings" - - gqlast "github.com/open-policy-agent/opa/internal/gqlparser/ast" - gqlparser "github.com/open-policy-agent/opa/internal/gqlparser/parser" - gqlvalidator "github.com/open-policy-agent/opa/internal/gqlparser/validator" - - // Side-effecting import. Triggers GraphQL library's validation rule init() functions. - _ "github.com/open-policy-agent/opa/internal/gqlparser/validator/rules" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -// Parses a GraphQL schema, and returns the GraphQL AST for the schema. -func parseSchema(schema string) (*gqlast.SchemaDocument, error) { - // NOTE(philipc): We don't include the "built-in schema defs" from the - // underlying graphql parsing library here, because those definitions - // generate enormous AST blobs. In the future, if there is demand for - // a "full-spec" version of schema ASTs, we may need to provide a - // version of this function that includes the built-in schema - // definitions. 
- schemaAST, err := gqlparser.ParseSchema(&gqlast.Source{Input: schema}) - if err != nil { - errorParts := strings.SplitN(err.Error(), ":", 4) - msg := strings.TrimLeft(errorParts[3], " ") - return nil, fmt.Errorf("%s in GraphQL string at location %s:%s", msg, errorParts[1], errorParts[2]) - } - return schemaAST, nil -} - -// Parses a GraphQL query, and returns the GraphQL AST for the query. -func parseQuery(query string) (*gqlast.QueryDocument, error) { - queryAST, err := gqlparser.ParseQuery(&gqlast.Source{Input: query}) - if err != nil { - errorParts := strings.SplitN(err.Error(), ":", 4) - msg := strings.TrimLeft(errorParts[3], " ") - return nil, fmt.Errorf("%s in GraphQL string at location %s:%s", msg, errorParts[1], errorParts[2]) - } - return queryAST, nil -} - -// Validates a GraphQL query against a schema, and returns an error. -// In this case, we get a wrappered error list type, and pluck out -// just the first error message in the list. -func validateQuery(schema *gqlast.Schema, query *gqlast.QueryDocument) error { - // Validate the query against the schema, erroring if there's an issue. - err := gqlvalidator.Validate(schema, query) - if err != nil { - // We use strings.TrimSuffix to remove the '.' characters that the library - // authors include on most of their validation errors. This should be safe, - // since variable names in their error messages are usually quoted, and - // this affects only the last character(s) in the string. - // NOTE(philipc): We know the error location will be in the query string, - // because schema validation always happens before this function is called. 
- errorParts := strings.SplitN(err.Error(), ":", 4) - msg := strings.TrimSuffix(strings.TrimLeft(errorParts[3], " "), ".\n") - return fmt.Errorf("%s in GraphQL query string at location %s:%s", msg, errorParts[1], errorParts[2]) - } - return nil -} - -func getBuiltinSchema() *gqlast.SchemaDocument { - schema, err := gqlparser.ParseSchema(gqlvalidator.Prelude) - if err != nil { - panic(fmt.Errorf("Error in gqlparser Prelude (should be impossible): %w", err)) - } - return schema -} - -// NOTE(philipc): This function expects *validated* schema documents, and will break -// if it is fed arbitrary structures. -func mergeSchemaDocuments(docA *gqlast.SchemaDocument, docB *gqlast.SchemaDocument) *gqlast.SchemaDocument { - ast := &gqlast.SchemaDocument{} - ast.Merge(docA) - ast.Merge(docB) - return ast -} - -// Converts a SchemaDocument into a gqlast.Schema object that can be used for validation. -// It merges in the builtin schema typedefs exactly as gqltop.LoadSchema did internally. -func convertSchema(schemaDoc *gqlast.SchemaDocument) (*gqlast.Schema, error) { - // Merge builtin schema + schema we were provided. - builtinsSchemaDoc := getBuiltinSchema() - mergedSchemaDoc := mergeSchemaDocuments(builtinsSchemaDoc, schemaDoc) - schema, err := gqlvalidator.ValidateSchemaDocument(mergedSchemaDoc) - if err != nil { - return nil, fmt.Errorf("Error in gqlparser SchemaDocument to Schema conversion: %w", err) - } - return schema, nil -} - -// Converts an ast.Object into a gqlast.QueryDocument object. -func objectToQueryDocument(value ast.Object) (*gqlast.QueryDocument, error) { - // Convert ast.Term to interface{} for JSON encoding below. - asJSON, err := ast.JSON(value) - if err != nil { - return nil, err - } - // Marshal to JSON. - bs, err := json.Marshal(asJSON) - if err != nil { - return nil, err - } - // Unmarshal from JSON -> gqlast.QueryDocument. 
- var result gqlast.QueryDocument - err = json.Unmarshal(bs, &result) - if err != nil { - return nil, err - } - return &result, nil -} - -// Converts an ast.Object into a gqlast.SchemaDocument object. -func objectToSchemaDocument(value ast.Object) (*gqlast.SchemaDocument, error) { - // Convert ast.Term to interface{} for JSON encoding below. - asJSON, err := ast.JSON(value) - if err != nil { - return nil, err - } - // Marshal to JSON. - bs, err := json.Marshal(asJSON) - if err != nil { - return nil, err - } - // Unmarshal from JSON -> gqlast.SchemaDocument. - var result gqlast.SchemaDocument - err = json.Unmarshal(bs, &result) - if err != nil { - return nil, err - } - return &result, nil -} - -// Recursively traverses an AST that has been run through InterfaceToValue, -// and prunes away the fields with null or empty values, and all `Position` -// structs. -// NOTE(philipc): We currently prune away null values to reduce the level -// of clutter in the returned AST objects. In the future, if there is demand -// for ASTs that have a more regular/fixed structure, we may need to provide -// a "raw" version of the AST, where we still prune away the `Position` -// structs, but leave in the null fields. -func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { - // We iterate over the Value we've been provided, and recurse down - // in the case of complex types, such as Arrays/Objects. - // We are guaranteed to only have to deal with standard JSON types, - // so this is much less ugly than what we'd need for supporting every - // extant ast type! 
- switch x := value.(type) { - case *ast.Array: - result := ast.NewArray() - // Iterate over the array's elements, and do the following: - // - Drop any Nulls - // - Drop any any empty object/array value (after running the pruner) - for i := 0; i < x.Len(); i++ { - vTerm := x.Elem(i) - switch v := vTerm.Value.(type) { - case ast.Null: - continue - case *ast.Array: - // Safe, because we knew the type before going to prune it. - va := pruneIrrelevantGraphQLASTNodes(v).(*ast.Array) - if va.Len() > 0 { - result = result.Append(ast.NewTerm(va)) - } - case ast.Object: - // Safe, because we knew the type before going to prune it. - vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object) - if len(vo.Keys()) > 0 { - result = result.Append(ast.NewTerm(vo)) - } - default: - result = result.Append(vTerm) - } - } - return result - case ast.Object: - result := ast.NewObject() - // Iterate over our object's keys, and do the following: - // - Drop "Position". - // - Drop any key with a Null value. - // - Drop any key with an empty object/array value (after running the pruner) - keys := x.Keys() - for _, k := range keys { - // We drop the "Position" objects because we don't need the - // source-backref/location info they provide for policy rules. - // Note that keys are ast.Strings. - if ast.String("Position").Equal(k.Value) { - continue - } - vTerm := x.Get(k) - switch v := vTerm.Value.(type) { - case ast.Null: - continue - case *ast.Array: - // Safe, because we knew the type before going to prune it. - va := pruneIrrelevantGraphQLASTNodes(v).(*ast.Array) - if va.Len() > 0 { - result.Insert(k, ast.NewTerm(va)) - } - case ast.Object: - // Safe, because we knew the type before going to prune it. - vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object) - if len(vo.Keys()) > 0 { - result.Insert(k, ast.NewTerm(vo)) - } - default: - result.Insert(k, vTerm) - } - } - return result - default: - return x - } -} - -// Reports errors from parsing/validation. 
-func builtinGraphQLParse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - var queryDoc *gqlast.QueryDocument - var schemaDoc *gqlast.SchemaDocument - var err error - - // Parse/translate query if it's a string/object. - switch x := operands[0].Value.(type) { - case ast.String: - queryDoc, err = parseQuery(string(x)) - case ast.Object: - queryDoc, err = objectToQueryDocument(x) - default: - // Error if wrong type. - return builtins.NewOperandTypeErr(0, x, "string", "object") - } - if err != nil { - return err - } - - // Parse/translate schema if it's a string/object. - switch x := operands[1].Value.(type) { - case ast.String: - schemaDoc, err = parseSchema(string(x)) - case ast.Object: - schemaDoc, err = objectToSchemaDocument(x) - default: - // Error if wrong type. - return builtins.NewOperandTypeErr(1, x, "string", "object") - } - if err != nil { - return err - } - - // Transform the ASTs into Objects. - queryASTValue, err := ast.InterfaceToValue(queryDoc) - if err != nil { - return err - } - schemaASTValue, err := ast.InterfaceToValue(schemaDoc) - if err != nil { - return err - } - - // Validate the query against the schema, erroring if there's an issue. - schema, err := convertSchema(schemaDoc) - if err != nil { - return err - } - if err := validateQuery(schema, queryDoc); err != nil { - return err - } - - // Recursively remove irrelevant AST structures. - queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object)) - querySchema := pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) - - // Construct return value. - verified := ast.ArrayTerm( - ast.NewTerm(queryResult), - ast.NewTerm(querySchema), - ) - - return iter(verified) -} - -// Returns default value when errors occur. 
-func builtinGraphQLParseAndVerify(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - var queryDoc *gqlast.QueryDocument - var schemaDoc *gqlast.SchemaDocument - var err error - - unverified := ast.ArrayTerm( - ast.BooleanTerm(false), - ast.NewTerm(ast.NewObject()), - ast.NewTerm(ast.NewObject()), - ) - - // Parse/translate query if it's a string/object. - switch x := operands[0].Value.(type) { - case ast.String: - queryDoc, err = parseQuery(string(x)) - case ast.Object: - queryDoc, err = objectToQueryDocument(x) - default: - // Error if wrong type. - return iter(unverified) - } - if err != nil { - return iter(unverified) - } - - // Parse/translate schema if it's a string/object. - switch x := operands[1].Value.(type) { - case ast.String: - schemaDoc, err = parseSchema(string(x)) - case ast.Object: - schemaDoc, err = objectToSchemaDocument(x) - default: - // Error if wrong type. - return iter(unverified) - } - if err != nil { - return iter(unverified) - } - - // Transform the ASTs into Objects. - queryASTValue, err := ast.InterfaceToValue(queryDoc) - if err != nil { - return iter(unverified) - } - schemaASTValue, err := ast.InterfaceToValue(schemaDoc) - if err != nil { - return iter(unverified) - } - - // Validate the query against the schema, erroring if there's an issue. - schema, err := convertSchema(schemaDoc) - if err != nil { - return iter(unverified) - } - if err := validateQuery(schema, queryDoc); err != nil { - return iter(unverified) - } - - // Recursively remove irrelevant AST structures. - queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object)) - querySchema := pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) - - // Construct return value. 
- verified := ast.ArrayTerm( - ast.BooleanTerm(true), - ast.NewTerm(queryResult), - ast.NewTerm(querySchema), - ) - - return iter(verified) -} - -func builtinGraphQLParseQuery(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - raw, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - // Get the highly-nested AST struct, along with any errors generated. - query, err := parseQuery(string(raw)) - if err != nil { - return err - } - - // Transform the AST into an Object. - value, err := ast.InterfaceToValue(query) - if err != nil { - return err - } - - // Recursively remove irrelevant AST structures. - result := pruneIrrelevantGraphQLASTNodes(value.(ast.Object)) - - return iter(ast.NewTerm(result)) -} - -func builtinGraphQLParseSchema(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - raw, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - // Get the highly-nested AST struct, along with any errors generated. - schema, err := parseSchema(string(raw)) - if err != nil { - return err - } - - // Transform the AST into an Object. - value, err := ast.InterfaceToValue(schema) - if err != nil { - return err - } - - // Recursively remove irrelevant AST structures. - result := pruneIrrelevantGraphQLASTNodes(value.(ast.Object)) - - return iter(ast.NewTerm(result)) -} - -func builtinGraphQLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - var queryDoc *gqlast.QueryDocument - var schemaDoc *gqlast.SchemaDocument - var err error - - switch x := operands[0].Value.(type) { - case ast.String: - queryDoc, err = parseQuery(string(x)) - case ast.Object: - queryDoc, err = objectToQueryDocument(x) - default: - // Error if wrong type. 
- return iter(ast.BooleanTerm(false)) - } - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - switch x := operands[1].Value.(type) { - case ast.String: - schemaDoc, err = parseSchema(string(x)) - case ast.Object: - schemaDoc, err = objectToSchemaDocument(x) - default: - // Error if wrong type. - return iter(ast.BooleanTerm(false)) - } - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - // Validate the query against the schema, erroring if there's an issue. - schema, err := convertSchema(schemaDoc) - if err != nil { - return iter(ast.BooleanTerm(false)) - } - if err := validateQuery(schema, queryDoc); err != nil { - return iter(ast.BooleanTerm(false)) - } - - // If we got this far, the GraphQL query passed validation. - return iter(ast.BooleanTerm(true)) -} - -func builtinGraphQLSchemaIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - var schemaDoc *gqlast.SchemaDocument - var err error - - switch x := operands[0].Value.(type) { - case ast.String: - schemaDoc, err = parseSchema(string(x)) - case ast.Object: - schemaDoc, err = objectToSchemaDocument(x) - default: - // Error if wrong type. 
- return iter(ast.BooleanTerm(false)) - } - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - // Validate the schema, this determines the result - _, err = convertSchema(schemaDoc) - return iter(ast.BooleanTerm(err == nil)) -} - -func init() { - RegisterBuiltinFunc(ast.GraphQLParse.Name, builtinGraphQLParse) - RegisterBuiltinFunc(ast.GraphQLParseAndVerify.Name, builtinGraphQLParseAndVerify) - RegisterBuiltinFunc(ast.GraphQLParseQuery.Name, builtinGraphQLParseQuery) - RegisterBuiltinFunc(ast.GraphQLParseSchema.Name, builtinGraphQLParseSchema) - RegisterBuiltinFunc(ast.GraphQLIsValid.Name, builtinGraphQLIsValid) - RegisterBuiltinFunc(ast.GraphQLSchemaIsValid.Name, builtinGraphQLSchemaIsValid) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http.go b/vendor/github.com/open-policy-agent/opa/topdown/http.go index 18bfd3c722..693ea4048c 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/http.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/http.go @@ -5,1616 +5,13 @@ package topdown import ( - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "math" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "time" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" -) - -type cachingMode string - -const ( - defaultHTTPRequestTimeoutEnv = "HTTP_SEND_TIMEOUT" - defaultCachingMode cachingMode = "serialized" - cachingModeDeserialized cachingMode = "deserialized" -) - -var defaultHTTPRequestTimeout = time.Second * 5 - -var allowedKeyNames = [...]string{ - "method", - "url", - "body", - "enable_redirect", - "force_json_decode", - "force_yaml_decode", - "headers", - "raw_body", - "tls_use_system_certs", - "tls_ca_cert", - "tls_ca_cert_file", - 
"tls_ca_cert_env_variable", - "tls_client_cert", - "tls_client_cert_file", - "tls_client_cert_env_variable", - "tls_client_key", - "tls_client_key_file", - "tls_client_key_env_variable", - "tls_insecure_skip_verify", - "tls_server_name", - "timeout", - "cache", - "force_cache", - "force_cache_duration_seconds", - "raise_error", - "caching_mode", - "max_retry_attempts", - "cache_ignored_headers", -} - -// ref: https://www.rfc-editor.org/rfc/rfc7231#section-6.1 -var cacheableHTTPStatusCodes = [...]int{ - http.StatusOK, - http.StatusNonAuthoritativeInfo, - http.StatusNoContent, - http.StatusPartialContent, - http.StatusMultipleChoices, - http.StatusMovedPermanently, - http.StatusNotFound, - http.StatusMethodNotAllowed, - http.StatusGone, - http.StatusRequestURITooLong, - http.StatusNotImplemented, -} - -var ( - allowedKeys = ast.NewSet() - cacheableCodes = ast.NewSet() - requiredKeys = ast.NewSet(ast.StringTerm("method"), ast.StringTerm("url")) - httpSendLatencyMetricKey = "rego_builtin_" + strings.ReplaceAll(ast.HTTPSend.Name, ".", "_") - httpSendInterQueryCacheHits = httpSendLatencyMetricKey + "_interquery_cache_hits" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) -type httpSendKey string - const ( - // httpSendBuiltinCacheKey is the key in the builtin context cache that - // points to the http.send() specific cache resides at. - httpSendBuiltinCacheKey httpSendKey = "HTTP_SEND_CACHE_KEY" - // HTTPSendInternalErr represents a runtime evaluation error. - HTTPSendInternalErr string = "eval_http_send_internal_error" + HTTPSendInternalErr = v1.HTTPSendInternalErr // HTTPSendNetworkErr represents a network error. - HTTPSendNetworkErr string = "eval_http_send_network_error" - - // minRetryDelay is amount of time to backoff after the first failure. - minRetryDelay = time.Millisecond * 100 - - // maxRetryDelay is the upper bound of backoff delay. 
- maxRetryDelay = time.Second * 60 + HTTPSendNetworkErr = v1.HTTPSendNetworkErr ) - -func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - obj, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { - return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) - } - - raiseError, err := getRaiseErrorValue(obj) - if err != nil { - return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) - } - - req, err := validateHTTPRequestOperand(operands[0], 1) - if err != nil { - if raiseError { - return handleHTTPSendErr(bctx, err) - } - - return iter(generateRaiseErrorResult(handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err))) - } - - result, err := getHTTPResponse(bctx, req) - if err != nil { - if raiseError { - return handleHTTPSendErr(bctx, err) - } - - result = generateRaiseErrorResult(err) - } - return iter(result) -} - -func generateRaiseErrorResult(err error) *ast.Term { - obj := ast.NewObject() - obj.Insert(ast.StringTerm("status_code"), ast.IntNumberTerm(0)) - - errObj := ast.NewObject() - - switch err.(type) { - case *url.Error: - errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendNetworkErr)) - default: - errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendInternalErr)) - } - - errObj.Insert(ast.StringTerm("message"), ast.StringTerm(err.Error())) - obj.Insert(ast.StringTerm("error"), ast.NewTerm(errObj)) - - return ast.NewTerm(obj) -} - -func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { - - bctx.Metrics.Timer(httpSendLatencyMetricKey).Start() - defer bctx.Metrics.Timer(httpSendLatencyMetricKey).Stop() - - key, err := getKeyFromRequest(req) - if err != nil { - return nil, err - } - - reqExecutor, err := newHTTPRequestExecutor(bctx, req, key) - if err != nil { - return nil, err - } - // Check if cache already has a response for this query - // set headers to exclude cache_ignored_headers - resp, err := reqExecutor.CheckCache() - if err != nil 
{ - return nil, err - } - - if resp == nil { - httpResp, err := reqExecutor.ExecuteHTTPRequest() - if err != nil { - reqExecutor.InsertErrorIntoCache(err) - return nil, err - } - defer util.Close(httpResp) - // Add result to intra/inter-query cache. - resp, err = reqExecutor.InsertIntoCache(httpResp) - if err != nil { - return nil, err - } - } - - return ast.NewTerm(resp), nil -} - -// getKeyFromRequest returns a key to be used for caching HTTP responses -// deletes headers from request object mentioned in cache_ignored_headers -func getKeyFromRequest(req ast.Object) (ast.Object, error) { - // deep copy so changes to key do not reflect in the request object - key := req.Copy() - cacheIgnoredHeadersTerm := req.Get(ast.StringTerm("cache_ignored_headers")) - allHeadersTerm := req.Get(ast.StringTerm("headers")) - // skip because no headers to delete - if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil { - // need to explicitly set cache_ignored_headers to null - // equivalent requests might have different sets of exclusion lists - key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm()) - return key, nil - } - var cacheIgnoredHeaders []string - var allHeaders map[string]interface{} - err := ast.As(cacheIgnoredHeadersTerm.Value, &cacheIgnoredHeaders) - if err != nil { - return nil, err - } - err = ast.As(allHeadersTerm.Value, &allHeaders) - if err != nil { - return nil, err - } - for _, header := range cacheIgnoredHeaders { - delete(allHeaders, header) - } - val, err := ast.InterfaceToValue(allHeaders) - if err != nil { - return nil, err - } - key.Insert(ast.StringTerm("headers"), ast.NewTerm(val)) - // remove cache_ignored_headers key - key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm()) - return key, nil -} - -func init() { - createAllowedKeys() - createCacheableHTTPStatusCodes() - initDefaults() - RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend) -} - -func handleHTTPSendErr(bctx BuiltinContext, err error) error { - // Return 
HTTP client timeout errors in a generic error message to avoid confusion about what happened. - // Do not do this if the builtin context was cancelled and is what caused the request to stop. - if urlErr, ok := err.(*url.Error); ok && urlErr.Timeout() && bctx.Context.Err() == nil { - err = fmt.Errorf("%s %s: request timed out", urlErr.Op, urlErr.URL) - } - if err := bctx.Context.Err(); err != nil { - return Halt{ - Err: &Error{ - Code: CancelErr, - Message: fmt.Sprintf("http.send: timed out (%s)", err.Error()), - }, - } - } - return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) -} - -func initDefaults() { - timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv) - if timeoutDuration != "" { - var err error - defaultHTTPRequestTimeout, err = time.ParseDuration(timeoutDuration) - if err != nil { - // If it is set to something not valid don't let the process continue in a state - // that will almost definitely give unexpected results by having it set at 0 - // which means no timeout.. - // This environment variable isn't considered part of the public API. - // TODO(patrick-east): Remove the environment variable - panic(fmt.Sprintf("invalid value for HTTP_SEND_TIMEOUT: %s", err)) - } - } -} - -func validateHTTPRequestOperand(term *ast.Term, pos int) (ast.Object, error) { - - obj, err := builtins.ObjectOperand(term.Value, pos) - if err != nil { - return nil, err - } - - requestKeys := ast.NewSet(obj.Keys()...) - - invalidKeys := requestKeys.Diff(allowedKeys) - if invalidKeys.Len() != 0 { - return nil, builtins.NewOperandErr(pos, "invalid request parameters(s): %v", invalidKeys) - } - - missingKeys := requiredKeys.Diff(requestKeys) - if missingKeys.Len() != 0 { - return nil, builtins.NewOperandErr(pos, "missing required request parameters(s): %v", missingKeys) - } - - return obj, nil - -} - -// canonicalizeHeaders returns a copy of the headers where the keys are in -// canonical HTTP form. 
-func canonicalizeHeaders(headers map[string]interface{}) map[string]interface{} { - canonicalized := map[string]interface{}{} - - for k, v := range headers { - canonicalized[http.CanonicalHeaderKey(k)] = v - } - - return canonicalized -} - -// useSocket examines the url for "unix://" and returns a *http.Transport with -// a DialContext that opens a socket (specified in the http call). -// The url is expected to contain socket=/path/to/socket (url encoded) -// Ex. "unix://localhost/end/point?socket=%2Ftmp%2Fhttp.sock" -func useSocket(rawURL string, tlsConfig *tls.Config) (bool, string, *http.Transport) { - u, err := url.Parse(rawURL) - if err != nil { - return false, "", nil - } - - if u.Scheme != "unix" || u.RawQuery == "" { - return false, rawURL, nil - } - - v, err := url.ParseQuery(u.RawQuery) - if err != nil { - return false, rawURL, nil - } - - // Rewrite URL targeting the UNIX domain socket. - u.Scheme = "http" - - // Extract the path to the socket. - // Only retrieve the first value. Subsequent values are ignored and removed - // to prevent HTTP parameter pollution. 
- socket := v.Get("socket") - v.Del("socket") - u.RawQuery = v.Encode() - - tr := http.DefaultTransport.(*http.Transport).Clone() - tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { - return http.DefaultTransport.(*http.Transport).DialContext(ctx, "unix", socket) - } - tr.TLSClientConfig = tlsConfig - tr.DisableKeepAlives = true - - return true, u.String(), tr -} - -func verifyHost(bctx BuiltinContext, host string) error { - if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil { - return nil - } - - for _, allowed := range bctx.Capabilities.AllowNet { - if allowed == host { - return nil - } - } - - return fmt.Errorf("unallowed host: %s", host) -} - -func verifyURLHost(bctx BuiltinContext, unverifiedURL string) error { - // Eager return to avoid unnecessary URL parsing - if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil { - return nil - } - - parsedURL, err := url.Parse(unverifiedURL) - if err != nil { - return err - } - - host := strings.Split(parsedURL.Host, ":")[0] - - return verifyHost(bctx, host) -} - -func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *http.Client, error) { - var url string - var method string - - // Additional CA certificates loading options. - var tlsCaCert []byte - var tlsCaCertEnvVar string - var tlsCaCertFile string - - // Client TLS certificate and key options. Each input source - // comes in a matched pair. 
- var tlsClientCert []byte - var tlsClientKey []byte - - var tlsClientCertEnvVar string - var tlsClientKeyEnvVar string - - var tlsClientCertFile string - var tlsClientKeyFile string - - var tlsServerName string - var body *bytes.Buffer - var rawBody *bytes.Buffer - var enableRedirect bool - var tlsUseSystemCerts *bool - var tlsConfig tls.Config - var customHeaders map[string]interface{} - var tlsInsecureSkipVerify bool - timeout := defaultHTTPRequestTimeout - - for _, val := range obj.Keys() { - key, err := ast.JSON(val.Value) - if err != nil { - return nil, nil, err - } - - key = key.(string) - - var strVal string - - if s, ok := obj.Get(val).Value.(ast.String); ok { - strVal = strings.Trim(string(s), "\"") - } else { - // Most parameters are strings, so consolidate the type checking. - switch key { - case "method", - "url", - "raw_body", - "tls_ca_cert", - "tls_ca_cert_file", - "tls_ca_cert_env_variable", - "tls_client_cert", - "tls_client_cert_file", - "tls_client_cert_env_variable", - "tls_client_key", - "tls_client_key_file", - "tls_client_key_env_variable", - "tls_server_name": - return nil, nil, fmt.Errorf("%q must be a string", key) - } - } - - switch key { - case "method": - method = strings.ToUpper(strVal) - case "url": - err := verifyURLHost(bctx, strVal) - if err != nil { - return nil, nil, err - } - url = strVal - case "enable_redirect": - enableRedirect, err = strconv.ParseBool(obj.Get(val).String()) - if err != nil { - return nil, nil, err - } - case "body": - bodyVal := obj.Get(val).Value - bodyValInterface, err := ast.JSON(bodyVal) - if err != nil { - return nil, nil, err - } - - bodyValBytes, err := json.Marshal(bodyValInterface) - if err != nil { - return nil, nil, err - } - body = bytes.NewBuffer(bodyValBytes) - case "raw_body": - rawBody = bytes.NewBufferString(strVal) - case "tls_use_system_certs": - tempTLSUseSystemCerts, err := strconv.ParseBool(obj.Get(val).String()) - if err != nil { - return nil, nil, err - } - tlsUseSystemCerts = 
&tempTLSUseSystemCerts - case "tls_ca_cert": - tlsCaCert = []byte(strVal) - case "tls_ca_cert_file": - tlsCaCertFile = strVal - case "tls_ca_cert_env_variable": - tlsCaCertEnvVar = strVal - case "tls_client_cert": - tlsClientCert = []byte(strVal) - case "tls_client_cert_file": - tlsClientCertFile = strVal - case "tls_client_cert_env_variable": - tlsClientCertEnvVar = strVal - case "tls_client_key": - tlsClientKey = []byte(strVal) - case "tls_client_key_file": - tlsClientKeyFile = strVal - case "tls_client_key_env_variable": - tlsClientKeyEnvVar = strVal - case "tls_server_name": - tlsServerName = strVal - case "headers": - headersVal := obj.Get(val).Value - headersValInterface, err := ast.JSON(headersVal) - if err != nil { - return nil, nil, err - } - var ok bool - customHeaders, ok = headersValInterface.(map[string]interface{}) - if !ok { - return nil, nil, fmt.Errorf("invalid type for headers key") - } - case "tls_insecure_skip_verify": - tlsInsecureSkipVerify, err = strconv.ParseBool(obj.Get(val).String()) - if err != nil { - return nil, nil, err - } - case "timeout": - timeout, err = parseTimeout(obj.Get(val).Value) - if err != nil { - return nil, nil, err - } - case "cache", "caching_mode", - "force_cache", "force_cache_duration_seconds", - "force_json_decode", "force_yaml_decode", - "raise_error", "max_retry_attempts", "cache_ignored_headers": // no-op - default: - return nil, nil, fmt.Errorf("invalid parameter %q", key) - } - } - - isTLS := false - client := &http.Client{ - Timeout: timeout, - CheckRedirect: func(*http.Request, []*http.Request) error { - return http.ErrUseLastResponse - }, - } - - if tlsInsecureSkipVerify { - isTLS = true - tlsConfig.InsecureSkipVerify = tlsInsecureSkipVerify - } - - if len(tlsClientCert) > 0 && len(tlsClientKey) > 0 { - cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - - if 
tlsClientCertFile != "" && tlsClientKeyFile != "" { - cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - - if tlsClientCertEnvVar != "" && tlsClientKeyEnvVar != "" { - cert, err := tls.X509KeyPair( - []byte(os.Getenv(tlsClientCertEnvVar)), - []byte(os.Getenv(tlsClientKeyEnvVar))) - if err != nil { - return nil, nil, fmt.Errorf("cannot extract public/private key pair from envvars %q, %q: %w", - tlsClientCertEnvVar, tlsClientKeyEnvVar, err) - } - - isTLS = true - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - - // Use system certs if no CA cert is provided - // or system certs flag is not set - if len(tlsCaCert) == 0 && tlsCaCertFile == "" && tlsCaCertEnvVar == "" && tlsUseSystemCerts == nil { - trueValue := true - tlsUseSystemCerts = &trueValue - } - - // Check the system certificates config first so that we - // load additional certificated into the correct pool. 
- if tlsUseSystemCerts != nil && *tlsUseSystemCerts && runtime.GOOS != "windows" { - pool, err := x509.SystemCertPool() - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.RootCAs = pool - } - - if len(tlsCaCert) != 0 { - tlsCaCert = bytes.Replace(tlsCaCert, []byte("\\n"), []byte("\n"), -1) - pool, err := addCACertsFromBytes(tlsConfig.RootCAs, tlsCaCert) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.RootCAs = pool - } - - if tlsCaCertFile != "" { - pool, err := addCACertsFromFile(tlsConfig.RootCAs, tlsCaCertFile) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.RootCAs = pool - } - - if tlsCaCertEnvVar != "" { - pool, err := addCACertsFromEnv(tlsConfig.RootCAs, tlsCaCertEnvVar) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.RootCAs = pool - } - - if isTLS { - if ok, parsedURL, tr := useSocket(url, &tlsConfig); ok { - client.Transport = tr - url = parsedURL - } else { - tr := http.DefaultTransport.(*http.Transport).Clone() - tr.TLSClientConfig = &tlsConfig - tr.DisableKeepAlives = true - client.Transport = tr - } - } else { - if ok, parsedURL, tr := useSocket(url, nil); ok { - client.Transport = tr - url = parsedURL - } - } - - // check if redirects are enabled - if enableRedirect { - client.CheckRedirect = func(req *http.Request, _ []*http.Request) error { - return verifyURLHost(bctx, req.URL.String()) - } - } - - if rawBody != nil { - body = rawBody - } else if body == nil { - body = bytes.NewBufferString("") - } - - // create the http request, use the builtin context's context to ensure - // the request is cancelled if evaluation is cancelled. 
- req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, nil, err - } - - req = req.WithContext(bctx.Context) - - // Add custom headers - if len(customHeaders) != 0 { - customHeaders = canonicalizeHeaders(customHeaders) - - for k, v := range customHeaders { - header, ok := v.(string) - if !ok { - return nil, nil, fmt.Errorf("invalid type for headers value %q", v) - } - - req.Header.Add(k, header) - } - - // Don't overwrite or append to one that was set in the custom headers - if _, hasUA := customHeaders["User-Agent"]; !hasUA { - req.Header.Add("User-Agent", version.UserAgent) - } - - // If the caller specifies the Host header, use it for the HTTP - // request host and the TLS server name. - if host, hasHost := customHeaders["Host"]; hasHost { - host := host.(string) // We already checked that it's a string. - req.Host = host - - // Only default the ServerName if the caller has - // specified the host. If we don't specify anything, - // Go will default to the target hostname. This name - // is not the same as the default that Go populates - // `req.Host` with, which is why we don't just set - // this unconditionally. 
- tlsConfig.ServerName = host - } - } - - if tlsServerName != "" { - tlsConfig.ServerName = tlsServerName - } - - if len(bctx.DistributedTracingOpts) > 0 { - client.Transport = tracing.NewTransport(client.Transport, bctx.DistributedTracingOpts) - } - - return req, client, nil -} - -func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast.Object) (*http.Response, error) { - var err error - var retry int - - retry, err = getNumberValFromReqObj(inputReqObj, ast.StringTerm("max_retry_attempts")) - if err != nil { - return nil, err - } - - for i := 0; true; i++ { - - var resp *http.Response - resp, err = client.Do(req) - if err == nil { - return resp, nil - } - - // final attempt - if i == retry { - break - } - - if err == context.Canceled { - return nil, err - } - - delay := util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i) - timer, timerCancel := util.TimerWithCancel(delay) - select { - case <-timer.C: - case <-req.Context().Done(): - timerCancel() // explicitly cancel the timer. - return nil, context.Canceled - } - } - return nil, err -} - -func isContentType(header http.Header, typ ...string) bool { - for _, t := range typ { - if strings.Contains(header.Get("Content-Type"), t) { - return true - } - } - return false -} - -type httpSendCacheEntry struct { - response *ast.Value - error error -} - -// The httpSendCache is used for intra-query caching of http.send results. 
-type httpSendCache struct { - entries *util.HashMap -} - -func newHTTPSendCache() *httpSendCache { - return &httpSendCache{ - entries: util.NewHashMap(valueEq, valueHash), - } -} - -func valueHash(v util.T) int { - return ast.StringTerm(v.(ast.Value).String()).Hash() -} - -func valueEq(a, b util.T) bool { - av := a.(ast.Value) - bv := b.(ast.Value) - return av.String() == bv.String() -} - -func (cache *httpSendCache) get(k ast.Value) *httpSendCacheEntry { - if v, ok := cache.entries.Get(k); ok { - v := v.(httpSendCacheEntry) - return &v - } - return nil -} - -func (cache *httpSendCache) putResponse(k ast.Value, v *ast.Value) { - cache.entries.Put(k, httpSendCacheEntry{response: v}) -} - -func (cache *httpSendCache) putError(k ast.Value, v error) { - cache.entries.Put(k, httpSendCacheEntry{error: v}) -} - -// In the BuiltinContext cache we only store a single entry that points to -// our ValueMap which is the "real" http.send() cache. -func getHTTPSendCache(bctx BuiltinContext) *httpSendCache { - raw, ok := bctx.Cache.Get(httpSendBuiltinCacheKey) - if !ok { - // Initialize if it isn't there - c := newHTTPSendCache() - bctx.Cache.Put(httpSendBuiltinCacheKey, c) - return c - } - - c, ok := raw.(*httpSendCache) - if !ok { - return nil - } - return c -} - -// checkHTTPSendCache checks for the given key's value in the cache -func checkHTTPSendCache(bctx BuiltinContext, key ast.Object) (ast.Value, error) { - requestCache := getHTTPSendCache(bctx) - if requestCache == nil { - return nil, nil - } - - v := requestCache.get(key) - if v != nil { - if v.error != nil { - return nil, v.error - } - if v.response != nil { - return *v.response, nil - } - // This should never happen - } - - return nil, nil -} - -func insertIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, value ast.Value) { - requestCache := getHTTPSendCache(bctx) - if requestCache == nil { - // Should never happen.. 
if it does just skip caching the value - // FIXME: return error instead, to prevent inconsistencies? - return - } - requestCache.putResponse(key, &value) -} - -func insertErrorIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, err error) { - requestCache := getHTTPSendCache(bctx) - if requestCache == nil { - // Should never happen.. if it does just skip caching the value - // FIXME: return error instead, to prevent inconsistencies? - return - } - requestCache.putError(key, err) -} - -// checkHTTPSendInterQueryCache checks for the given key's value in the inter-query cache -func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) { - requestCache := c.bctx.InterQueryBuiltinCache - - cachedValue, found := requestCache.Get(c.key) - if !found { - return nil, nil - } - - value, cerr := requestCache.Clone(cachedValue) - if cerr != nil { - return nil, handleHTTPSendErr(c.bctx, cerr) - } - - c.bctx.Metrics.Counter(httpSendInterQueryCacheHits).Incr() - var cachedRespData *interQueryCacheData - - switch v := value.(type) { - case *interQueryCacheValue: - var err error - cachedRespData, err = v.copyCacheData() - if err != nil { - return nil, err - } - case *interQueryCacheData: - cachedRespData = v - default: - return nil, nil - } - - if getCurrentTime(c.bctx).Before(cachedRespData.ExpiresAt) { - return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode) - } - - var err error - c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.key) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - headers := parseResponseHeaders(cachedRespData.Headers) - - // check with the server if the stale response is still up-to-date. - // If server returns a new response (ie. status_code=200), update the cache with the new response - // If server returns an unmodified response (ie. 
status_code=304), update the headers for the existing response - result, modified, err := revalidateCachedResponse(c.httpReq, c.httpClient, c.key, headers) - requestCache.Delete(c.key) - if err != nil || result == nil { - return nil, err - } - - defer result.Body.Close() - - if !modified { - // update the headers in the cached response with their corresponding values from the 304 (Not Modified) response - for headerName, values := range result.Header { - cachedRespData.Headers.Del(headerName) - for _, v := range values { - cachedRespData.Headers.Add(headerName, v) - } - } - - if forceCaching(c.forceCacheParams) { - createdAt := getCurrentTime(c.bctx) - cachedRespData.ExpiresAt = createdAt.Add(time.Second * time.Duration(c.forceCacheParams.forceCacheDurationSeconds)) - } else { - expiresAt, err := expiryFromHeaders(result.Header) - if err != nil { - return nil, err - } - cachedRespData.ExpiresAt = expiresAt - } - - cachingMode, err := getCachingMode(c.key) - if err != nil { - return nil, err - } - - var pcv cache.InterQueryCacheValue - - if cachingMode == defaultCachingMode { - pcv, err = cachedRespData.toCacheValue() - if err != nil { - return nil, err - } - } else { - pcv = cachedRespData - } - - c.bctx.InterQueryBuiltinCache.InsertWithExpiry(c.key, pcv, cachedRespData.ExpiresAt) - - return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode) - } - - newValue, respBody, err := formatHTTPResponseToAST(result, c.forceJSONDecode, c.forceYAMLDecode) - if err != nil { - return nil, err - } - - if err := insertIntoHTTPSendInterQueryCache(c.bctx, c.key, result, respBody, c.forceCacheParams); err != nil { - return nil, err - } - - return newValue, nil -} - -// insertIntoHTTPSendInterQueryCache inserts given key and value in the inter-query cache -func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) error { - if resp == nil || (!forceCaching(cacheParams) && 
!canStore(resp.Header)) || !cacheableCodes.Contains(ast.IntNumberTerm(resp.StatusCode)) { - return nil - } - - requestCache := bctx.InterQueryBuiltinCache - - obj, ok := key.(ast.Object) - if !ok { - return fmt.Errorf("interface conversion error") - } - - cachingMode, err := getCachingMode(obj) - if err != nil { - return err - } - - var pcv cache.InterQueryCacheValue - var pcvData *interQueryCacheData - if cachingMode == defaultCachingMode { - pcv, pcvData, err = newInterQueryCacheValue(bctx, resp, respBody, cacheParams) - } else { - pcvData, err = newInterQueryCacheData(bctx, resp, respBody, cacheParams) - pcv = pcvData - } - - if err != nil { - return err - } - - requestCache.InsertWithExpiry(key, pcv, pcvData.ExpiresAt) - return nil -} - -func createAllowedKeys() { - for _, element := range allowedKeyNames { - allowedKeys.Add(ast.StringTerm(element)) - } -} - -func createCacheableHTTPStatusCodes() { - for _, element := range cacheableHTTPStatusCodes { - cacheableCodes.Add(ast.IntNumberTerm(element)) - } -} - -func parseTimeout(timeoutVal ast.Value) (time.Duration, error) { - var timeout time.Duration - switch t := timeoutVal.(type) { - case ast.Number: - timeoutInt, ok := t.Int64() - if !ok { - return timeout, fmt.Errorf("invalid timeout number value %v, must be int64", timeoutVal) - } - return time.Duration(timeoutInt), nil - case ast.String: - // Support strings without a unit, treat them the same as just a number value (ns) - var err error - timeoutInt, err := strconv.ParseInt(string(t), 10, 64) - if err == nil { - return time.Duration(timeoutInt), nil - } - - // Try parsing it as a duration (requires a supported units suffix) - timeout, err = time.ParseDuration(string(t)) - if err != nil { - return timeout, fmt.Errorf("invalid timeout value %v: %s", timeoutVal, err) - } - return timeout, nil - default: - return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.TypeName(t)) - } -} - -func getBoolValFromReqObj(req 
ast.Object, key *ast.Term) (bool, error) { - var b ast.Boolean - var ok bool - if v := req.Get(key); v != nil { - if b, ok = v.Value.(ast.Boolean); !ok { - return false, fmt.Errorf("invalid value for %v field", key.String()) - } - } - return bool(b), nil -} - -func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) { - term := req.Get(key) - if term == nil { - return 0, nil - } - - if t, ok := term.Value.(ast.Number); ok { - num, ok := t.Int() - if !ok || num < 0 { - return 0, fmt.Errorf("invalid value %v for field %v", t.String(), key.String()) - } - return num, nil - } - - return 0, fmt.Errorf("invalid value %v for field %v", term.String(), key.String()) -} - -func getCachingMode(req ast.Object) (cachingMode, error) { - key := ast.StringTerm("caching_mode") - var s ast.String - var ok bool - if v := req.Get(key); v != nil { - if s, ok = v.Value.(ast.String); !ok { - return "", fmt.Errorf("invalid value for %v field", key.String()) - } - - switch cachingMode(s) { - case defaultCachingMode, cachingModeDeserialized: - return cachingMode(s), nil - default: - return "", fmt.Errorf("invalid value specified for %v field: %v", key.String(), string(s)) - } - } - return defaultCachingMode, nil -} - -type interQueryCacheValue struct { - Data []byte -} - -func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheValue, *interQueryCacheData, error) { - data, err := newInterQueryCacheData(bctx, resp, respBody, cacheParams) - if err != nil { - return nil, nil, err - } - - b, err := json.Marshal(data) - if err != nil { - return nil, nil, err - } - return &interQueryCacheValue{Data: b}, data, nil -} - -func (cb interQueryCacheValue) Clone() (cache.InterQueryCacheValue, error) { - dup := make([]byte, len(cb.Data)) - copy(dup, cb.Data) - return &interQueryCacheValue{Data: dup}, nil -} - -func (cb interQueryCacheValue) SizeInBytes() int64 { - return int64(len(cb.Data)) -} - -func (cb 
*interQueryCacheValue) copyCacheData() (*interQueryCacheData, error) { - var res interQueryCacheData - err := util.UnmarshalJSON(cb.Data, &res) - if err != nil { - return nil, err - } - return &res, nil -} - -type interQueryCacheData struct { - RespBody []byte - Status string - StatusCode int - Headers http.Header - ExpiresAt time.Time -} - -func forceCaching(cacheParams *forceCacheParams) bool { - return cacheParams != nil && cacheParams.forceCacheDurationSeconds > 0 -} - -func expiryFromHeaders(headers http.Header) (time.Time, error) { - var expiresAt time.Time - maxAge, err := parseMaxAgeCacheDirective(parseCacheControlHeader(headers)) - if err != nil { - return time.Time{}, err - } - if maxAge != -1 { - createdAt, err := getResponseHeaderDate(headers) - if err != nil { - return time.Time{}, err - } - expiresAt = createdAt.Add(time.Second * time.Duration(maxAge)) - } else { - expiresAt = getResponseHeaderExpires(headers) - } - return expiresAt, nil -} - -func newInterQueryCacheData(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheData, error) { - var expiresAt time.Time - - if forceCaching(cacheParams) { - createdAt := getCurrentTime(bctx) - expiresAt = createdAt.Add(time.Second * time.Duration(cacheParams.forceCacheDurationSeconds)) - } else { - var err error - expiresAt, err = expiryFromHeaders(resp.Header) - if err != nil { - return nil, err - } - } - - cv := interQueryCacheData{ - ExpiresAt: expiresAt, - RespBody: respBody, - Status: resp.Status, - StatusCode: resp.StatusCode, - Headers: resp.Header} - - return &cv, nil -} - -func (c *interQueryCacheData) formatToAST(forceJSONDecode, forceYAMLDecode bool) (ast.Value, error) { - return prepareASTResult(c.Headers, forceJSONDecode, forceYAMLDecode, c.RespBody, c.Status, c.StatusCode) -} - -func (c *interQueryCacheData) toCacheValue() (*interQueryCacheValue, error) { - b, err := json.Marshal(c) - if err != nil { - return nil, err - } - return 
&interQueryCacheValue{Data: b}, nil -} - -func (c *interQueryCacheData) SizeInBytes() int64 { - return 0 -} - -func (c *interQueryCacheData) Clone() (cache.InterQueryCacheValue, error) { - dup := make([]byte, len(c.RespBody)) - copy(dup, c.RespBody) - - return &interQueryCacheData{ - ExpiresAt: c.ExpiresAt, - RespBody: dup, - Status: c.Status, - StatusCode: c.StatusCode, - Headers: c.Headers.Clone()}, nil -} - -type responseHeaders struct { - etag string // identifier for a specific version of the response - lastModified string // date and time response was last modified as per origin server -} - -// deltaSeconds specifies a non-negative integer, representing -// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1 -type deltaSeconds int32 - -func parseResponseHeaders(headers http.Header) *responseHeaders { - result := responseHeaders{} - - result.etag = headers.Get("etag") - - result.lastModified = headers.Get("last-modified") - - return &result -} - -func revalidateCachedResponse(req *http.Request, client *http.Client, inputReqObj ast.Object, headers *responseHeaders) (*http.Response, bool, error) { - etag := headers.etag - lastModified := headers.lastModified - - if etag == "" && lastModified == "" { - return nil, false, nil - } - - cloneReq := req.Clone(req.Context()) - - if etag != "" { - cloneReq.Header.Set("if-none-match", etag) - } - - if lastModified != "" { - cloneReq.Header.Set("if-modified-since", lastModified) - } - - response, err := executeHTTPRequest(cloneReq, client, inputReqObj) - if err != nil { - return nil, false, err - } - - switch response.StatusCode { - case http.StatusOK: - return response, true, nil - - case http.StatusNotModified: - return response, false, nil - } - util.Close(response) - return nil, false, nil -} - -func canStore(headers http.Header) bool { - ccHeaders := parseCacheControlHeader(headers) - - // Check "no-store" cache directive - // The "no-store" response directive indicates that a cache MUST NOT - // store 
any part of either the immediate request or response. - if _, ok := ccHeaders["no-store"]; ok { - return false - } - return true -} - -func getCurrentTime(bctx BuiltinContext) time.Time { - var current time.Time - - value, err := ast.JSON(bctx.Time.Value) - if err != nil { - return current - } - - valueNum, ok := value.(json.Number) - if !ok { - return current - } - - valueNumInt, err := valueNum.Int64() - if err != nil { - return current - } - - current = time.Unix(0, valueNumInt).UTC() - return current -} - -func parseCacheControlHeader(headers http.Header) map[string]string { - ccDirectives := map[string]string{} - ccHeader := headers.Get("cache-control") - - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - items := strings.Split(part, "=") - if len(items) != 2 { - continue - } - ccDirectives[strings.Trim(items[0], " ")] = strings.Trim(items[1], ",") - } else { - ccDirectives[part] = "" - } - } - - return ccDirectives -} - -func getResponseHeaderDate(headers http.Header) (date time.Time, err error) { - dateHeader := headers.Get("date") - if dateHeader == "" { - err = fmt.Errorf("no date header") - return - } - return http.ParseTime(dateHeader) -} - -func getResponseHeaderExpires(headers http.Header) time.Time { - expiresHeader := headers.Get("expires") - if expiresHeader == "" { - return time.Time{} - } - - date, err := http.ParseTime(expiresHeader) - if err != nil { - // servers can set `Expires: 0` which is an invalid date to indicate expired content - return time.Time{} - } - - return date -} - -// parseMaxAgeCacheDirective parses the max-age directive expressed in delta-seconds as per -// https://tools.ietf.org/html/rfc7234#section-1.2.1 -func parseMaxAgeCacheDirective(cc map[string]string) (deltaSeconds, error) { - maxAge, ok := cc["max-age"] - if !ok { - return deltaSeconds(-1), nil - } - - val, err := strconv.ParseUint(maxAge, 10, 32) - if err != 
nil { - if numError, ok := err.(*strconv.NumError); ok { - if numError.Err == strconv.ErrRange { - return deltaSeconds(math.MaxInt32), nil - } - } - return deltaSeconds(-1), err - } - - if val > math.MaxInt32 { - return deltaSeconds(math.MaxInt32), nil - } - return deltaSeconds(val), nil -} - -func formatHTTPResponseToAST(resp *http.Response, forceJSONDecode, forceYAMLDecode bool) (ast.Value, []byte, error) { - - resultRawBody, err := io.ReadAll(resp.Body) - if err != nil { - return nil, nil, err - } - - resultObj, err := prepareASTResult(resp.Header, forceJSONDecode, forceYAMLDecode, resultRawBody, resp.Status, resp.StatusCode) - if err != nil { - return nil, nil, err - } - - return resultObj, resultRawBody, nil -} - -func prepareASTResult(headers http.Header, forceJSONDecode, forceYAMLDecode bool, body []byte, status string, statusCode int) (ast.Value, error) { - var resultBody interface{} - - // If the response body cannot be JSON/YAML decoded, - // an error will not be returned. Instead, the "body" field - // in the result will be null. 
- switch { - case forceJSONDecode || isContentType(headers, "application/json"): - _ = util.UnmarshalJSON(body, &resultBody) - case forceYAMLDecode || isContentType(headers, "application/yaml", "application/x-yaml"): - _ = util.Unmarshal(body, &resultBody) - } - - result := make(map[string]interface{}) - result["status"] = status - result["status_code"] = statusCode - result["body"] = resultBody - result["raw_body"] = string(body) - result["headers"] = getResponseHeaders(headers) - - resultObj, err := ast.InterfaceToValue(result) - if err != nil { - return nil, err - } - - return resultObj, nil -} - -func getResponseHeaders(headers http.Header) map[string]interface{} { - respHeaders := map[string]interface{}{} - for headerName, values := range headers { - var respValues []interface{} - for _, v := range values { - respValues = append(respValues, v) - } - respHeaders[strings.ToLower(headerName)] = respValues - } - return respHeaders -} - -// httpRequestExecutor defines an interface for the http send cache -type httpRequestExecutor interface { - CheckCache() (ast.Value, error) - InsertIntoCache(value *http.Response) (ast.Value, error) - InsertErrorIntoCache(err error) - ExecuteHTTPRequest() (*http.Response, error) -} - -// newHTTPRequestExecutor returns a new HTTP request executor that wraps either an inter-query or -// intra-query cache implementation -func newHTTPRequestExecutor(bctx BuiltinContext, req ast.Object, key ast.Object) (httpRequestExecutor, error) { - useInterQueryCache, forceCacheParams, err := useInterQueryCache(req) - if err != nil { - return nil, handleHTTPSendErr(bctx, err) - } - - if useInterQueryCache && bctx.InterQueryBuiltinCache != nil { - return newInterQueryCache(bctx, req, key, forceCacheParams) - } - return newIntraQueryCache(bctx, req, key) -} - -type interQueryCache struct { - bctx BuiltinContext - req ast.Object - key ast.Object - httpReq *http.Request - httpClient *http.Client - forceJSONDecode bool - forceYAMLDecode bool - 
forceCacheParams *forceCacheParams -} - -func newInterQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) { - return &interQueryCache{bctx: bctx, req: req, key: key, forceCacheParams: forceCacheParams}, nil -} - -// CheckCache checks the cache for the value of the key set on this object -func (c *interQueryCache) CheckCache() (ast.Value, error) { - var err error - - // Checking the intra-query cache first ensures consistency of errors and HTTP responses within a query. - resp, err := checkHTTPSendCache(c.bctx, c.key) - if err != nil { - return nil, err - } - if resp != nil { - return resp, nil - } - - c.forceJSONDecode, err = getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode")) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, ast.StringTerm("force_yaml_decode")) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - resp, err = c.checkHTTPSendInterQueryCache() - // Always insert the result of the inter-query cache into the intra-query cache, to maintain consistency within the same query. - if err != nil { - insertErrorIntoHTTPSendCache(c.bctx, c.key, err) - } - if resp != nil { - insertIntoHTTPSendCache(c.bctx, c.key, resp) - } - return resp, err -} - -// InsertIntoCache inserts the key set on this object into the cache with the given value -func (c *interQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { - result, respBody, err := formatHTTPResponseToAST(value, c.forceJSONDecode, c.forceYAMLDecode) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - // Always insert into the intra-query cache, to maintain consistency within the same query. - insertIntoHTTPSendCache(c.bctx, c.key, result) - - // We ignore errors when populating the inter-query cache, because we've already populated the intra-cache, - // and query consistency is our primary concern. 
- _ = insertIntoHTTPSendInterQueryCache(c.bctx, c.key, value, respBody, c.forceCacheParams) - return result, nil -} - -func (c *interQueryCache) InsertErrorIntoCache(err error) { - insertErrorIntoHTTPSendCache(c.bctx, c.key, err) -} - -// ExecuteHTTPRequest executes a HTTP request -func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) { - var err error - c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.req) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - return executeHTTPRequest(c.httpReq, c.httpClient, c.req) -} - -type intraQueryCache struct { - bctx BuiltinContext - req ast.Object - key ast.Object -} - -func newIntraQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object) (*intraQueryCache, error) { - return &intraQueryCache{bctx: bctx, req: req, key: key}, nil -} - -// CheckCache checks the cache for the value of the key set on this object -func (c *intraQueryCache) CheckCache() (ast.Value, error) { - return checkHTTPSendCache(c.bctx, c.key) -} - -// InsertIntoCache inserts the key set on this object into the cache with the given value -func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { - forceJSONDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode")) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - forceYAMLDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_yaml_decode")) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - result, _, err := formatHTTPResponseToAST(value, forceJSONDecode, forceYAMLDecode) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - if cacheableCodes.Contains(ast.IntNumberTerm(value.StatusCode)) { - insertIntoHTTPSendCache(c.bctx, c.key, result) - } - - return result, nil -} - -func (c *intraQueryCache) InsertErrorIntoCache(err error) { - insertErrorIntoHTTPSendCache(c.bctx, c.key, err) -} - -// ExecuteHTTPRequest executes a HTTP request -func (c 
*intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { - httpReq, httpClient, err := createHTTPRequest(c.bctx, c.req) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - return executeHTTPRequest(httpReq, httpClient, c.req) -} - -func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) { - value, err := getBoolValFromReqObj(req, ast.StringTerm("cache")) - if err != nil { - return false, nil, err - } - - valueForceCache, err := getBoolValFromReqObj(req, ast.StringTerm("force_cache")) - if err != nil { - return false, nil, err - } - - if valueForceCache { - forceCacheParams, err := newForceCacheParams(req) - return true, forceCacheParams, err - } - - return value, nil, nil -} - -type forceCacheParams struct { - forceCacheDurationSeconds int32 -} - -func newForceCacheParams(req ast.Object) (*forceCacheParams, error) { - term := req.Get(ast.StringTerm("force_cache_duration_seconds")) - if term == nil { - return nil, fmt.Errorf("'force_cache' set but 'force_cache_duration_seconds' parameter is missing") - } - - forceCacheDurationSeconds := term.String() - - value, err := strconv.ParseInt(forceCacheDurationSeconds, 10, 32) - if err != nil { - return nil, err - } - - return &forceCacheParams{forceCacheDurationSeconds: int32(value)}, nil -} - -func getRaiseErrorValue(req ast.Object) (bool, error) { - result := ast.Boolean(true) - var ok bool - if v := req.Get(ast.StringTerm("raise_error")); v != nil { - if result, ok = v.Value.(ast.Boolean); !ok { - return false, fmt.Errorf("invalid value for raise_error field") - } - } - return bool(result), nil -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go b/vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go deleted file mode 100644 index 1b9ffc2350..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !go1.18 || !darwin -// +build !go1.18 !darwin - -package topdown - -func 
fixupDarwinGo118(x string, _ string) string { - return x -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go b/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go index 6eacc338ef..845f8da612 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go @@ -4,60 +4,18 @@ package topdown -import "github.com/open-policy-agent/opa/metrics" - -const ( - evalOpPlug = "eval_op_plug" - evalOpResolve = "eval_op_resolve" - evalOpRuleIndex = "eval_op_rule_index" - evalOpBuiltinCall = "eval_op_builtin_call" - evalOpVirtualCacheHit = "eval_op_virtual_cache_hit" - evalOpVirtualCacheMiss = "eval_op_virtual_cache_miss" - evalOpBaseCacheHit = "eval_op_base_cache_hit" - evalOpBaseCacheMiss = "eval_op_base_cache_miss" - evalOpComprehensionCacheSkip = "eval_op_comprehension_cache_skip" - evalOpComprehensionCacheBuild = "eval_op_comprehension_cache_build" - evalOpComprehensionCacheHit = "eval_op_comprehension_cache_hit" - evalOpComprehensionCacheMiss = "eval_op_comprehension_cache_miss" - partialOpSaveUnify = "partial_op_save_unify" - partialOpSaveSetContains = "partial_op_save_set_contains" - partialOpSaveSetContainsRec = "partial_op_save_set_contains_rec" - partialOpCopyPropagation = "partial_op_copy_propagation" +import ( + "github.com/open-policy-agent/opa/v1/metrics" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // Instrumentation implements helper functions to instrument query evaluation // to diagnose performance issues. Instrumentation may be expensive in some // cases, so it is disabled by default. -type Instrumentation struct { - m metrics.Metrics -} +type Instrumentation = v1.Instrumentation // NewInstrumentation returns a new Instrumentation object. Performance // diagnostics recorded on this Instrumentation object will stored in m. 
func NewInstrumentation(m metrics.Metrics) *Instrumentation { - return &Instrumentation{ - m: m, - } -} - -func (instr *Instrumentation) startTimer(name string) { - if instr == nil { - return - } - instr.m.Timer(name).Start() -} - -func (instr *Instrumentation) stopTimer(name string) { - if instr == nil { - return - } - delta := instr.m.Timer(name).Stop() - instr.m.Histogram(name).Update(delta) -} - -func (instr *Instrumentation) counterIncr(name string) { - if instr == nil { - return - } - instr.m.Counter(name).Incr() + return v1.NewInstrumentation(m) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/json.go b/vendor/github.com/open-policy-agent/opa/topdown/json.go deleted file mode 100644 index 8a5d232836..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/json.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright 2019 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "fmt" - "strconv" - "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - - "github.com/open-policy-agent/opa/internal/edittree" -) - -func builtinJSONRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Expect an object and a string or array/set of strings - _, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { - return err - } - - // Build a list of json pointers to remove - paths, err := getJSONPaths(operands[1].Value) - if err != nil { - return err - } - - newObj, err := jsonRemove(operands[0], ast.NewTerm(pathsToObject(paths))) - if err != nil { - return err - } - - if newObj == nil { - return nil - } - - return iter(newObj) -} - -// jsonRemove returns a new term that is the result of walking -// through a and omitting removing any values that are in b but -// have ast.Null values (ie leaf nodes for b). 
-func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) { - if b == nil { - // The paths diverged, return a - return a, nil - } - - var bObj ast.Object - switch bValue := b.Value.(type) { - case ast.Object: - bObj = bValue - case ast.Null: - // Means we hit a leaf node on "b", dont add the value for a - return nil, nil - default: - // The paths diverged, return a - return a, nil - } - - switch aValue := a.Value.(type) { - case ast.String, ast.Number, ast.Boolean, ast.Null: - return a, nil - case ast.Object: - newObj := ast.NewObject() - err := aValue.Iter(func(k *ast.Term, v *ast.Term) error { - // recurse and add the diff of sub objects as needed - diffValue, err := jsonRemove(v, bObj.Get(k)) - if err != nil || diffValue == nil { - return err - } - newObj.Insert(k, diffValue) - return nil - }) - if err != nil { - return nil, err - } - return ast.NewTerm(newObj), nil - case ast.Set: - newSet := ast.NewSet() - err := aValue.Iter(func(v *ast.Term) error { - // recurse and add the diff of sub objects as needed - diffValue, err := jsonRemove(v, bObj.Get(v)) - if err != nil || diffValue == nil { - return err - } - newSet.Add(diffValue) - return nil - }) - if err != nil { - return nil, err - } - return ast.NewTerm(newSet), nil - case *ast.Array: - // When indexes are removed we shift left to close empty spots in the array - // as per the JSON patch spec. 
- newArray := ast.NewArray() - for i := 0; i < aValue.Len(); i++ { - v := aValue.Elem(i) - // recurse and add the diff of sub objects as needed - // Note: Keys in b will be strings for the index, eg path /a/1/b => {"a": {"1": {"b": null}}} - diffValue, err := jsonRemove(v, bObj.Get(ast.StringTerm(strconv.Itoa(i)))) - if err != nil { - return nil, err - } - if diffValue != nil { - newArray = newArray.Append(diffValue) - } - } - return ast.NewTerm(newArray), nil - default: - return nil, fmt.Errorf("invalid value type %T", a) - } -} - -func builtinJSONFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Ensure we have the right parameters, expect an object and a string or array/set of strings - obj, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { - return err - } - - // Build a list of filter strings - filters, err := getJSONPaths(operands[1].Value) - if err != nil { - return err - } - - // Actually do the filtering - filterObj := pathsToObject(filters) - r, err := obj.Filter(filterObj) - if err != nil { - return err - } - - return iter(ast.NewTerm(r)) -} - -func getJSONPaths(operand ast.Value) ([]ast.Ref, error) { - var paths []ast.Ref - - switch v := operand.(type) { - case *ast.Array: - for i := 0; i < v.Len(); i++ { - filter, err := parsePath(v.Elem(i)) - if err != nil { - return nil, err - } - paths = append(paths, filter) - } - case ast.Set: - err := v.Iter(func(f *ast.Term) error { - filter, err := parsePath(f) - if err != nil { - return err - } - paths = append(paths, filter) - return nil - }) - if err != nil { - return nil, err - } - default: - return nil, builtins.NewOperandTypeErr(2, v, "set", "array") - } - - return paths, nil -} - -func parsePath(path *ast.Term) (ast.Ref, error) { - // paths can either be a `/` separated json path or - // an array or set of values - var pathSegments ast.Ref - switch p := path.Value.(type) { - case ast.String: - if p == "" { - return ast.Ref{}, nil - } - parts := 
strings.Split(strings.TrimLeft(string(p), "/"), "/") - for _, part := range parts { - part = strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~") - pathSegments = append(pathSegments, ast.StringTerm(part)) - } - case *ast.Array: - p.Foreach(func(term *ast.Term) { - pathSegments = append(pathSegments, term) - }) - default: - return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.TypeName(p)) - } - - return pathSegments, nil -} - -func pathsToObject(paths []ast.Ref) ast.Object { - root := ast.NewObject() - - for _, path := range paths { - node := root - var done bool - - // If the path is an empty JSON path, skip all further processing. - if len(path) == 0 { - done = true - } - - // Otherwise, we should have 1+ path segments to work with. - for i := 0; i < len(path)-1 && !done; i++ { - - k := path[i] - child := node.Get(k) - - if child == nil { - obj := ast.NewObject() - node.Insert(k, ast.NewTerm(obj)) - node = obj - continue - } - - switch v := child.Value.(type) { - case ast.Null: - done = true - case ast.Object: - node = v - default: - panic("unreachable") - } - } - - if !done { - node.Insert(path[len(path)-1], ast.NullTerm()) - } - } - - return root -} - -type jsonPatch struct { - op string - path *ast.Term - from *ast.Term - value *ast.Term -} - -func getPatch(o ast.Object) (jsonPatch, error) { - validOps := map[string]struct{}{"add": {}, "remove": {}, "replace": {}, "move": {}, "copy": {}, "test": {}} - var out jsonPatch - var ok bool - getAttribute := func(attr string) (*ast.Term, error) { - if term := o.Get(ast.StringTerm(attr)); term != nil { - return term, nil - } - - return nil, fmt.Errorf("missing '%s' attribute", attr) - } - - opTerm, err := getAttribute("op") - if err != nil { - return out, err - } - op, ok := opTerm.Value.(ast.String) - if !ok { - return out, fmt.Errorf("attribute 'op' must be a string") - } - out.op = string(op) - if _, found := 
validOps[out.op]; !found { - out.op = "" - return out, fmt.Errorf("unrecognized op '%s'", string(op)) - } - - pathTerm, err := getAttribute("path") - if err != nil { - return out, err - } - out.path = pathTerm - - // Only fetch the "from" parameter for move/copy ops. - switch out.op { - case "move", "copy": - fromTerm, err := getAttribute("from") - if err != nil { - return out, err - } - out.from = fromTerm - } - - // Only fetch the "value" parameter for add/replace/test ops. - switch out.op { - case "add", "replace", "test": - valueTerm, err := getAttribute("value") - if err != nil { - return out, err - } - out.value = valueTerm - } - - return out, nil -} - -func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { - et := edittree.NewEditTree(source) - for i := 0; i < operations.Len(); i++ { - object, ok := operations.Elem(i).Value.(ast.Object) - if !ok { - return nil, fmt.Errorf("must be an array of JSON-Patch objects, but at least one element is not an object") - } - patch, err := getPatch(object) - if err != nil { - return nil, err - } - path, err := parsePath(patch.path) - if err != nil { - return nil, err - } - - switch patch.op { - case "add": - _, err = et.InsertAtPath(path, patch.value) - if err != nil { - return nil, err - } - case "remove": - _, err = et.DeleteAtPath(path) - if err != nil { - return nil, err - } - case "replace": - _, err = et.DeleteAtPath(path) - if err != nil { - return nil, err - } - _, err = et.InsertAtPath(path, patch.value) - if err != nil { - return nil, err - } - case "move": - from, err := parsePath(patch.from) - if err != nil { - return nil, err - } - chunk, err := et.RenderAtPath(from) - if err != nil { - return nil, err - } - _, err = et.DeleteAtPath(from) - if err != nil { - return nil, err - } - _, err = et.InsertAtPath(path, chunk) - if err != nil { - return nil, err - } - case "copy": - from, err := parsePath(patch.from) - if err != nil { - return nil, err - } - chunk, err := et.RenderAtPath(from) - 
if err != nil { - return nil, err - } - _, err = et.InsertAtPath(path, chunk) - if err != nil { - return nil, err - } - case "test": - chunk, err := et.RenderAtPath(path) - if err != nil { - return nil, err - } - if !chunk.Equal(patch.value) { - return nil, fmt.Errorf("value from EditTree != patch value.\n\nExpected: %v\n\nFound: %v", patch.value, chunk) - } - } - } - final := et.Render() - // TODO: Nil check here? - return final, nil -} - -func builtinJSONPatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // JSON patch supports arrays, objects as well as values as the target. - target := ast.NewTerm(operands[0].Value) - - // Expect an array of operations. - operations, err := builtins.ArrayOperand(operands[1].Value, 2) - if err != nil { - return err - } - - patched, err := applyPatches(target, operations) - if err != nil { - return nil - } - return iter(patched) -} - -func init() { - RegisterBuiltinFunc(ast.JSONFilter.Name, builtinJSONFilter) - RegisterBuiltinFunc(ast.JSONRemove.Name, builtinJSONRemove) - RegisterBuiltinFunc(ast.JSONPatch.Name, builtinJSONPatch) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/numbers.go b/vendor/github.com/open-policy-agent/opa/topdown/numbers.go deleted file mode 100644 index 27f3156b8a..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/numbers.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "fmt" - "math/big" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -type randIntCachingKey string - -var one = big.NewInt(1) - -func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - x, err := builtins.BigIntOperand(operands[0].Value, 1) - if err != nil { - return err - } - - y, err := builtins.BigIntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - ast, err := generateRange(bctx, x, y, one, "numbers.range") - if err != nil { - return err - } - - return iter(ast) -} - -func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - x, err := builtins.BigIntOperand(operands[0].Value, 1) - if err != nil { - return err - } - - y, err := builtins.BigIntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - step, err := builtins.BigIntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - if step.Cmp(big.NewInt(0)) <= 0 { - return fmt.Errorf("numbers.range_step: step must be a positive number above zero") - } - - ast, err := generateRange(bctx, x, y, step, "numbers.range_step") - if err != nil { - return err - } - - return iter(ast) -} - -func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, funcName string) (*ast.Term, error) { - - cmp := x.Cmp(y) - - comp := func(i *big.Int, y *big.Int) bool { return i.Cmp(y) <= 0 } - iter := func(i *big.Int) *big.Int { return i.Add(i, step) } - - if cmp > 0 { - comp = func(i *big.Int, y *big.Int) bool { return i.Cmp(y) >= 0 } - iter = func(i *big.Int) *big.Int { return i.Sub(i, step) } - } - - result := ast.NewArray() - haltErr := Halt{ - Err: &Error{ - Code: CancelErr, - Message: fmt.Sprintf("%s: timed out before generating all numbers in range", funcName), - }, - } - - for i := new(big.Int).Set(x); comp(i, y); i = iter(i) { - if bctx.Cancel != nil && bctx.Cancel.Cancelled() { 
- return nil, haltErr - } - result = result.Append(ast.NewTerm(builtins.IntToNumber(i))) - } - - return ast.NewTerm(result), nil -} - -func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - strOp, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - - } - - n, err := builtins.IntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - if n == 0 { - return iter(ast.IntNumberTerm(0)) - } - - if n < 0 { - n = -n - } - - var key = randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n)) - - if val, ok := bctx.Cache.Get(key); ok { - return iter(val.(*ast.Term)) - } - - r, err := bctx.Rand() - if err != nil { - return err - } - result := ast.IntNumberTerm(r.Intn(n)) - bctx.Cache.Put(key, result) - - return iter(result) -} - -func init() { - RegisterBuiltinFunc(ast.NumbersRange.Name, builtinNumbersRange) - RegisterBuiltinFunc(ast.NumbersRangeStep.Name, builtinNumbersRangeStep) - RegisterBuiltinFunc(ast.RandIntn.Name, builtinRandIntn) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse.go b/vendor/github.com/open-policy-agent/opa/topdown/parse.go deleted file mode 100644 index c46222b413..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/parse.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -func builtinRegoParseModule(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - filename, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - input, err := builtins.StringOperand(operands[1].Value, 1) - if err != nil { - return err - } - - module, err := ast.ParseModule(string(filename), string(input)) - if err != nil { - return err - } - - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(module); err != nil { - return err - } - - term, err := ast.ParseTerm(buf.String()) - if err != nil { - return err - } - - return iter(term) -} - -func registerRegoMetadataBuiltinFunction(builtin *ast.Builtin) { - f := func(BuiltinContext, []*ast.Term, func(*ast.Term) error) error { - // The compiler should replace all usage of this function, so the only way to get here is within a query; - // which cannot define rules. - return fmt.Errorf("the %s function must only be called within the scope of a rule", builtin.Name) - } - RegisterBuiltinFunc(builtin.Name, f) -} - -func init() { - RegisterBuiltinFunc(ast.RegoParseModule.Name, builtinRegoParseModule) - registerRegoMetadataBuiltinFunction(ast.RegoMetadataChain) - registerRegoMetadataBuiltinFunction(ast.RegoMetadataRule) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go b/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go deleted file mode 100644 index 0cd4bc193a..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "fmt" - "math/big" - "strings" - "unicode" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -const ( - none uint64 = 1 << (10 * iota) - ki - mi - gi - ti - pi - ei - - kb uint64 = 1000 - mb = kb * 1000 - gb = mb * 1000 - tb = gb * 1000 - pb = tb * 1000 - eb = pb * 1000 -) - -func parseNumBytesError(msg string) error { - return fmt.Errorf("%s: %s", ast.UnitsParseBytes.Name, msg) -} - -func errBytesUnitNotRecognized(unit string) error { - return parseNumBytesError(fmt.Sprintf("byte unit %s not recognized", unit)) -} - -var ( - errBytesValueNoAmount = parseNumBytesError("no byte amount provided") - errBytesValueNumConv = parseNumBytesError("could not parse byte amount to a number") - errBytesValueIncludesSpaces = parseNumBytesError("spaces not allowed in resource strings") -) - -func builtinNumBytes(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - var m big.Float - - raw, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - s := formatString(raw) - - if strings.Contains(s, " ") { - return errBytesValueIncludesSpaces - } - - num, unit := extractNumAndUnit(s) - if num == "" { - return errBytesValueNoAmount - } - - switch unit { - case "": - m.SetUint64(none) - case "kb", "k": - m.SetUint64(kb) - case "kib", "ki": - m.SetUint64(ki) - case "mb", "m": - m.SetUint64(mb) - case "mib", "mi": - m.SetUint64(mi) - case "gb", "g": - m.SetUint64(gb) - case "gib", "gi": - m.SetUint64(gi) - case "tb", "t": - m.SetUint64(tb) - case "tib", "ti": - m.SetUint64(ti) - case "pb", "p": - m.SetUint64(pb) - case "pib", "pi": - m.SetUint64(pi) - case "eb", "e": - m.SetUint64(eb) - case "eib", "ei": - m.SetUint64(ei) - default: - return errBytesUnitNotRecognized(unit) - } - - numFloat, ok := new(big.Float).SetString(num) - if !ok { - return errBytesValueNumConv - } - - var total big.Int - numFloat.Mul(numFloat, &m).Int(&total) - return 
iter(ast.NewTerm(builtins.IntToNumber(&total))) -} - -// Makes the string lower case and removes quotation marks -func formatString(s ast.String) string { - str := string(s) - lower := strings.ToLower(str) - return strings.Replace(lower, "\"", "", -1) -} - -// Splits the string into a number string à la "10" or "10.2" and a unit -// string à la "gb" or "MiB" or "foo". Either can be an empty string -// (error handling is provided elsewhere). -func extractNumAndUnit(s string) (string, string) { - isNum := func(r rune) bool { - return unicode.IsDigit(r) || r == '.' - } - - firstNonNumIdx := -1 - for idx, r := range s { - if !isNum(r) { - firstNonNumIdx = idx - break - } - } - - if firstNonNumIdx == -1 { // only digits and '.' - return s, "" - } - if firstNonNumIdx == 0 { // only units (starts with non-digit) - return "", s - } - - return s[0:firstNonNumIdx], s[firstNonNumIdx:] -} - -func init() { - RegisterBuiltinFunc(ast.UnitsParseBytes.Name, builtinNumBytes) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print.go b/vendor/github.com/open-policy-agent/opa/topdown/print.go index 765b344b3a..5eacd180d9 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/print.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/print.go @@ -5,82 +5,12 @@ package topdown import ( - "fmt" "io" - "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" "github.com/open-policy-agent/opa/topdown/print" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) func NewPrintHook(w io.Writer) print.Hook { - return printHook{w: w} -} - -type printHook struct { - w io.Writer -} - -func (h printHook) Print(_ print.Context, msg string) error { - _, err := fmt.Fprintln(h.w, msg) - return err -} - -func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - if bctx.PrintHook == nil { - return iter(nil) - } - - arr, err := builtins.ArrayOperand(operands[0].Value, 1) - if err != nil { - 
return err - } - - buf := make([]string, arr.Len()) - - err = builtinPrintCrossProductOperands(bctx, buf, arr, 0, func(buf []string) error { - pctx := print.Context{ - Context: bctx.Context, - Location: bctx.Location, - } - return bctx.PrintHook.Print(pctx, strings.Join(buf, " ")) - }) - if err != nil { - return err - } - - return iter(nil) -} - -func builtinPrintCrossProductOperands(bctx BuiltinContext, buf []string, operands *ast.Array, i int, f func([]string) error) error { - - if i >= operands.Len() { - return f(buf) - } - - xs, ok := operands.Elem(i).Value.(ast.Set) - if !ok { - return Halt{Err: internalErr(bctx.Location, fmt.Sprintf("illegal argument type: %v", ast.TypeName(operands.Elem(i).Value)))} - } - - if xs.Len() == 0 { - buf[i] = "" - return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f) - } - - return xs.Iter(func(x *ast.Term) error { - switch v := x.Value.(type) { - case ast.String: - buf[i] = string(v) - default: - buf[i] = v.String() - } - return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f) - }) -} - -func init() { - RegisterBuiltinFunc(ast.InternalPrint.Name, builtinPrint) + return v1.NewPrintHook(w) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go b/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go new file mode 100644 index 0000000000..c2ee0eca7f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go @@ -0,0 +1,8 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. 
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package print diff --git a/vendor/github.com/open-policy-agent/opa/topdown/print/print.go b/vendor/github.com/open-policy-agent/opa/topdown/print/print.go index 0fb6abdca8..66ffbb176f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/print/print.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/print/print.go @@ -1,21 +1,14 @@ package print import ( - "context" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/topdown/print" ) // Context provides the Hook implementation context about the print() call. -type Context struct { - Context context.Context // request context passed when query executed - Location *ast.Location // location of print call -} +type Context = v1.Context // Hook defines the interface that callers can implement to receive print // statement outputs. If the hook returns an error, it will be surfaced if // strict builtin error checking is enabled (otherwise, it will not halt // execution.) 
-type Hook interface { - Print(Context, string) error -} +type Hook = v1.Hook diff --git a/vendor/github.com/open-policy-agent/opa/topdown/query.go b/vendor/github.com/open-policy-agent/opa/topdown/query.go index 8406cfdd87..d24060991f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/query.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/query.go @@ -1,599 +1,24 @@ package topdown import ( - "context" - "crypto/rand" - "io" - "sort" - "time" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/resolver" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/copypropagation" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" + "github.com/open-policy-agent/opa/v1/ast" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // QueryResultSet represents a collection of results returned by a query. -type QueryResultSet []QueryResult +type QueryResultSet = v1.QueryResultSet // QueryResult represents a single result returned by a query. The result // contains bindings for all variables that appear in the query. -type QueryResult map[ast.Var]*ast.Term +type QueryResult = v1.QueryResult // Query provides a configurable interface for performing query evaluation. 
-type Query struct { - seed io.Reader - time time.Time - cancel Cancel - query ast.Body - queryCompiler ast.QueryCompiler - compiler *ast.Compiler - store storage.Store - txn storage.Transaction - input *ast.Term - external *resolverTrie - tracers []QueryTracer - plugTraceVars bool - unknowns []*ast.Term - partialNamespace string - skipSaveNamespace bool - metrics metrics.Metrics - instr *Instrumentation - disableInlining []ast.Ref - shallowInlining bool - genvarprefix string - runtime *ast.Term - builtins map[string]*Builtin - indexing bool - earlyExit bool - interQueryBuiltinCache cache.InterQueryCache - interQueryBuiltinValueCache cache.InterQueryValueCache - ndBuiltinCache builtins.NDBCache - strictBuiltinErrors bool - builtinErrorList *[]Error - strictObjects bool - printHook print.Hook - tracingOpts tracing.Options - virtualCache VirtualCache -} +type Query = v1.Query // Builtin represents a built-in function that queries can call. -type Builtin struct { - Decl *ast.Builtin - Func BuiltinFunc -} +type Builtin = v1.Builtin // NewQuery returns a new Query object that can be run. func NewQuery(query ast.Body) *Query { - return &Query{ - query: query, - genvarprefix: ast.WildcardPrefix, - indexing: true, - earlyExit: true, - external: newResolverTrie(), - } -} - -// WithQueryCompiler sets the queryCompiler used for the query. -func (q *Query) WithQueryCompiler(queryCompiler ast.QueryCompiler) *Query { - q.queryCompiler = queryCompiler - return q -} - -// WithCompiler sets the compiler to use for the query. -func (q *Query) WithCompiler(compiler *ast.Compiler) *Query { - q.compiler = compiler - return q -} - -// WithStore sets the store to use for the query. -func (q *Query) WithStore(store storage.Store) *Query { - q.store = store - return q -} - -// WithTransaction sets the transaction to use for the query. All queries -// should be performed over a consistent snapshot of the storage layer. 
-func (q *Query) WithTransaction(txn storage.Transaction) *Query { - q.txn = txn - return q -} - -// WithCancel sets the cancellation object to use for the query. Set this if -// you need to abort queries based on a deadline. This is optional. -func (q *Query) WithCancel(cancel Cancel) *Query { - q.cancel = cancel - return q -} - -// WithInput sets the input object to use for the query. References rooted at -// input will be evaluated against this value. This is optional. -func (q *Query) WithInput(input *ast.Term) *Query { - q.input = input - return q -} - -// WithTracer adds a query tracer to use during evaluation. This is optional. -// Deprecated: Use WithQueryTracer instead. -func (q *Query) WithTracer(tracer Tracer) *Query { - qt, ok := tracer.(QueryTracer) - if !ok { - qt = WrapLegacyTracer(tracer) - } - return q.WithQueryTracer(qt) -} - -// WithQueryTracer adds a query tracer to use during evaluation. This is optional. -// Disabled QueryTracers will be ignored. -func (q *Query) WithQueryTracer(tracer QueryTracer) *Query { - if !tracer.Enabled() { - return q - } - - q.tracers = append(q.tracers, tracer) - - // If *any* of the tracers require local variable metadata we need to - // enabled plugging local trace variables. - conf := tracer.Config() - if conf.PlugLocalVars { - q.plugTraceVars = true - } - - return q -} - -// WithMetrics sets the metrics collection to add evaluation metrics to. This -// is optional. -func (q *Query) WithMetrics(m metrics.Metrics) *Query { - q.metrics = m - return q -} - -// WithInstrumentation sets the instrumentation configuration to enable on the -// evaluation process. By default, instrumentation is turned off. -func (q *Query) WithInstrumentation(instr *Instrumentation) *Query { - q.instr = instr - return q -} - -// WithUnknowns sets the initial set of variables or references to treat as -// unknown during query evaluation. This is required for partial evaluation. 
-func (q *Query) WithUnknowns(terms []*ast.Term) *Query { - q.unknowns = terms - return q -} - -// WithPartialNamespace sets the namespace to use for supporting rules -// generated as part of the partial evaluation process. The ns value must be a -// valid package path component. -func (q *Query) WithPartialNamespace(ns string) *Query { - q.partialNamespace = ns - return q -} - -// WithSkipPartialNamespace disables namespacing of saved support rules that are generated -// from the original policy (rules which are completely synthetic are still namespaced.) -func (q *Query) WithSkipPartialNamespace(yes bool) *Query { - q.skipSaveNamespace = yes - return q -} - -// WithDisableInlining adds a set of paths to the query that should be excluded from -// inlining. Inlining during partial evaluation can be expensive in some cases -// (e.g., when a cross-product is computed.) Disabling inlining avoids expensive -// computation at the cost of generating support rules. -func (q *Query) WithDisableInlining(paths []ast.Ref) *Query { - q.disableInlining = paths - return q -} - -// WithShallowInlining disables aggressive inlining performed during partial evaluation. -// When shallow inlining is enabled rules that depend (transitively) on unknowns are not inlined. -// Only rules/values that are completely known will be inlined. -func (q *Query) WithShallowInlining(yes bool) *Query { - q.shallowInlining = yes - return q -} - -// WithRuntime sets the runtime data to execute the query with. The runtime data -// can be returned by the `opa.runtime` built-in function. -func (q *Query) WithRuntime(runtime *ast.Term) *Query { - q.runtime = runtime - return q -} - -// WithBuiltins adds a set of built-in functions that can be called by the -// query. -func (q *Query) WithBuiltins(builtins map[string]*Builtin) *Query { - q.builtins = builtins - return q -} - -// WithIndexing will enable or disable using rule indexing for the evaluation -// of the query. The default is enabled. 
-func (q *Query) WithIndexing(enabled bool) *Query { - q.indexing = enabled - return q -} - -// WithEarlyExit will enable or disable using 'early exit' for the evaluation -// of the query. The default is enabled. -func (q *Query) WithEarlyExit(enabled bool) *Query { - q.earlyExit = enabled - return q -} - -// WithSeed sets a reader that will seed randomization required by built-in functions. -// If a seed is not provided crypto/rand.Reader is used. -func (q *Query) WithSeed(r io.Reader) *Query { - q.seed = r - return q -} - -// WithTime sets the time that will be returned by the time.now_ns() built-in function. -func (q *Query) WithTime(x time.Time) *Query { - q.time = x - return q -} - -// WithInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize. -func (q *Query) WithInterQueryBuiltinCache(c cache.InterQueryCache) *Query { - q.interQueryBuiltinCache = c - return q -} - -// WithInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize. -func (q *Query) WithInterQueryBuiltinValueCache(c cache.InterQueryValueCache) *Query { - q.interQueryBuiltinValueCache = c - return q -} - -// WithNDBuiltinCache sets the non-deterministic builtin cache. -func (q *Query) WithNDBuiltinCache(c builtins.NDBCache) *Query { - q.ndBuiltinCache = c - return q -} - -// WithStrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors. -func (q *Query) WithStrictBuiltinErrors(yes bool) *Query { - q.strictBuiltinErrors = yes - return q -} - -// WithBuiltinErrorList supplies a pointer to an Error slice to store built-in function errors -// encountered during evaluation. This error slice can be inspected after evaluation to determine -// which built-in function errors occurred. -func (q *Query) WithBuiltinErrorList(list *[]Error) *Query { - q.builtinErrorList = list - return q -} - -// WithResolver configures an external resolver to use for the given ref. 
-func (q *Query) WithResolver(ref ast.Ref, r resolver.Resolver) *Query { - q.external.Put(ref, r) - return q -} - -func (q *Query) WithPrintHook(h print.Hook) *Query { - q.printHook = h - return q -} - -// WithDistributedTracingOpts sets the options to be used by distributed tracing. -func (q *Query) WithDistributedTracingOpts(tr tracing.Options) *Query { - q.tracingOpts = tr - return q -} - -// WithStrictObjects tells the evaluator to avoid the "lazy object" optimization -// applied when reading objects from the store. It will result in higher memory -// usage and should only be used temporarily while adjusting code that breaks -// because of the optimization. -func (q *Query) WithStrictObjects(yes bool) *Query { - q.strictObjects = yes - return q -} - -// WithVirtualCache sets the VirtualCache to use during evaluation. This is -// optional, and if not set, the default cache is used. -func (q *Query) WithVirtualCache(vc VirtualCache) *Query { - q.virtualCache = vc - return q -} - -// PartialRun executes partial evaluation on the query with respect to unknown -// values. Partial evaluation attempts to evaluate as much of the query as -// possible without requiring values for the unknowns set on the query. The -// result of partial evaluation is a new set of queries that can be evaluated -// once the unknown value is known. In addition to new queries, partial -// evaluation may produce additional support modules that should be used in -// conjunction with the partially evaluated queries. 
-func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support []*ast.Module, err error) { - if q.partialNamespace == "" { - q.partialNamespace = "partial" // lazily initialize partial namespace - } - if q.seed == nil { - q.seed = rand.Reader - } - if !q.time.IsZero() { - q.time = time.Now() - } - if q.metrics == nil { - q.metrics = metrics.New() - } - - f := &queryIDFactory{} - b := newBindings(0, q.instr) - - var vc VirtualCache - if q.virtualCache != nil { - vc = q.virtualCache - } else { - vc = NewVirtualCache() - } - - e := &eval{ - ctx: ctx, - metrics: q.metrics, - seed: q.seed, - time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())), - cancel: q.cancel, - query: q.query, - queryCompiler: q.queryCompiler, - queryIDFact: f, - queryID: f.Next(), - bindings: b, - compiler: q.compiler, - store: q.store, - baseCache: newBaseCache(), - targetStack: newRefStack(), - txn: q.txn, - input: q.input, - external: q.external, - tracers: q.tracers, - traceEnabled: len(q.tracers) > 0, - plugTraceVars: q.plugTraceVars, - instr: q.instr, - builtins: q.builtins, - builtinCache: builtins.Cache{}, - functionMocks: newFunctionMocksStack(), - interQueryBuiltinCache: q.interQueryBuiltinCache, - interQueryBuiltinValueCache: q.interQueryBuiltinValueCache, - ndBuiltinCache: q.ndBuiltinCache, - virtualCache: vc, - comprehensionCache: newComprehensionCache(), - saveSet: newSaveSet(q.unknowns, b, q.instr), - saveStack: newSaveStack(), - saveSupport: newSaveSupport(), - saveNamespace: ast.StringTerm(q.partialNamespace), - skipSaveNamespace: q.skipSaveNamespace, - inliningControl: &inliningControl{ - shallow: q.shallowInlining, - }, - genvarprefix: q.genvarprefix, - runtime: q.runtime, - indexing: q.indexing, - earlyExit: q.earlyExit, - builtinErrors: &builtinErrors{}, - printHook: q.printHook, - strictObjects: q.strictObjects, - } - - if len(q.disableInlining) > 0 { - e.inliningControl.PushDisable(q.disableInlining, false) - } - - e.caller = e - 
q.metrics.Timer(metrics.RegoPartialEval).Start() - defer q.metrics.Timer(metrics.RegoPartialEval).Stop() - - livevars := ast.NewVarSet() - for _, t := range q.unknowns { - switch v := t.Value.(type) { - case ast.Var: - livevars.Add(v) - case ast.Ref: - livevars.Add(v[0].Value.(ast.Var)) - } - } - - ast.WalkVars(q.query, func(x ast.Var) bool { - if !x.IsGenerated() { - livevars.Add(x) - } - return false - }) - - p := copypropagation.New(livevars).WithCompiler(q.compiler) - - err = e.Run(func(e *eval) error { - - // Build output from saved expressions. - body := ast.NewBody() - - for _, elem := range e.saveStack.Stack[len(e.saveStack.Stack)-1] { - body.Append(elem.Plug(e.bindings)) - } - - // Include bindings as exprs so that when caller evals the result, they - // can obtain values for the vars in their query. - bindingExprs := []*ast.Expr{} - _ = e.bindings.Iter(e.bindings, func(a, b *ast.Term) error { - bindingExprs = append(bindingExprs, ast.Equality.Expr(a, b)) - return nil - }) // cannot return error - - // Sort binding expressions so that results are deterministic. - sort.Slice(bindingExprs, func(i, j int) bool { - return bindingExprs[i].Compare(bindingExprs[j]) < 0 - }) - - for i := range bindingExprs { - body.Append(bindingExprs[i]) - } - - // Skip this rule body if it fails to type-check. - // Type-checking failure means the rule body will never succeed. - if !e.compiler.PassesTypeCheck(body) { - return nil - } - - if !q.shallowInlining { - body = applyCopyPropagation(p, e.instr, body) - } - - partials = append(partials, body) - return nil - }) - - support = e.saveSupport.List() - - if len(e.builtinErrors.errs) > 0 { - if q.strictBuiltinErrors { - err = e.builtinErrors.errs[0] - } else if q.builtinErrorList != nil { - // If a builtinErrorList has been supplied, we must use pointer indirection - // to append to it. 
builtinErrorList is a slice pointer so that errors can be - // appended to it without returning a new slice and changing the interface - // of PartialRun. - for _, err := range e.builtinErrors.errs { - if tdError, ok := err.(*Error); ok { - *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError) - } else { - *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{ - Code: BuiltinErr, - Message: err.Error(), - }) - } - } - } - } - - for i := range support { - sort.Slice(support[i].Rules, func(j, k int) bool { - return support[i].Rules[j].Compare(support[i].Rules[k]) < 0 - }) - } - - return partials, support, err -} - -// Run is a wrapper around Iter that accumulates query results and returns them -// in one shot. -func (q *Query) Run(ctx context.Context) (QueryResultSet, error) { - qrs := QueryResultSet{} - return qrs, q.Iter(ctx, func(qr QueryResult) error { - qrs = append(qrs, qr) - return nil - }) -} - -// Iter executes the query and invokes the iter function with query results -// produced by evaluating the query. 
-func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { - // Query evaluation must not be allowed if the compiler has errors and is in an undefined, possibly inconsistent state - if q.compiler != nil && len(q.compiler.Errors) > 0 { - return &Error{ - Code: InternalErr, - Message: "compiler has errors", - } - } - - if q.seed == nil { - q.seed = rand.Reader - } - if q.time.IsZero() { - q.time = time.Now() - } - if q.metrics == nil { - q.metrics = metrics.New() - } - - f := &queryIDFactory{} - - var vc VirtualCache - if q.virtualCache != nil { - vc = q.virtualCache - } else { - vc = NewVirtualCache() - } - - e := &eval{ - ctx: ctx, - metrics: q.metrics, - seed: q.seed, - time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())), - cancel: q.cancel, - query: q.query, - queryCompiler: q.queryCompiler, - queryIDFact: f, - queryID: f.Next(), - bindings: newBindings(0, q.instr), - compiler: q.compiler, - store: q.store, - baseCache: newBaseCache(), - targetStack: newRefStack(), - txn: q.txn, - input: q.input, - external: q.external, - tracers: q.tracers, - traceEnabled: len(q.tracers) > 0, - plugTraceVars: q.plugTraceVars, - instr: q.instr, - builtins: q.builtins, - builtinCache: builtins.Cache{}, - functionMocks: newFunctionMocksStack(), - interQueryBuiltinCache: q.interQueryBuiltinCache, - interQueryBuiltinValueCache: q.interQueryBuiltinValueCache, - ndBuiltinCache: q.ndBuiltinCache, - virtualCache: vc, - comprehensionCache: newComprehensionCache(), - genvarprefix: q.genvarprefix, - runtime: q.runtime, - indexing: q.indexing, - earlyExit: q.earlyExit, - builtinErrors: &builtinErrors{}, - printHook: q.printHook, - tracingOpts: q.tracingOpts, - strictObjects: q.strictObjects, - } - e.caller = e - q.metrics.Timer(metrics.RegoQueryEval).Start() - err := e.Run(func(e *eval) error { - qr := QueryResult{} - _ = e.bindings.Iter(nil, func(k, v *ast.Term) error { - qr[k.Value.(ast.Var)] = v - return nil - }) // cannot return error - return iter(qr) - }) - 
- if len(e.builtinErrors.errs) > 0 { - if q.strictBuiltinErrors { - err = e.builtinErrors.errs[0] - } else if q.builtinErrorList != nil { - // If a builtinErrorList has been supplied, we must use pointer indirection - // to append to it. builtinErrorList is a slice pointer so that errors can be - // appended to it without returning a new slice and changing the interface - // of Iter. - for _, err := range e.builtinErrors.errs { - if tdError, ok := err.(*Error); ok { - *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError) - } else { - *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{ - Code: BuiltinErr, - Message: err.Error(), - }) - } - } - } - } - - q.metrics.Timer(metrics.RegoQueryEval).Stop() - return err + return v1.NewQuery(query) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex.go b/vendor/github.com/open-policy-agent/opa/topdown/regex.go deleted file mode 100644 index 452e7d58bf..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/regex.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "fmt" - "regexp" - "sync" - - gintersect "github.com/yashtewari/glob-intersection" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -const regexCacheMaxSize = 100 -const regexInterQueryValueCacheHits = "rego_builtin_regex_interquery_value_cache_hits" - -var regexpCacheLock = sync.Mutex{} -var regexpCache map[string]*regexp.Regexp - -func builtinRegexIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - _, err = regexp.Compile(string(s)) - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - return iter(ast.BooleanTerm(true)) -} - -func builtinRegexMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - re, err := getRegexp(bctx, string(s1)) - if err != nil { - return err - } - return iter(ast.BooleanTerm(re.MatchString(string(s2)))) -} - -func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - pattern, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - match, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - start, err := builtins.StringOperand(operands[2].Value, 3) - if err != nil { - return err - } - end, err := builtins.StringOperand(operands[3].Value, 4) - if err != nil { - return err - } - if len(start) != 1 { - return fmt.Errorf("start delimiter has to be exactly one character long but is %d long", len(start)) - } - if len(end) != 1 { - return fmt.Errorf("end delimiter has to be exactly one character long but is %d long", len(start)) - } - re, err := 
getRegexpTemplate(string(pattern), string(start)[0], string(end)[0]) - if err != nil { - return err - } - return iter(ast.BooleanTerm(re.MatchString(string(match)))) -} - -func builtinRegexSplit(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - re, err := getRegexp(bctx, string(s1)) - if err != nil { - return err - } - - elems := re.Split(string(s2), -1) - arr := make([]*ast.Term, len(elems)) - for i := range elems { - arr[i] = ast.StringTerm(elems[i]) - } - return iter(ast.NewTerm(ast.NewArray(arr...))) -} - -func getRegexp(bctx BuiltinContext, pat string) (*regexp.Regexp, error) { - if bctx.InterQueryBuiltinValueCache != nil { - val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(pat)) - if ok { - res, valid := val.(*regexp.Regexp) - if !valid { - // The cache key may exist for a different value type (eg. glob). - // In this case, we calculate the regex and return the result w/o updating the cache. - return regexp.Compile(pat) - } - - bctx.Metrics.Counter(regexInterQueryValueCacheHits).Incr() - return res, nil - } - - re, err := regexp.Compile(pat) - if err != nil { - return nil, err - } - bctx.InterQueryBuiltinValueCache.Insert(ast.String(pat), re) - return re, nil - } - - regexpCacheLock.Lock() - defer regexpCacheLock.Unlock() - re, ok := regexpCache[pat] - if !ok { - var err error - re, err = regexp.Compile(pat) - if err != nil { - return nil, err - } - if len(regexpCache) >= regexCacheMaxSize { - // Delete a (semi-)random key to make room for the new one. 
- for k := range regexpCache { - delete(regexpCache, k) - break - } - } - regexpCache[pat] = re - } - return re, nil -} - -func getRegexpTemplate(pat string, delimStart, delimEnd byte) (*regexp.Regexp, error) { - regexpCacheLock.Lock() - defer regexpCacheLock.Unlock() - re, ok := regexpCache[pat] - if !ok { - var err error - re, err = compileRegexTemplate(pat, delimStart, delimEnd) - if err != nil { - return nil, err - } - regexpCache[pat] = re - } - return re, nil -} - -func builtinGlobsMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - ne, err := gintersect.NonEmpty(string(s1), string(s2)) - if err != nil { - return err - } - return iter(ast.BooleanTerm(ne)) -} - -func builtinRegexFind(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - n, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - re, err := getRegexp(bctx, string(s1)) - if err != nil { - return err - } - - elems := re.FindAllString(string(s2), n) - arr := make([]*ast.Term, len(elems)) - for i := range elems { - arr[i] = ast.StringTerm(elems[i]) - } - return iter(ast.NewTerm(ast.NewArray(arr...))) -} - -func builtinRegexFindAllStringSubmatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - n, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - re, err := getRegexp(bctx, string(s1)) - if err != 
nil { - return err - } - matches := re.FindAllStringSubmatch(string(s2), n) - - outer := make([]*ast.Term, len(matches)) - for i := range matches { - inner := make([]*ast.Term, len(matches[i])) - for j := range matches[i] { - inner[j] = ast.StringTerm(matches[i][j]) - } - outer[i] = ast.NewTerm(ast.NewArray(inner...)) - } - - return iter(ast.NewTerm(ast.NewArray(outer...))) -} - -func builtinRegexReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - base, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - pattern, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - value, err := builtins.StringOperand(operands[2].Value, 3) - if err != nil { - return err - } - - re, err := getRegexp(bctx, string(pattern)) - if err != nil { - return err - } - - res := re.ReplaceAllString(string(base), string(value)) - - return iter(ast.StringTerm(res)) -} - -func init() { - regexpCache = map[string]*regexp.Regexp{} - RegisterBuiltinFunc(ast.RegexIsValid.Name, builtinRegexIsValid) - RegisterBuiltinFunc(ast.RegexMatch.Name, builtinRegexMatch) - RegisterBuiltinFunc(ast.RegexMatchDeprecated.Name, builtinRegexMatch) - RegisterBuiltinFunc(ast.RegexSplit.Name, builtinRegexSplit) - RegisterBuiltinFunc(ast.GlobsMatch.Name, builtinGlobsMatch) - RegisterBuiltinFunc(ast.RegexTemplateMatch.Name, builtinRegexMatchTemplate) - RegisterBuiltinFunc(ast.RegexFind.Name, builtinRegexFind) - RegisterBuiltinFunc(ast.RegexFindAllStringSubmatch.Name, builtinRegexFindAllStringSubmatch) - RegisterBuiltinFunc(ast.RegexReplace.Name, builtinRegexReplace) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/runtime.go b/vendor/github.com/open-policy-agent/opa/topdown/runtime.go deleted file mode 100644 index 7d512f7c00..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/runtime.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. 
-// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" -) - -func builtinOPARuntime(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error { - - if bctx.Runtime == nil { - return iter(ast.ObjectTerm()) - } - - if bctx.Runtime.Get(ast.StringTerm("config")) != nil { - iface, err := ast.ValueToInterface(bctx.Runtime.Value, illegalResolver{}) - if err != nil { - return err - } - if object, ok := iface.(map[string]interface{}); ok { - if cfgRaw, ok := object["config"]; ok { - if config, ok := cfgRaw.(map[string]interface{}); ok { - configPurged, err := activeConfig(config) - if err != nil { - return err - } - object["config"] = configPurged - value, err := ast.InterfaceToValue(object) - if err != nil { - return err - } - return iter(ast.NewTerm(value)) - } - } - } - } - - return iter(bctx.Runtime) -} - -func init() { - RegisterBuiltinFunc(ast.OPARuntime.Name, builtinOPARuntime) -} - -func activeConfig(config map[string]interface{}) (interface{}, error) { - - if config["services"] != nil { - err := removeServiceCredentials(config["services"]) - if err != nil { - return nil, err - } - } - - if config["keys"] != nil { - err := removeCryptoKeys(config["keys"]) - if err != nil { - return nil, err - } - } - - return config, nil -} - -func removeServiceCredentials(x interface{}) error { - - switch x := x.(type) { - case []interface{}: - for _, v := range x { - err := removeKey(v, "credentials") - if err != nil { - return err - } - } - - case map[string]interface{}: - for _, v := range x { - err := removeKey(v, "credentials") - if err != nil { - return err - } - } - default: - return fmt.Errorf("illegal service config type: %T", x) - } - - return nil -} - -func removeCryptoKeys(x interface{}) error { - - switch x := x.(type) { - case map[string]interface{}: - for _, v := range x { - err := removeKey(v, "key", "private_key") - if err 
!= nil { - return err - } - } - default: - return fmt.Errorf("illegal keys config type: %T", x) - } - - return nil -} - -func removeKey(x interface{}, keys ...string) error { - val, ok := x.(map[string]interface{}) - if !ok { - return fmt.Errorf("type assertion error") - } - - for _, key := range keys { - delete(val, key) - } - - return nil -} - -type illegalResolver struct{} - -func (illegalResolver) Resolve(ref ast.Ref) (interface{}, error) { - return nil, fmt.Errorf("illegal value: %v", ref) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/semver.go b/vendor/github.com/open-policy-agent/opa/topdown/semver.go deleted file mode 100644 index 7bb7b9c183..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/semver.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/semver" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - versionStringA, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - versionStringB, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - versionA, err := semver.NewVersion(string(versionStringA)) - if err != nil { - return fmt.Errorf("operand 1: string %s is not a valid SemVer", versionStringA) - } - versionB, err := semver.NewVersion(string(versionStringB)) - if err != nil { - return fmt.Errorf("operand 2: string %s is not a valid SemVer", versionStringB) - } - - result := versionA.Compare(*versionB) - - return iter(ast.IntNumberTerm(result)) -} - -func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - versionString, err 
:= builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - result := true - - _, err = semver.NewVersion(string(versionString)) - if err != nil { - result = false - } - - return iter(ast.BooleanTerm(result)) -} - -func init() { - RegisterBuiltinFunc(ast.SemVerCompare.Name, builtinSemVerCompare) - RegisterBuiltinFunc(ast.SemVerIsValid.Name, builtinSemVerIsValid) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/sets.go b/vendor/github.com/open-policy-agent/opa/topdown/sets.go deleted file mode 100644 index a973404f3f..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/sets.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -// Deprecated in v0.4.2 in favour of minus/infix "-" operation. 
-func builtinSetDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - s1, err := builtins.SetOperand(operands[0].Value, 1) - if err != nil { - return err - } - - s2, err := builtins.SetOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.NewTerm(s1.Diff(s2))) -} - -// builtinSetIntersection returns the intersection of the given input sets -func builtinSetIntersection(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - inputSet, err := builtins.SetOperand(operands[0].Value, 1) - if err != nil { - return err - } - - // empty input set - if inputSet.Len() == 0 { - return iter(ast.NewTerm(ast.NewSet())) - } - - var result ast.Set - - err = inputSet.Iter(func(x *ast.Term) error { - n, err := builtins.SetOperand(x.Value, 1) - if err != nil { - return err - } - - if result == nil { - result = n - } else { - result = result.Intersect(n) - } - return nil - }) - if err != nil { - return err - } - return iter(ast.NewTerm(result)) -} - -// builtinSetUnion returns the union of the given input sets -func builtinSetUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // The set union logic here is duplicated and manually inlined on - // purpose. By lifting this logic up a level, and not doing pairwise - // set unions, we avoid a number of heap allocations. This improves - // performance dramatically over the naive approach. 
- result := ast.NewSet() - - inputSet, err := builtins.SetOperand(operands[0].Value, 1) - if err != nil { - return err - } - - err = inputSet.Iter(func(x *ast.Term) error { - item, err := builtins.SetOperand(x.Value, 1) - if err != nil { - return err - } - item.Foreach(result.Add) - return nil - }) - if err != nil { - return err - } - - return iter(ast.NewTerm(result)) -} - -func init() { - RegisterBuiltinFunc(ast.SetDiff.Name, builtinSetDiff) - RegisterBuiltinFunc(ast.Intersection.Name, builtinSetIntersection) - RegisterBuiltinFunc(ast.Union.Name, builtinSetUnion) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/strings.go b/vendor/github.com/open-policy-agent/opa/topdown/strings.go deleted file mode 100644 index d9e4a55e58..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/strings.go +++ /dev/null @@ -1,610 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "fmt" - "math/big" - "sort" - "strings" - - "github.com/tchap/go-patricia/v2/patricia" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - a, b := operands[0].Value, operands[1].Value - - var strs []string - switch a := a.(type) { - case ast.String: - strs = []string{string(a)} - case *ast.Array, ast.Set: - var err error - strs, err = builtins.StringSliceOperand(a, 1) - if err != nil { - return err - } - default: - return builtins.NewOperandTypeErr(1, a, "string", "set", "array") - } - - var prefixes []string - switch b := b.(type) { - case ast.String: - prefixes = []string{string(b)} - case *ast.Array, ast.Set: - var err error - prefixes, err = builtins.StringSliceOperand(b, 2) - if err != nil { - return err - } - default: - return builtins.NewOperandTypeErr(2, b, "string", "set", "array") - } - - return iter(ast.BooleanTerm(anyStartsWithAny(strs, prefixes))) -} - -func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - a, b := operands[0].Value, operands[1].Value - - var strsReversed []string - switch a := a.(type) { - case ast.String: - strsReversed = []string{reverseString(string(a))} - case *ast.Array, ast.Set: - strs, err := builtins.StringSliceOperand(a, 1) - if err != nil { - return err - } - strsReversed = make([]string, len(strs)) - for i := range strs { - strsReversed[i] = reverseString(strs[i]) - } - default: - return builtins.NewOperandTypeErr(1, a, "string", "set", "array") - } - - var suffixesReversed []string - switch b := b.(type) { - case ast.String: - suffixesReversed = []string{reverseString(string(b))} - case *ast.Array, ast.Set: - suffixes, err := builtins.StringSliceOperand(b, 2) - if err != nil { - return err - } - suffixesReversed = make([]string, len(suffixes)) - for i := range suffixes { - 
suffixesReversed[i] = reverseString(suffixes[i]) - } - default: - return builtins.NewOperandTypeErr(2, b, "string", "set", "array") - } - - return iter(ast.BooleanTerm(anyStartsWithAny(strsReversed, suffixesReversed))) -} - -func anyStartsWithAny(strs []string, prefixes []string) bool { - if len(strs) == 0 || len(prefixes) == 0 { - return false - } - if len(strs) == 1 && len(prefixes) == 1 { - return strings.HasPrefix(strs[0], prefixes[0]) - } - - trie := patricia.NewTrie() - for i := 0; i < len(strs); i++ { - trie.Insert([]byte(strs[i]), true) - } - - for i := 0; i < len(prefixes); i++ { - if trie.MatchSubtree([]byte(prefixes[i])) { - return true - } - } - - return false -} - -func builtinFormatInt(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - input, err := builtins.NumberOperand(operands[0].Value, 1) - if err != nil { - return err - } - - base, err := builtins.NumberOperand(operands[1].Value, 2) - if err != nil { - return err - } - - var format string - switch base { - case ast.Number("2"): - format = "%b" - case ast.Number("8"): - format = "%o" - case ast.Number("10"): - format = "%d" - case ast.Number("16"): - format = "%x" - default: - return builtins.NewOperandEnumErr(2, "2", "8", "10", "16") - } - - f := builtins.NumberToFloat(input) - i, _ := f.Int(nil) - - return iter(ast.StringTerm(fmt.Sprintf(format, i))) -} - -func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - join, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - strs := []string{} - - switch b := operands[1].Value.(type) { - case *ast.Array: - err := b.Iter(func(x *ast.Term) error { - s, ok := x.Value.(ast.String) - if !ok { - return builtins.NewOperandElementErr(2, operands[1].Value, x.Value, "string") - } - strs = append(strs, string(s)) - return nil - }) - if err != nil { - return err - } - case ast.Set: - err := b.Iter(func(x *ast.Term) error { - s, ok := x.Value.(ast.String) 
- if !ok { - return builtins.NewOperandElementErr(2, operands[1].Value, x.Value, "string") - } - strs = append(strs, string(s)) - return nil - }) - if err != nil { - return err - } - default: - return builtins.NewOperandTypeErr(2, operands[1].Value, "set", "array") - } - - return iter(ast.StringTerm(strings.Join(strs, string(join)))) -} - -func runesEqual(a, b []rune) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -func builtinIndexOf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - base, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - search, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - if len(string(search)) == 0 { - return fmt.Errorf("empty search character") - } - - baseRunes := []rune(string(base)) - searchRunes := []rune(string(search)) - searchLen := len(searchRunes) - - for i, r := range baseRunes { - if len(baseRunes) >= i+searchLen { - if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { - return iter(ast.IntNumberTerm(i)) - } - } else { - break - } - } - - return iter(ast.IntNumberTerm(-1)) -} - -func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - base, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - search, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - if len(string(search)) == 0 { - return fmt.Errorf("empty search character") - } - - baseRunes := []rune(string(base)) - searchRunes := []rune(string(search)) - searchLen := len(searchRunes) - - var arr []*ast.Term - for i, r := range baseRunes { - if len(baseRunes) >= i+searchLen { - if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { - arr = append(arr, ast.IntNumberTerm(i)) - } - } else { - break - } - } - - return 
iter(ast.ArrayTerm(arr...)) -} - -func builtinSubstring(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - base, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - runes := []rune(base) - - startIndex, err := builtins.IntOperand(operands[1].Value, 2) - if err != nil { - return err - } else if startIndex >= len(runes) { - return iter(ast.StringTerm("")) - } else if startIndex < 0 { - return fmt.Errorf("negative offset") - } - - length, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - var s ast.String - if length < 0 { - s = ast.String(runes[startIndex:]) - } else { - upto := startIndex + length - if len(runes) < upto { - upto = len(runes) - } - s = ast.String(runes[startIndex:upto]) - } - - return iter(ast.NewTerm(s)) -} - -func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - substr, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.BooleanTerm(strings.Contains(string(s), string(substr)))) -} - -func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - substr, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - baseTerm := string(s) - searchTerm := string(substr) - - count := strings.Count(baseTerm, searchTerm) - - return iter(ast.IntNumberTerm(count)) -} - -func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - prefix, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return 
iter(ast.BooleanTerm(strings.HasPrefix(string(s), string(prefix)))) -} - -func builtinEndsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - suffix, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.BooleanTerm(strings.HasSuffix(string(s), string(suffix)))) -} - -func builtinLower(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.ToLower(string(s)))) -} - -func builtinUpper(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.ToUpper(string(s)))) -} - -func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - d, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - elems := strings.Split(string(s), string(d)) - arr := make([]*ast.Term, len(elems)) - for i := range elems { - arr[i] = ast.StringTerm(elems[i]) - } - return iter(ast.ArrayTerm(arr...)) -} - -func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - old, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - n, err := builtins.StringOperand(operands[2].Value, 3) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.Replace(string(s), string(old), string(n), -1))) -} - -func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - 
patterns, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { - return err - } - keys := patterns.Keys() - sort.Slice(keys, func(i, j int) bool { return ast.Compare(keys[i].Value, keys[j].Value) < 0 }) - - s, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - oldnewArr := make([]string, 0, len(keys)*2) - for _, k := range keys { - keyVal, ok := k.Value.(ast.String) - if !ok { - return builtins.NewOperandErr(1, "non-string key found in pattern object") - } - val := patterns.Get(k) // cannot be nil - strVal, ok := val.Value.(ast.String) - if !ok { - return builtins.NewOperandErr(1, "non-string value found in pattern object") - } - oldnewArr = append(oldnewArr, string(keyVal), string(strVal)) - } - if err != nil { - return err - } - - r := strings.NewReplacer(oldnewArr...) - replaced := r.Replace(string(s)) - - return iter(ast.StringTerm(replaced)) -} - -func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - c, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.Trim(string(s), string(c)))) -} - -func builtinTrimLeft(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - c, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimLeft(string(s), string(c)))) -} - -func builtinTrimPrefix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - pre, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimPrefix(string(s), string(pre)))) 
-} - -func builtinTrimRight(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - c, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimRight(string(s), string(c)))) -} - -func builtinTrimSuffix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - suf, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimSuffix(string(s), string(suf)))) -} - -func builtinTrimSpace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimSpace(string(s)))) -} - -func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - astArr, ok := operands[1].Value.(*ast.Array) - if !ok { - return builtins.NewOperandTypeErr(2, operands[1].Value, "array") - } - - args := make([]interface{}, astArr.Len()) - - for i := range args { - switch v := astArr.Elem(i).Value.(type) { - case ast.Number: - if n, ok := v.Int(); ok { - args[i] = n - } else if b, ok := new(big.Int).SetString(v.String(), 10); ok { - args[i] = b - } else if f, ok := v.Float64(); ok { - args[i] = f - } else { - args[i] = v.String() - } - case ast.String: - args[i] = string(v) - default: - args[i] = astArr.Elem(i).String() - } - } - - return iter(ast.StringTerm(fmt.Sprintf(string(s), args...))) -} - -func builtinReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil 
{ - return err - } - - return iter(ast.StringTerm(reverseString(string(s)))) -} - -func reverseString(str string) string { - sRunes := []rune(str) - length := len(sRunes) - reversedRunes := make([]rune, length) - - for index, r := range sRunes { - reversedRunes[length-index-1] = r - } - - return string(reversedRunes) -} - -func init() { - RegisterBuiltinFunc(ast.FormatInt.Name, builtinFormatInt) - RegisterBuiltinFunc(ast.Concat.Name, builtinConcat) - RegisterBuiltinFunc(ast.IndexOf.Name, builtinIndexOf) - RegisterBuiltinFunc(ast.IndexOfN.Name, builtinIndexOfN) - RegisterBuiltinFunc(ast.Substring.Name, builtinSubstring) - RegisterBuiltinFunc(ast.Contains.Name, builtinContains) - RegisterBuiltinFunc(ast.StringCount.Name, builtinStringCount) - RegisterBuiltinFunc(ast.StartsWith.Name, builtinStartsWith) - RegisterBuiltinFunc(ast.EndsWith.Name, builtinEndsWith) - RegisterBuiltinFunc(ast.Upper.Name, builtinUpper) - RegisterBuiltinFunc(ast.Lower.Name, builtinLower) - RegisterBuiltinFunc(ast.Split.Name, builtinSplit) - RegisterBuiltinFunc(ast.Replace.Name, builtinReplace) - RegisterBuiltinFunc(ast.ReplaceN.Name, builtinReplaceN) - RegisterBuiltinFunc(ast.Trim.Name, builtinTrim) - RegisterBuiltinFunc(ast.TrimLeft.Name, builtinTrimLeft) - RegisterBuiltinFunc(ast.TrimPrefix.Name, builtinTrimPrefix) - RegisterBuiltinFunc(ast.TrimRight.Name, builtinTrimRight) - RegisterBuiltinFunc(ast.TrimSuffix.Name, builtinTrimSuffix) - RegisterBuiltinFunc(ast.TrimSpace.Name, builtinTrimSpace) - RegisterBuiltinFunc(ast.Sprintf.Name, builtinSprintf) - RegisterBuiltinFunc(ast.AnyPrefixMatch.Name, builtinAnyPrefixMatch) - RegisterBuiltinFunc(ast.AnySuffixMatch.Name, builtinAnySuffixMatch) - RegisterBuiltinFunc(ast.StringReverse.Name, builtinReverse) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/template.go b/vendor/github.com/open-policy-agent/opa/topdown/template.go deleted file mode 100644 index cf42477ee8..0000000000 --- 
a/vendor/github.com/open-policy-agent/opa/topdown/template.go +++ /dev/null @@ -1,45 +0,0 @@ -package topdown - -import ( - "bytes" - "text/template" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -func renderTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - preContentTerm, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - templateVariablesTerm, err := builtins.ObjectOperand(operands[1].Value, 2) - if err != nil { - return err - } - - var templateVariables map[string]interface{} - - if err := ast.As(templateVariablesTerm, &templateVariables); err != nil { - return err - } - - tmpl, err := template.New("template").Parse(string(preContentTerm)) - if err != nil { - return err - } - - // Do not attempt to render if template variable keys are missing - tmpl.Option("missingkey=error") - var buf bytes.Buffer - if err := tmpl.Execute(&buf, templateVariables); err != nil { - return err - } - - return iter(ast.StringTerm(buf.String())) -} - -func init() { - RegisterBuiltinFunc(ast.RenderTemplate.Name, renderTemplate) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/time.go b/vendor/github.com/open-policy-agent/opa/topdown/time.go deleted file mode 100644 index ba3efc75dc..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/time.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "encoding/json" - "fmt" - "math" - "math/big" - "strconv" - "sync" - "time" - _ "time/tzdata" // this is needed to have LoadLocation when no filesystem tzdata is available - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -var tzCache map[string]*time.Location -var tzCacheMutex *sync.Mutex - -// 1677-09-21T00:12:43.145224192-00:00 -var minDateAllowedForNsConversion = time.Unix(0, math.MinInt64) - -// 2262-04-11T23:47:16.854775807-00:00 -var maxDateAllowedForNsConversion = time.Unix(0, math.MaxInt64) - -func toSafeUnixNano(t time.Time, iter func(*ast.Term) error) error { - if t.Before(minDateAllowedForNsConversion) || t.After(maxDateAllowedForNsConversion) { - return fmt.Errorf("time outside of valid range") - } - - return iter(ast.NewTerm(ast.Number(int64ToJSONNumber(t.UnixNano())))) -} - -func builtinTimeNowNanos(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error { - return iter(bctx.Time) -} - -func builtinTimeParseNanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - format, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - value, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - formatStr := string(format) - // look for the formatStr in our acceptedTimeFormats and - // use the constant instead if it matches - if f, ok := acceptedTimeFormats[formatStr]; ok { - formatStr = f - } - result, err := time.Parse(formatStr, string(value)) - if err != nil { - return err - } - - return toSafeUnixNano(result, iter) -} - -func builtinTimeParseRFC3339Nanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - value, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - result, err := time.Parse(time.RFC3339, string(value)) - if err != nil { - return err - } - - return toSafeUnixNano(result, iter) -} 
-func builtinParseDurationNanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - duration, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - value, err := time.ParseDuration(string(duration)) - if err != nil { - return err - } - return iter(ast.NumberTerm(int64ToJSONNumber(int64(value)))) -} - -// Represent exposed constants for formatting from the stdlib time pkg -var acceptedTimeFormats = map[string]string{ - "ANSIC": time.ANSIC, - "UnixDate": time.UnixDate, - "RubyDate": time.RubyDate, - "RFC822": time.RFC822, - "RFC822Z": time.RFC822Z, - "RFC850": time.RFC850, - "RFC1123": time.RFC1123, - "RFC1123Z": time.RFC1123Z, - "RFC3339": time.RFC3339, - "RFC3339Nano": time.RFC3339Nano, -} - -func builtinFormat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, layout, err := tzTime(operands[0].Value) - if err != nil { - return err - } - // Using RFC3339Nano time formatting as default - if layout == "" { - layout = time.RFC3339Nano - } else if layoutStr, ok := acceptedTimeFormats[layout]; ok { - // if we can find a constant specified, use the constant - layout = layoutStr - } - // otherwise try to treat the fmt string as a datetime fmt string - - timestamp := t.Format(layout) - return iter(ast.StringTerm(timestamp)) -} - -func builtinDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - year, month, day := t.Date() - result := ast.NewArray(ast.IntNumberTerm(year), ast.IntNumberTerm(int(month)), ast.IntNumberTerm(day)) - return iter(ast.NewTerm(result)) -} - -func builtinClock(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - hour, minute, second := t.Clock() - result := ast.NewArray(ast.IntNumberTerm(hour), ast.IntNumberTerm(minute), ast.IntNumberTerm(second)) - return 
iter(ast.NewTerm(result)) -} - -func builtinWeekday(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - weekday := t.Weekday().String() - return iter(ast.StringTerm(weekday)) -} - -func builtinAddDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - - years, err := builtins.IntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - months, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - days, err := builtins.IntOperand(operands[3].Value, 4) - if err != nil { - return err - } - - result := t.AddDate(years, months, days) - - return toSafeUnixNano(result, iter) -} - -func builtinDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t1, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - t2, _, err := tzTime(operands[1].Value) - if err != nil { - return err - } - - // The following implementation of this function is taken - // from https://github.com/icza/gox licensed under Apache 2.0. - // The only modification made is to variable names. 
- // - // For details, see https://stackoverflow.com/a/36531443/1705598 - // - // Copyright 2021 icza - // BEGIN REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT - if t1.Location() != t2.Location() { - t2 = t2.In(t1.Location()) - } - if t1.After(t2) { - t1, t2 = t2, t1 - } - y1, M1, d1 := t1.Date() - y2, M2, d2 := t2.Date() - - h1, m1, s1 := t1.Clock() - h2, m2, s2 := t2.Clock() - - year := y2 - y1 - month := int(M2 - M1) - day := d2 - d1 - hour := h2 - h1 - min := m2 - m1 - sec := s2 - s1 - - // Normalize negative values - if sec < 0 { - sec += 60 - min-- - } - if min < 0 { - min += 60 - hour-- - } - if hour < 0 { - hour += 24 - day-- - } - if day < 0 { - // Days in month: - t := time.Date(y1, M1, 32, 0, 0, 0, 0, time.UTC) - day += 32 - t.Day() - month-- - } - if month < 0 { - month += 12 - year-- - } - // END REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT - - return iter(ast.ArrayTerm(ast.IntNumberTerm(year), ast.IntNumberTerm(month), ast.IntNumberTerm(day), - ast.IntNumberTerm(hour), ast.IntNumberTerm(min), ast.IntNumberTerm(sec))) -} - -func tzTime(a ast.Value) (t time.Time, lay string, err error) { - var nVal ast.Value - loc := time.UTC - layout := "" - switch va := a.(type) { - case *ast.Array: - if va.Len() == 0 { - return time.Time{}, layout, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]") - } - - nVal, err = builtins.NumberOperand(va.Elem(0).Value, 1) - if err != nil { - return time.Time{}, layout, err - } - - if va.Len() > 1 { - tzVal, err := builtins.StringOperand(va.Elem(1).Value, 1) - if err != nil { - return time.Time{}, layout, err - } - - tzName := string(tzVal) - - switch tzName { - case "", "UTC": - // loc is already UTC - - case "Local": - loc = time.Local - - default: - var ok bool - - tzCacheMutex.Lock() - loc, ok = tzCache[tzName] - - if !ok { - loc, err = time.LoadLocation(tzName) - if err != nil { - tzCacheMutex.Unlock() - return time.Time{}, layout, err - } - tzCache[tzName] = loc - } - 
tzCacheMutex.Unlock() - } - } - - if va.Len() > 2 { - lay, err := builtins.StringOperand(va.Elem(2).Value, 1) - if err != nil { - return time.Time{}, layout, err - } - layout = string(lay) - } - - case ast.Number: - nVal = a - - default: - return time.Time{}, layout, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]") - } - - value, err := builtins.NumberOperand(nVal, 1) - if err != nil { - return time.Time{}, layout, err - } - - f := builtins.NumberToFloat(value) - i64, acc := f.Int64() - if acc != big.Exact { - return time.Time{}, layout, fmt.Errorf("timestamp too big") - } - - t = time.Unix(0, i64).In(loc) - - return t, layout, nil -} - -func int64ToJSONNumber(i int64) json.Number { - return json.Number(strconv.FormatInt(i, 10)) -} - -func init() { - RegisterBuiltinFunc(ast.NowNanos.Name, builtinTimeNowNanos) - RegisterBuiltinFunc(ast.ParseRFC3339Nanos.Name, builtinTimeParseRFC3339Nanos) - RegisterBuiltinFunc(ast.ParseNanos.Name, builtinTimeParseNanos) - RegisterBuiltinFunc(ast.ParseDurationNanos.Name, builtinParseDurationNanos) - RegisterBuiltinFunc(ast.Format.Name, builtinFormat) - RegisterBuiltinFunc(ast.Date.Name, builtinDate) - RegisterBuiltinFunc(ast.Clock.Name, builtinClock) - RegisterBuiltinFunc(ast.Weekday.Name, builtinWeekday) - RegisterBuiltinFunc(ast.AddDate.Name, builtinAddDate) - RegisterBuiltinFunc(ast.Diff.Name, builtinDiff) - tzCacheMutex = &sync.Mutex{} - tzCache = make(map[string]*time.Location) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go b/vendor/github.com/open-policy-agent/opa/topdown/tokens.go deleted file mode 100644 index 7457f1f15d..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go +++ /dev/null @@ -1,1246 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "crypto" - "crypto/ecdsa" - "crypto/hmac" - "crypto/rsa" - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/hex" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "hash" - "math/big" - "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jwk" - "github.com/open-policy-agent/opa/internal/jwx/jws" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -var ( - jwtEncKey = ast.StringTerm("enc") - jwtCtyKey = ast.StringTerm("cty") - jwtIssKey = ast.StringTerm("iss") - jwtExpKey = ast.StringTerm("exp") - jwtNbfKey = ast.StringTerm("nbf") - jwtAudKey = ast.StringTerm("aud") -) - -const ( - headerJwt = "JWT" -) - -// JSONWebToken represent the 3 parts (header, payload & signature) of -// -// a JWT in Base64. -type JSONWebToken struct { - header string - payload string - signature string - decodedHeader ast.Object -} - -// decodeHeader populates the decodedHeader field. -func (token *JSONWebToken) decodeHeader() error { - result, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.header)) - if err != nil { - return fmt.Errorf("JWT header had invalid encoding: %w", err) - } - decodedHeader, err := validateJWTHeader(string(result.Value.(ast.String))) - if err != nil { - return err - } - token.decodedHeader = decodedHeader - return nil -} - -// Implements JWT decoding/validation based on RFC 7519 Section 7.2: -// https://tools.ietf.org/html/rfc7519#section-7.2 -// It does no data validation, it merely checks that the given string -// represents a structurally valid JWT. It supports JWTs using JWS compact -// serialization. 
-func builtinJWTDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - token, err := decodeJWT(operands[0].Value) - if err != nil { - return err - } - - if err = token.decodeHeader(); err != nil { - return err - } - - p, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) - if err != nil { - return fmt.Errorf("JWT payload had invalid encoding: %v", err) - } - - if cty := token.decodedHeader.Get(jwtCtyKey); cty != nil { - ctyVal := string(cty.Value.(ast.String)) - // It is possible for the contents of a token to be another - // token as a result of nested signing or encryption. To handle - // the case where we are given a token such as this, we check - // the content type and recurse on the payload if the content - // is "JWT". - // When the payload is itself another encoded JWT, then its - // contents are quoted (behavior of https://jwt.io/). To fix - // this, remove leading and trailing quotes. - if ctyVal == headerJwt { - p, err = getResult(builtinTrim, p, ast.StringTerm(`"'`)) - if err != nil { - panic("not reached") - } - result, err := getResult(builtinJWTDecode, p) - if err != nil { - return err - } - return iter(result) - } - } - - payload, err := extractJSONObject(string(p.Value.(ast.String))) - if err != nil { - return err - } - - s, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.signature)) - if err != nil { - return fmt.Errorf("JWT signature had invalid encoding: %v", err) - } - sign := hex.EncodeToString([]byte(s.Value.(ast.String))) - - arr := []*ast.Term{ - ast.NewTerm(token.decodedHeader), - ast.NewTerm(payload), - ast.StringTerm(sign), - } - - return iter(ast.ArrayTerm(arr...)) -} - -// Implements RS256 JWT signature verification -func builtinJWTVerifyRS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error 
{ - return rsa.VerifyPKCS1v15( - publicKey, - crypto.SHA256, - digest, - signature) - }) - if err == nil { - return iter(ast.NewTerm(result)) - } - return err -} - -// Implements RS384 JWT signature verification -func builtinJWTVerifyRS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { - return rsa.VerifyPKCS1v15( - publicKey, - crypto.SHA384, - digest, - signature) - }) - if err == nil { - return iter(ast.NewTerm(result)) - } - return err -} - -// Implements RS512 JWT signature verification -func builtinJWTVerifyRS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { - return rsa.VerifyPKCS1v15( - publicKey, - crypto.SHA512, - digest, - signature) - }) - if err == nil { - return iter(ast.NewTerm(result)) - } - return err -} - -// Implements PS256 JWT signature verification -func builtinJWTVerifyPS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { - return rsa.VerifyPSS( - publicKey, - crypto.SHA256, - digest, - signature, - nil) - }) - if err == nil { - return iter(ast.NewTerm(result)) - } - return err -} - -// Implements PS384 JWT signature verification -func builtinJWTVerifyPS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { - return rsa.VerifyPSS( - publicKey, - crypto.SHA384, - digest, - signature, - nil) - }) - if err 
== nil { - return iter(ast.NewTerm(result)) - } - return err -} - -// Implements PS512 JWT signature verification -func builtinJWTVerifyPS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { - return rsa.VerifyPSS( - publicKey, - crypto.SHA512, - digest, - signature, - nil) - }) - if err == nil { - return iter(ast.NewTerm(result)) - } - return err -} - -// Implements RSA JWT signature verification. -func builtinJWTVerifyRSA(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (ast.Value, error) { - return builtinJWTVerify(a, b, hasher, func(publicKey interface{}, digest []byte, signature []byte) error { - publicKeyRsa, ok := publicKey.(*rsa.PublicKey) - if !ok { - return fmt.Errorf("incorrect public key type") - } - return verify(publicKeyRsa, digest, signature) - }) -} - -// Implements ES256 JWT signature verification. 
-func builtinJWTVerifyES256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha256.New, verifyES) - if err == nil { - return iter(ast.NewTerm(result)) - } - return err -} - -// Implements ES384 JWT signature verification -func builtinJWTVerifyES384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New384, verifyES) - if err == nil { - return iter(ast.NewTerm(result)) - } - return err -} - -// Implements ES512 JWT signature verification -func builtinJWTVerifyES512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New, verifyES) - if err == nil { - return iter(ast.NewTerm(result)) - } - return err -} - -func verifyES(publicKey interface{}, digest []byte, signature []byte) (err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("ECDSA signature verification error: %v", r) - } - }() - publicKeyEcdsa, ok := publicKey.(*ecdsa.PublicKey) - if !ok { - return fmt.Errorf("incorrect public key type") - } - r, s := &big.Int{}, &big.Int{} - n := len(signature) / 2 - r.SetBytes(signature[:n]) - s.SetBytes(signature[n:]) - if ecdsa.Verify(publicKeyEcdsa, digest, r, s) { - return nil - } - return fmt.Errorf("ECDSA signature verification error") -} - -type verificationKey struct { - alg string - kid string - key interface{} -} - -// getKeysFromCertOrJWK returns the public key found in a X.509 certificate or JWK key(s). -// A valid PEM block is never valid JSON (and vice versa), hence can try parsing both. -// When provided a JWKS, each key additionally likely contains a key ID and the key algorithm. 
-func getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) { - if block, rest := pem.Decode([]byte(certificate)); block != nil { - if len(rest) > 0 { - return nil, fmt.Errorf("extra data after a PEM certificate block") - } - - if block.Type == blockTypeCertificate { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, fmt.Errorf("failed to parse a PEM certificate: %w", err) - } - return []verificationKey{{key: cert.PublicKey}}, nil - } - - if block.Type == "PUBLIC KEY" { - key, err := x509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("failed to parse a PEM public key: %w", err) - } - - return []verificationKey{{key: key}}, nil - } - - return nil, fmt.Errorf("failed to extract a Key from the PEM certificate") - } - - jwks, err := jwk.ParseString(certificate) - if err != nil { - return nil, fmt.Errorf("failed to parse a JWK key (set): %w", err) - } - - keys := make([]verificationKey, 0, len(jwks.Keys)) - for _, k := range jwks.Keys { - key, err := k.Materialize() - if err != nil { - return nil, err - } - keys = append(keys, verificationKey{ - alg: k.GetAlgorithm().String(), - kid: k.GetKeyID(), - key: key, - }) - } - - return keys, nil -} - -func getKeyByKid(kid string, keys []verificationKey) *verificationKey { - for _, key := range keys { - if key.kid == kid { - return &key - } - } - return nil -} - -// Implements JWT signature verification. 
-func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey interface{}, digest []byte, signature []byte) error) (ast.Value, error) { - token, err := decodeJWT(a) - if err != nil { - return nil, err - } - - s, err := builtins.StringOperand(b, 2) - if err != nil { - return nil, err - } - - keys, err := getKeysFromCertOrJWK(string(s)) - if err != nil { - return nil, err - } - - signature, err := token.decodeSignature() - if err != nil { - return nil, err - } - - err = token.decodeHeader() - if err != nil { - return nil, err - } - header, err := parseTokenHeader(token) - if err != nil { - return nil, err - } - - // Validate the JWT signature - - // First, check if there's a matching key ID (`kid`) in both token header and key(s). - // If a match is found, verify using only that key. Only applicable when a JWKS was provided. - if header.kid != "" { - if key := getKeyByKid(header.kid, keys); key != nil { - err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) - - return ast.Boolean(err == nil), nil - } - } - - // If no key ID matched, try to verify using any key in the set - // If an alg is present in both the JWT header and the key, skip verification unless they match - for _, key := range keys { - if key.alg == "" { - // No algorithm provided for the key - this is likely a certificate and not a JWKS, so - // we'll need to verify to find out - err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) - if err == nil { - return ast.Boolean(true), nil - } - } else { - if header.alg != key.alg { - continue - } - err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) - if err == nil { - return ast.Boolean(true), nil - } - } - } - - // None of the keys worked, return false - return ast.Boolean(false), nil -} - -// Implements HS256 (secret) JWT signature verification -func builtinJWTVerifyHS256(_ 
BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) - if err != nil { - return err - } - - // Process Secret input - astSecret, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - secret := string(astSecret) - - mac := hmac.New(sha256.New, []byte(secret)) - _, err = mac.Write([]byte(token.header + "." + token.payload)) - if err != nil { - return err - } - - signature, err := token.decodeSignature() - if err != nil { - return err - } - - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) -} - -// Implements HS384 JWT signature verification -func builtinJWTVerifyHS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) - if err != nil { - return err - } - - // Process Secret input - astSecret, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - secret := string(astSecret) - - mac := hmac.New(sha512.New384, []byte(secret)) - _, err = mac.Write([]byte(token.header + "." + token.payload)) - if err != nil { - return err - } - - signature, err := token.decodeSignature() - if err != nil { - return err - } - - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) -} - -// Implements HS512 JWT signature verification -func builtinJWTVerifyHS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) - if err != nil { - return err - } - - // Process Secret input - astSecret, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - secret := string(astSecret) - - mac := hmac.New(sha512.New, []byte(secret)) - _, err = mac.Write([]byte(token.header + "." 
+ token.payload)) - if err != nil { - return err - } - - signature, err := token.decodeSignature() - if err != nil { - return err - } - - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) -} - -// -- Full JWT verification and decoding -- - -// Verification constraints. See tokens_test.go for unit tests. - -// tokenConstraints holds decoded JWT verification constraints. -type tokenConstraints struct { - // The set of asymmetric keys we can verify with. - keys []verificationKey - - // The single symmetric key we will verify with. - secret string - - // The algorithm that must be used to verify. - // If "", any algorithm is acceptable. - alg string - - // The required issuer. - // If "", any issuer is acceptable. - iss string - - // The required audience. - // If "", no audience is acceptable. - aud string - - // The time to validate against, or -1 if no constraint set. - // (If unset, the current time will be used.) - time float64 -} - -// tokenConstraintHandler is the handler type for JWT verification constraints. -type tokenConstraintHandler func(value ast.Value, parameters *tokenConstraints) error - -// tokenConstraintTypes maps known JWT verification constraints to handlers. -var tokenConstraintTypes = map[string]tokenConstraintHandler{ - "cert": tokenConstraintCert, - "secret": func(value ast.Value, constraints *tokenConstraints) error { - return tokenConstraintString("secret", value, &constraints.secret) - }, - "alg": func(value ast.Value, constraints *tokenConstraints) error { - return tokenConstraintString("alg", value, &constraints.alg) - }, - "iss": func(value ast.Value, constraints *tokenConstraints) error { - return tokenConstraintString("iss", value, &constraints.iss) - }, - "aud": func(value ast.Value, constraints *tokenConstraints) error { - return tokenConstraintString("aud", value, &constraints.aud) - }, - "time": tokenConstraintTime, -} - -// tokenConstraintCert handles the `cert` constraint. 
-func tokenConstraintCert(value ast.Value, constraints *tokenConstraints) error { - s, ok := value.(ast.String) - if !ok { - return fmt.Errorf("cert constraint: must be a string") - } - - keys, err := getKeysFromCertOrJWK(string(s)) - if err != nil { - return err - } - - constraints.keys = keys - return nil -} - -// tokenConstraintTime handles the `time` constraint. -func tokenConstraintTime(value ast.Value, constraints *tokenConstraints) error { - t, err := timeFromValue(value) - if err != nil { - return err - } - constraints.time = t - return nil -} - -func timeFromValue(value ast.Value) (float64, error) { - time, ok := value.(ast.Number) - if !ok { - return 0, fmt.Errorf("token time constraint: must be a number") - } - timeFloat, ok := time.Float64() - if !ok { - return 0, fmt.Errorf("token time constraint: unvalid float64") - } - if timeFloat < 0 { - return 0, fmt.Errorf("token time constraint: must not be negative") - } - return timeFloat, nil -} - -// tokenConstraintString handles string constraints. -func tokenConstraintString(name string, value ast.Value, where *string) error { - av, ok := value.(ast.String) - if !ok { - return fmt.Errorf("%s constraint: must be a string", name) - } - *where = string(av) - return nil -} - -// parseTokenConstraints parses the constraints argument. -func parseTokenConstraints(o ast.Object, wallclock *ast.Term) (*tokenConstraints, error) { - constraints := tokenConstraints{ - time: -1, - } - if err := o.Iter(func(k *ast.Term, v *ast.Term) error { - name := string(k.Value.(ast.String)) - handler, ok := tokenConstraintTypes[name] - if ok { - return handler(v.Value, &constraints) - } - // Anything unknown is rejected. 
- return fmt.Errorf("unknown token validation constraint: %s", name) - }); err != nil { - return nil, err - } - if constraints.time == -1 { // no time provided in constraint object - t, err := timeFromValue(wallclock.Value) - if err != nil { - return nil, err - } - constraints.time = t - } - return &constraints, nil -} - -// validate validates the constraints argument. -func (constraints *tokenConstraints) validate() error { - keys := 0 - if constraints.keys != nil { - keys++ - } - if constraints.secret != "" { - keys++ - } - if keys > 1 { - return fmt.Errorf("duplicate key constraints") - } - if keys < 1 { - return fmt.Errorf("no key constraint") - } - return nil -} - -// verify verifies a JWT using the constraints and the algorithm from the header -func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature string) error { - // Construct the payload - plaintext := []byte(header) - plaintext = append(plaintext, []byte(".")...) - plaintext = append(plaintext, payload...) 
- // Look up the algorithm - a, ok := tokenAlgorithms[alg] - if !ok { - return fmt.Errorf("unknown JWS algorithm: %s", alg) - } - // If we're configured with asymmetric key(s) then only trust that - if constraints.keys != nil { - if kid != "" { - if key := getKeyByKid(kid, constraints.keys); key != nil { - err := a.verify(key.key, a.hash, plaintext, []byte(signature)) - if err != nil { - return errSignatureNotVerified - } - return nil - } - } - - verified := false - for _, key := range constraints.keys { - if key.alg == "" { - err := a.verify(key.key, a.hash, plaintext, []byte(signature)) - if err == nil { - verified = true - break - } - } else { - if alg != key.alg { - continue - } - err := a.verify(key.key, a.hash, plaintext, []byte(signature)) - if err == nil { - verified = true - break - } - } - } - - if !verified { - return errSignatureNotVerified - } - return nil - } - if constraints.secret != "" { - return a.verify([]byte(constraints.secret), a.hash, plaintext, []byte(signature)) - } - // (*tokenConstraints)validate() should prevent this happening - return errors.New("unexpectedly found no keys to trust") -} - -// validAudience checks the audience of the JWT. -// It returns true if it meets the constraints and false otherwise. 
-func (constraints *tokenConstraints) validAudience(aud ast.Value) bool { - s, ok := aud.(ast.String) - if ok { - return string(s) == constraints.aud - } - a, ok := aud.(*ast.Array) - if !ok { - return false - } - return a.Until(func(elem *ast.Term) bool { - if s, ok := elem.Value.(ast.String); ok { - return string(s) == constraints.aud - } - return false - }) -} - -// JWT algorithms - -type ( - tokenVerifyFunction func(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error - tokenVerifyAsymmetricFunction func(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error -) - -// jwtAlgorithm describes a JWS 'alg' value -type tokenAlgorithm struct { - hash crypto.Hash - verify tokenVerifyFunction -} - -// tokenAlgorithms is the known JWT algorithms -var tokenAlgorithms = map[string]tokenAlgorithm{ - "RS256": {crypto.SHA256, verifyAsymmetric(verifyRSAPKCS)}, - "RS384": {crypto.SHA384, verifyAsymmetric(verifyRSAPKCS)}, - "RS512": {crypto.SHA512, verifyAsymmetric(verifyRSAPKCS)}, - "PS256": {crypto.SHA256, verifyAsymmetric(verifyRSAPSS)}, - "PS384": {crypto.SHA384, verifyAsymmetric(verifyRSAPSS)}, - "PS512": {crypto.SHA512, verifyAsymmetric(verifyRSAPSS)}, - "ES256": {crypto.SHA256, verifyAsymmetric(verifyECDSA)}, - "ES384": {crypto.SHA384, verifyAsymmetric(verifyECDSA)}, - "ES512": {crypto.SHA512, verifyAsymmetric(verifyECDSA)}, - "HS256": {crypto.SHA256, verifyHMAC}, - "HS384": {crypto.SHA384, verifyHMAC}, - "HS512": {crypto.SHA512, verifyHMAC}, -} - -// errSignatureNotVerified is returned when a signature cannot be verified. 
-var errSignatureNotVerified = errors.New("signature not verified") - -func verifyHMAC(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error { - macKey, ok := key.([]byte) - if !ok { - return fmt.Errorf("incorrect symmetric key type") - } - mac := hmac.New(hash.New, macKey) - if _, err := mac.Write(payload); err != nil { - return err - } - if !hmac.Equal(signature, mac.Sum([]byte{})) { - return errSignatureNotVerified - } - return nil -} - -func verifyAsymmetric(verify tokenVerifyAsymmetricFunction) tokenVerifyFunction { - return func(key interface{}, hash crypto.Hash, payload []byte, signature []byte) error { - h := hash.New() - h.Write(payload) - return verify(key, hash, h.Sum([]byte{}), signature) - } -} - -func verifyRSAPKCS(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error { - publicKeyRsa, ok := key.(*rsa.PublicKey) - if !ok { - return fmt.Errorf("incorrect public key type") - } - if err := rsa.VerifyPKCS1v15(publicKeyRsa, hash, digest, signature); err != nil { - return errSignatureNotVerified - } - return nil -} - -func verifyRSAPSS(key interface{}, hash crypto.Hash, digest []byte, signature []byte) error { - publicKeyRsa, ok := key.(*rsa.PublicKey) - if !ok { - return fmt.Errorf("incorrect public key type") - } - if err := rsa.VerifyPSS(publicKeyRsa, hash, digest, signature, nil); err != nil { - return errSignatureNotVerified - } - return nil -} - -func verifyECDSA(key interface{}, _ crypto.Hash, digest []byte, signature []byte) (err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("ECDSA signature verification error: %v", r) - } - }() - publicKeyEcdsa, ok := key.(*ecdsa.PublicKey) - if !ok { - return fmt.Errorf("incorrect public key type") - } - r, s := &big.Int{}, &big.Int{} - n := len(signature) / 2 - r.SetBytes(signature[:n]) - s.SetBytes(signature[n:]) - if ecdsa.Verify(publicKeyEcdsa, digest, r, s) { - return nil - } - return errSignatureNotVerified -} - -// JWT header parsing 
and parameters. See tokens_test.go for unit tests. - -// tokenHeaderType represents a recognized JWT header field -// tokenHeader is a parsed JWT header -type tokenHeader struct { - alg string - kid string - typ string - cty string - crit map[string]bool - unknown []string -} - -// tokenHeaderHandler handles a JWT header parameters -type tokenHeaderHandler func(header *tokenHeader, value ast.Value) error - -// tokenHeaderTypes maps known JWT header parameters to handlers -var tokenHeaderTypes = map[string]tokenHeaderHandler{ - "alg": func(header *tokenHeader, value ast.Value) error { - return tokenHeaderString("alg", &header.alg, value) - }, - "kid": func(header *tokenHeader, value ast.Value) error { - return tokenHeaderString("kid", &header.kid, value) - }, - "typ": func(header *tokenHeader, value ast.Value) error { - return tokenHeaderString("typ", &header.typ, value) - }, - "cty": func(header *tokenHeader, value ast.Value) error { - return tokenHeaderString("cty", &header.cty, value) - }, - "crit": tokenHeaderCrit, -} - -// tokenHeaderCrit handles the 'crit' header parameter -func tokenHeaderCrit(header *tokenHeader, value ast.Value) error { - v, ok := value.(*ast.Array) - if !ok { - return fmt.Errorf("crit: must be a list") - } - header.crit = map[string]bool{} - _ = v.Iter(func(elem *ast.Term) error { - tv, ok := elem.Value.(ast.String) - if !ok { - return fmt.Errorf("crit: must be a list of strings") - } - header.crit[string(tv)] = true - return nil - }) - if len(header.crit) == 0 { - return fmt.Errorf("crit: must be a nonempty list") // 'MUST NOT' use the empty list - } - return nil -} - -// tokenHeaderString handles string-format JWT header parameters -func tokenHeaderString(name string, where *string, value ast.Value) error { - v, ok := value.(ast.String) - if !ok { - return fmt.Errorf("%s: must be a string", name) - } - *where = string(v) - return nil -} - -// parseTokenHeader parses the JWT header. 
-func parseTokenHeader(token *JSONWebToken) (*tokenHeader, error) { - header := tokenHeader{ - unknown: []string{}, - } - if err := token.decodedHeader.Iter(func(k *ast.Term, v *ast.Term) error { - ks := string(k.Value.(ast.String)) - handler, ok := tokenHeaderTypes[ks] - if !ok { - header.unknown = append(header.unknown, ks) - return nil - } - return handler(&header, v.Value) - }); err != nil { - return nil, err - } - return &header, nil -} - -// validTokenHeader returns true if the JOSE header is valid, otherwise false. -func (header *tokenHeader) valid() bool { - // RFC7515 s4.1.1 alg MUST be present - if header.alg == "" { - return false - } - // RFC7515 4.1.11 JWS is invalid if there is a critical parameter that we did not recognize - for _, u := range header.unknown { - if header.crit[u] { - return false - } - } - return true -} - -func commonBuiltinJWTEncodeSign(bctx BuiltinContext, inputHeaders, jwsPayload, jwkSrc string, iter func(*ast.Term) error) error { - keys, err := jwk.ParseString(jwkSrc) - if err != nil { - return err - } - key, err := keys.Keys[0].Materialize() - if err != nil { - return err - } - if jwk.GetKeyTypeFromKey(key) != keys.Keys[0].GetKeyType() { - return fmt.Errorf("JWK derived key type and keyType parameter do not match") - } - - standardHeaders := &jws.StandardHeaders{} - jwsHeaders := []byte(inputHeaders) - err = json.Unmarshal(jwsHeaders, standardHeaders) - if err != nil { - return err - } - alg := standardHeaders.GetAlgorithm() - if alg == jwa.Unsupported { - return fmt.Errorf("unknown signature algorithm") - } - - if (standardHeaders.Type == "" || standardHeaders.Type == headerJwt) && !json.Valid([]byte(jwsPayload)) { - return fmt.Errorf("type is JWT but payload is not JSON") - } - - // process payload and sign - var jwsCompact []byte - jwsCompact, err = jws.SignLiteral([]byte(jwsPayload), alg, key, jwsHeaders, bctx.Seed) - if err != nil { - return err - } - - return iter(ast.StringTerm(string(jwsCompact))) -} - -func 
builtinJWTEncodeSign(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - inputHeadersAsJSON, err := ast.JSON(operands[0].Value) - if err != nil { - return fmt.Errorf("failed to prepare JWT headers for marshalling: %v", err) - } - - inputHeadersBs, err := json.Marshal(inputHeadersAsJSON) - if err != nil { - return fmt.Errorf("failed to marshal JWT headers: %v", err) - } - - payloadAsJSON, err := ast.JSON(operands[1].Value) - if err != nil { - return fmt.Errorf("failed to prepare JWT payload for marshalling: %v", err) - } - - payloadBs, err := json.Marshal(payloadAsJSON) - if err != nil { - return fmt.Errorf("failed to marshal JWT payload: %v", err) - } - - signatureAsJSON, err := ast.JSON(operands[2].Value) - if err != nil { - return fmt.Errorf("failed to prepare JWT signature for marshalling: %v", err) - } - - signatureBs, err := json.Marshal(signatureAsJSON) - if err != nil { - return fmt.Errorf("failed to marshal JWT signature: %v", err) - } - - return commonBuiltinJWTEncodeSign( - bctx, - string(inputHeadersBs), - string(payloadBs), - string(signatureBs), - iter, - ) -} - -func builtinJWTEncodeSignRaw(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - jwkSrc, err := builtins.StringOperand(operands[2].Value, 3) - if err != nil { - return err - } - inputHeaders, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - jwsPayload, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - return commonBuiltinJWTEncodeSign(bctx, string(inputHeaders), string(jwsPayload), string(jwkSrc), iter) -} - -// Implements full JWT decoding, validation and verification. -func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // io.jwt.decode_verify(string, constraints, [valid, header, payload]) - // - // If valid is true then the signature verifies and all constraints are met. 
- // If valid is false then either the signature did not verify or some constrain - // was not met. - // - // Decoding errors etc are returned as errors. - a := operands[0].Value - - b, err := builtins.ObjectOperand(operands[1].Value, 2) - if err != nil { - return err - } - - unverified := ast.ArrayTerm( - ast.BooleanTerm(false), - ast.NewTerm(ast.NewObject()), - ast.NewTerm(ast.NewObject()), - ) - constraints, err := parseTokenConstraints(b, bctx.Time) - if err != nil { - return err - } - if err := constraints.validate(); err != nil { - return err - } - var token *JSONWebToken - var p *ast.Term - for { - // RFC7519 7.2 #1-2 split into parts - if token, err = decodeJWT(a); err != nil { - return err - } - // RFC7519 7.2 #3, #4, #6 - if err := token.decodeHeader(); err != nil { - return err - } - // RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields - header, err := parseTokenHeader(token) - if err != nil { - return err - } - if !header.valid() { - return iter(unverified) - } - // Check constraints that impact signature verification. - if constraints.alg != "" && constraints.alg != header.alg { - return iter(unverified) - } - // RFC7159 7.2 #7 verify the signature - signature, err := token.decodeSignature() - if err != nil { - return err - } - if err := constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil { - if err == errSignatureNotVerified { - return iter(unverified) - } - return err - } - // RFC7159 7.2 #9-10 decode the payload - p, err = getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) - if err != nil { - return fmt.Errorf("JWT payload had invalid encoding: %v", err) - } - // RFC7159 7.2 #8 and 5.2 cty - if strings.ToUpper(header.cty) == headerJwt { - // Nested JWT, go round again with payload as first argument - a = p.Value - continue - } - // Non-nested JWT (or we've reached the bottom of the nesting). 
- break - } - payload, err := extractJSONObject(string(p.Value.(ast.String))) - if err != nil { - return err - } - // Check registered claim names against constraints or environment - // RFC7159 4.1.1 iss - if constraints.iss != "" { - if iss := payload.Get(jwtIssKey); iss != nil { - issVal := string(iss.Value.(ast.String)) - if constraints.iss != issVal { - return iter(unverified) - } - } else { - return iter(unverified) - } - } - // RFC7159 4.1.3 aud - if aud := payload.Get(jwtAudKey); aud != nil { - if !constraints.validAudience(aud.Value) { - return iter(unverified) - } - } else { - if constraints.aud != "" { - return iter(unverified) - } - } - // RFC7159 4.1.4 exp - if exp := payload.Get(jwtExpKey); exp != nil { - switch exp.Value.(type) { - case ast.Number: - // constraints.time is in nanoseconds but exp Value is in seconds - compareTime := ast.FloatNumberTerm(constraints.time / 1000000000) - if ast.Compare(compareTime, exp.Value.(ast.Number)) != -1 { - return iter(unverified) - } - default: - return fmt.Errorf("exp value must be a number") - } - } - // RFC7159 4.1.5 nbf - if nbf := payload.Get(jwtNbfKey); nbf != nil { - switch nbf.Value.(type) { - case ast.Number: - // constraints.time is in nanoseconds but nbf Value is in seconds - compareTime := ast.FloatNumberTerm(constraints.time / 1000000000) - if ast.Compare(compareTime, nbf.Value.(ast.Number)) == -1 { - return iter(unverified) - } - default: - return fmt.Errorf("nbf value must be a number") - } - } - - verified := ast.ArrayTerm( - ast.BooleanTerm(true), - ast.NewTerm(token.decodedHeader), - ast.NewTerm(payload), - ) - return iter(verified) -} - -// -- Utilities -- - -func decodeJWT(a ast.Value) (*JSONWebToken, error) { - // Parse the JSON Web Token - astEncode, err := builtins.StringOperand(a, 1) - if err != nil { - return nil, err - } - - encoding := string(astEncode) - if !strings.Contains(encoding, ".") { - return nil, errors.New("encoded JWT had no period separators") - } - - parts := 
strings.Split(encoding, ".") - if len(parts) != 3 { - return nil, fmt.Errorf("encoded JWT must have 3 sections, found %d", len(parts)) - } - - return &JSONWebToken{header: parts[0], payload: parts[1], signature: parts[2]}, nil -} - -func (token *JSONWebToken) decodeSignature() (string, error) { - decodedSignature, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.signature)) - if err != nil { - return "", err - } - - signatureAst, err := builtins.StringOperand(decodedSignature.Value, 1) - if err != nil { - return "", err - } - return string(signatureAst), err -} - -// Extract, validate and return the JWT header as an ast.Object. -func validateJWTHeader(h string) (ast.Object, error) { - header, err := extractJSONObject(h) - if err != nil { - return nil, fmt.Errorf("bad JWT header: %v", err) - } - - // There are two kinds of JWT tokens, a JSON Web Signature (JWS) and - // a JSON Web Encryption (JWE). The latter is very involved, and we - // won't support it for now. - // This code checks which kind of JWT we are dealing with according to - // RFC 7516 Section 9: https://tools.ietf.org/html/rfc7516#section-9 - if header.Get(jwtEncKey) != nil { - return nil, errors.New("JWT is a JWE object, which is not supported") - } - - return header, nil -} - -func extractJSONObject(s string) (ast.Object, error) { - // XXX: This code relies on undocumented behavior of Go's - // json.Unmarshal using the last occurrence of duplicate keys in a JSON - // Object. If duplicate keys are present in a JWT, the last must be - // used or the token rejected. Since detecting duplicates is tantamount - // to parsing it ourselves, we're relying on the Go implementation - // using the last occurring instance of the key, which is the behavior - // as of Go 1.8.1. 
- v, err := getResult(builtinJSONUnmarshal, ast.StringTerm(s)) - if err != nil { - return nil, fmt.Errorf("invalid JSON: %v", err) - } - - o, ok := v.Value.(ast.Object) - if !ok { - return nil, errors.New("decoded JSON type was not an Object") - } - - return o, nil -} - -// getInputSha returns the SHA checksum of the input -func getInputSHA(input []byte, h func() hash.Hash) []byte { - hasher := h() - hasher.Write(input) - return hasher.Sum(nil) -} - -func init() { - RegisterBuiltinFunc(ast.JWTDecode.Name, builtinJWTDecode) - RegisterBuiltinFunc(ast.JWTVerifyRS256.Name, builtinJWTVerifyRS256) - RegisterBuiltinFunc(ast.JWTVerifyRS384.Name, builtinJWTVerifyRS384) - RegisterBuiltinFunc(ast.JWTVerifyRS512.Name, builtinJWTVerifyRS512) - RegisterBuiltinFunc(ast.JWTVerifyPS256.Name, builtinJWTVerifyPS256) - RegisterBuiltinFunc(ast.JWTVerifyPS384.Name, builtinJWTVerifyPS384) - RegisterBuiltinFunc(ast.JWTVerifyPS512.Name, builtinJWTVerifyPS512) - RegisterBuiltinFunc(ast.JWTVerifyES256.Name, builtinJWTVerifyES256) - RegisterBuiltinFunc(ast.JWTVerifyES384.Name, builtinJWTVerifyES384) - RegisterBuiltinFunc(ast.JWTVerifyES512.Name, builtinJWTVerifyES512) - RegisterBuiltinFunc(ast.JWTVerifyHS256.Name, builtinJWTVerifyHS256) - RegisterBuiltinFunc(ast.JWTVerifyHS384.Name, builtinJWTVerifyHS384) - RegisterBuiltinFunc(ast.JWTVerifyHS512.Name, builtinJWTVerifyHS512) - RegisterBuiltinFunc(ast.JWTDecodeVerify.Name, builtinJWTDecodeVerify) - RegisterBuiltinFunc(ast.JWTEncodeSignRaw.Name, builtinJWTEncodeSignRaw) - RegisterBuiltinFunc(ast.JWTEncodeSign.Name, builtinJWTEncodeSign) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/trace.go b/vendor/github.com/open-policy-agent/opa/topdown/trace.go index 277c94b626..0153e4f090 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/trace.go +++ b/vendor/github.com/open-policy-agent/opa/topdown/trace.go @@ -5,898 +5,109 @@ package topdown import ( - "bytes" - "fmt" "io" - "slices" - "strings" - iStrs 
"github.com/open-policy-agent/opa/internal/strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -const ( - minLocationWidth = 5 // len("query") - maxIdealLocationWidth = 64 - columnPadding = 4 - maxExprVarWidth = 32 - maxPrettyExprVarWidth = 64 + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // Op defines the types of tracing events. -type Op string +type Op = v1.Op const ( // EnterOp is emitted when a new query is about to be evaluated. - EnterOp Op = "Enter" + EnterOp = v1.EnterOp // ExitOp is emitted when a query has evaluated to true. - ExitOp Op = "Exit" + ExitOp = v1.ExitOp // EvalOp is emitted when an expression is about to be evaluated. - EvalOp Op = "Eval" + EvalOp = v1.EvalOp // RedoOp is emitted when an expression, rule, or query is being re-evaluated. - RedoOp Op = "Redo" + RedoOp = v1.RedoOp // SaveOp is emitted when an expression is saved instead of evaluated // during partial evaluation. - SaveOp Op = "Save" + SaveOp = v1.SaveOp // FailOp is emitted when an expression evaluates to false. - FailOp Op = "Fail" + FailOp = v1.FailOp // DuplicateOp is emitted when a query has produced a duplicate value. The search // will stop at the point where the duplicate was emitted and backtrack. - DuplicateOp Op = "Duplicate" + DuplicateOp = v1.DuplicateOp // NoteOp is emitted when an expression invokes a tracing built-in function. - NoteOp Op = "Note" + NoteOp = v1.NoteOp // IndexOp is emitted during an expression evaluation to represent lookup // matches. - IndexOp Op = "Index" + IndexOp = v1.IndexOp // WasmOp is emitted when resolving a ref using an external // Resolver. - WasmOp Op = "Wasm" + WasmOp = v1.WasmOp // UnifyOp is emitted when two terms are unified. Node will be set to an // equality expression with the two terms. This Node will not have location // info. 
- UnifyOp Op = "Unify" - FailedAssertionOp Op = "FailedAssertion" + UnifyOp = v1.UnifyOp + FailedAssertionOp = v1.FailedAssertionOp ) // VarMetadata provides some user facing information about // a variable in some policy. -type VarMetadata struct { - Name ast.Var `json:"name"` - Location *ast.Location `json:"location"` -} +type VarMetadata = v1.VarMetadata // Event contains state associated with a tracing event. -type Event struct { - Op Op // Identifies type of event. - Node ast.Node // Contains AST node relevant to the event. - Location *ast.Location // The location of the Node this event relates to. - QueryID uint64 // Identifies the query this event belongs to. - ParentID uint64 // Identifies the parent query this event belongs to. - Locals *ast.ValueMap // Contains local variable bindings from the query context. Nil if variables were not included in the trace event. - LocalMetadata map[ast.Var]VarMetadata // Contains metadata for the local variable bindings. Nil if variables were not included in the trace event. - Message string // Contains message for Note events. - Ref *ast.Ref // Identifies the subject ref for the event. Only applies to Index and Wasm operations. - - input *ast.Term - bindings *bindings - localVirtualCacheSnapshot *ast.ValueMap -} - -func (evt *Event) WithInput(input *ast.Term) *Event { - evt.input = input - return evt -} - -// HasRule returns true if the Event contains an ast.Rule. -func (evt *Event) HasRule() bool { - _, ok := evt.Node.(*ast.Rule) - return ok -} - -// HasBody returns true if the Event contains an ast.Body. -func (evt *Event) HasBody() bool { - _, ok := evt.Node.(ast.Body) - return ok -} - -// HasExpr returns true if the Event contains an ast.Expr. -func (evt *Event) HasExpr() bool { - _, ok := evt.Node.(*ast.Expr) - return ok -} - -// Equal returns true if this event is equal to the other event. 
-func (evt *Event) Equal(other *Event) bool { - if evt.Op != other.Op { - return false - } - if evt.QueryID != other.QueryID { - return false - } - if evt.ParentID != other.ParentID { - return false - } - if !evt.equalNodes(other) { - return false - } - return evt.Locals.Equal(other.Locals) -} - -func (evt *Event) String() string { - return fmt.Sprintf("%v %v %v (qid=%v, pqid=%v)", evt.Op, evt.Node, evt.Locals, evt.QueryID, evt.ParentID) -} - -// Input returns the input object as it was at the event. -func (evt *Event) Input() *ast.Term { - return evt.input -} - -// Plug plugs event bindings into the provided ast.Term. Because bindings are mutable, this only makes sense to do when -// the event is emitted rather than on recorded trace events as the bindings are going to be different by then. -func (evt *Event) Plug(term *ast.Term) *ast.Term { - return evt.bindings.Plug(term) -} - -func (evt *Event) equalNodes(other *Event) bool { - switch a := evt.Node.(type) { - case ast.Body: - if b, ok := other.Node.(ast.Body); ok { - return a.Equal(b) - } - case *ast.Rule: - if b, ok := other.Node.(*ast.Rule); ok { - return a.Equal(b) - } - case *ast.Expr: - if b, ok := other.Node.(*ast.Expr); ok { - return a.Equal(b) - } - case nil: - return other.Node == nil - } - return false -} +type Event = v1.Event // Tracer defines the interface for tracing in the top-down evaluation engine. +// // Deprecated: Use QueryTracer instead. -type Tracer interface { - Enabled() bool - Trace(*Event) -} +type Tracer = v1.Tracer //nolint:staticcheck // QueryTracer defines the interface for tracing in the top-down evaluation engine. // The implementation can provide additional configuration to modify the tracing // behavior for query evaluations. 
-type QueryTracer interface { - Enabled() bool - TraceEvent(Event) - Config() TraceConfig -} +type QueryTracer = v1.QueryTracer // TraceConfig defines some common configuration for Tracer implementations -type TraceConfig struct { - PlugLocalVars bool // Indicate whether to plug local variable bindings before calling into the tracer. -} - -// legacyTracer Implements the QueryTracer interface by wrapping an older Tracer instance. -type legacyTracer struct { - t Tracer -} - -func (l *legacyTracer) Enabled() bool { - return l.t.Enabled() -} - -func (l *legacyTracer) Config() TraceConfig { - return TraceConfig{ - PlugLocalVars: true, // For backwards compatibility old tracers will plug local variables - } -} - -func (l *legacyTracer) TraceEvent(evt Event) { - l.t.Trace(&evt) -} +type TraceConfig = v1.TraceConfig // WrapLegacyTracer will create a new QueryTracer which wraps an // older Tracer instance. func WrapLegacyTracer(tracer Tracer) QueryTracer { - return &legacyTracer{t: tracer} + return v1.WrapLegacyTracer(tracer) } // BufferTracer implements the Tracer and QueryTracer interface by // simply buffering all events received. -type BufferTracer []*Event +type BufferTracer = v1.BufferTracer // NewBufferTracer returns a new BufferTracer. func NewBufferTracer() *BufferTracer { - return &BufferTracer{} -} - -// Enabled always returns true if the BufferTracer is instantiated. -func (b *BufferTracer) Enabled() bool { - return b != nil -} - -// Trace adds the event to the buffer. -// Deprecated: Use TraceEvent instead. -func (b *BufferTracer) Trace(evt *Event) { - *b = append(*b, evt) -} - -// TraceEvent adds the event to the buffer. -func (b *BufferTracer) TraceEvent(evt Event) { - *b = append(*b, &evt) -} - -// Config returns the Tracers standard configuration -func (b *BufferTracer) Config() TraceConfig { - return TraceConfig{PlugLocalVars: true} + return v1.NewBufferTracer() } // PrettyTrace pretty prints the trace to the writer. 
func PrettyTrace(w io.Writer, trace []*Event) { - PrettyTraceWithOpts(w, trace, PrettyTraceOptions{}) + v1.PrettyTrace(w, trace) } // PrettyTraceWithLocation prints the trace to the writer and includes location information func PrettyTraceWithLocation(w io.Writer, trace []*Event) { - PrettyTraceWithOpts(w, trace, PrettyTraceOptions{Locations: true}) -} - -type PrettyTraceOptions struct { - Locations bool // Include location information - ExprVariables bool // Include variables found in the expression - LocalVariables bool // Include all local variables -} - -type traceRow []string - -func (r *traceRow) add(s string) { - *r = append(*r, s) -} - -type traceTable struct { - rows []traceRow - maxWidths []int + v1.PrettyTraceWithLocation(w, trace) } -func (t *traceTable) add(row traceRow) { - t.rows = append(t.rows, row) - for i := range row { - if i >= len(t.maxWidths) { - t.maxWidths = append(t.maxWidths, len(row[i])) - } else if len(row[i]) > t.maxWidths[i] { - t.maxWidths[i] = len(row[i]) - } - } -} - -func (t *traceTable) write(w io.Writer, padding int) { - for _, row := range t.rows { - for i, cell := range row { - width := t.maxWidths[i] + padding - if i < len(row)-1 { - _, _ = fmt.Fprintf(w, "%-*s ", width, cell) - } else { - _, _ = fmt.Fprintf(w, "%s", cell) - } - } - _, _ = fmt.Fprintln(w) - } -} +type PrettyTraceOptions = v1.PrettyTraceOptions func PrettyTraceWithOpts(w io.Writer, trace []*Event, opts PrettyTraceOptions) { - depths := depths{} - - // FIXME: Can we shorten each location as we process each trace event instead of beforehand? 
- filePathAliases, _ := getShortenedFileNames(trace) - - table := traceTable{} - - for _, event := range trace { - depth := depths.GetOrSet(event.QueryID, event.ParentID) - row := traceRow{} - - if opts.Locations { - location := formatLocation(event, filePathAliases) - row.add(location) - } - - row.add(formatEvent(event, depth)) - - if opts.ExprVariables { - vars := exprLocalVars(event) - keys := sortedKeys(vars) - - buf := new(bytes.Buffer) - buf.WriteString("{") - for i, k := range keys { - if i > 0 { - buf.WriteString(", ") - } - _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(vars.Get(k).String(), maxExprVarWidth)) - } - buf.WriteString("}") - row.add(buf.String()) - } - - if opts.LocalVariables { - if locals := event.Locals; locals != nil { - keys := sortedKeys(locals) - - buf := new(bytes.Buffer) - buf.WriteString("{") - for i, k := range keys { - if i > 0 { - buf.WriteString(", ") - } - _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(locals.Get(k).String(), maxExprVarWidth)) - } - buf.WriteString("}") - row.add(buf.String()) - } else { - row.add("{}") - } - } - - table.add(row) - } - - table.write(w, columnPadding) -} - -func sortedKeys(vm *ast.ValueMap) []ast.Value { - keys := make([]ast.Value, 0, vm.Len()) - vm.Iter(func(k, _ ast.Value) bool { - keys = append(keys, k) - return false - }) - slices.SortFunc(keys, func(a, b ast.Value) int { - return strings.Compare(a.String(), b.String()) - }) - return keys -} - -func exprLocalVars(e *Event) *ast.ValueMap { - vars := ast.NewValueMap() - - findVars := func(term *ast.Term) bool { - //if r, ok := term.Value.(ast.Ref); ok { - // fmt.Printf("ref: %v\n", r) - // //return true - //} - if name, ok := term.Value.(ast.Var); ok { - if meta, ok := e.LocalMetadata[name]; ok { - if val := e.Locals.Get(name); val != nil { - vars.Put(meta.Name, val) - } - } - } - return false - } - - if r, ok := e.Node.(*ast.Rule); ok { - // We're only interested in vars in the head, not the body - ast.WalkTerms(r.Head, findVars) - 
return vars - } - - // The local cache snapshot only contains a snapshot for those refs present in the event node, - // so they can all be added to the vars map. - e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { - vars.Put(k, v) - return false - }) - - ast.WalkTerms(e.Node, findVars) - - return vars -} - -func formatEvent(event *Event, depth int) string { - padding := formatEventPadding(event, depth) - if event.Op == NoteOp { - return fmt.Sprintf("%v%v %q", padding, event.Op, event.Message) - } - - var details interface{} - if node, ok := event.Node.(*ast.Rule); ok { - details = node.Path() - } else if event.Ref != nil { - details = event.Ref - } else { - details = rewrite(event).Node - } - - template := "%v%v %v" - opts := []interface{}{padding, event.Op, details} - - if event.Message != "" { - template += " %v" - opts = append(opts, event.Message) - } - - return fmt.Sprintf(template, opts...) + v1.PrettyTraceWithOpts(w, trace, opts) } -func formatEventPadding(event *Event, depth int) string { - spaces := formatEventSpaces(event, depth) - if spaces > 1 { - return strings.Repeat("| ", spaces-1) - } - return "" -} - -func formatEventSpaces(event *Event, depth int) int { - switch event.Op { - case EnterOp: - return depth - case RedoOp: - if _, ok := event.Node.(*ast.Expr); !ok { - return depth - } - } - return depth + 1 -} - -// getShortenedFileNames will return a map of file paths to shortened aliases -// that were found in the trace. 
It also returns the longest location expected -func getShortenedFileNames(trace []*Event) (map[string]string, int) { - // Get a deduplicated list of all file paths - // and the longest file path size - fpAliases := map[string]string{} - var canShorten []string - longestLocation := 0 - for _, event := range trace { - if event.Location != nil { - if event.Location.File != "" { - // length of ":" - curLen := len(event.Location.File) + numDigits10(event.Location.Row) + 1 - if curLen > longestLocation { - longestLocation = curLen - } - - if _, ok := fpAliases[event.Location.File]; ok { - continue - } - - canShorten = append(canShorten, event.Location.File) - - // Default to just alias their full path - fpAliases[event.Location.File] = event.Location.File - } else { - // length of ":" - curLen := minLocationWidth + numDigits10(event.Location.Row) + 1 - if curLen > longestLocation { - longestLocation = curLen - } - } - } - } - - if len(canShorten) > 0 && longestLocation > maxIdealLocationWidth { - fpAliases, longestLocation = iStrs.TruncateFilePaths(maxIdealLocationWidth, longestLocation, canShorten...) - } - - return fpAliases, longestLocation -} - -func numDigits10(n int) int { - if n < 10 { - return 1 - } - return numDigits10(n/10) + 1 -} - -func formatLocation(event *Event, fileAliases map[string]string) string { - - location := event.Location - if location == nil { - return "" - } - - if location.File == "" { - return fmt.Sprintf("query:%v", location.Row) - } - - return fmt.Sprintf("%v:%v", fileAliases[location.File], location.Row) -} - -// depths is a helper for computing the depth of an event. Events within the -// same query all have the same depth. The depth of query is -// depth(parent(query))+1. 
-type depths map[uint64]int - -func (ds depths) GetOrSet(qid uint64, pqid uint64) int { - depth := ds[qid] - if depth == 0 { - depth = ds[pqid] - depth++ - ds[qid] = depth - } - return depth -} - -func builtinTrace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - str, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return handleBuiltinErr(ast.Trace.Name, bctx.Location, err) - } - - if !bctx.TraceEnabled { - return iter(ast.BooleanTerm(true)) - } - - evt := Event{ - Op: NoteOp, - Location: bctx.Location, - QueryID: bctx.QueryID, - ParentID: bctx.ParentID, - Message: string(str), - } - - for i := range bctx.QueryTracers { - bctx.QueryTracers[i].TraceEvent(evt) - } - - return iter(ast.BooleanTerm(true)) -} - -func rewrite(event *Event) *Event { - - cpy := *event - - var node ast.Node - - switch v := event.Node.(type) { - case *ast.Expr: - expr := v.Copy() - - // Hide generated local vars in 'key' position that have not been - // rewritten. 
- if ev, ok := v.Terms.(*ast.Every); ok { - if kv, ok := ev.Key.Value.(ast.Var); ok { - if rw, ok := cpy.LocalMetadata[kv]; !ok || rw.Name.IsGenerated() { - expr.Terms.(*ast.Every).Key = nil - } - } - } - node = expr - case ast.Body: - node = v.Copy() - case *ast.Rule: - node = v.Copy() - } - - _, _ = ast.TransformVars(node, func(v ast.Var) (ast.Value, error) { - if meta, ok := cpy.LocalMetadata[v]; ok { - return meta.Name, nil - } - return v, nil - }) - - cpy.Node = node - - return &cpy -} - -type varInfo struct { - VarMetadata - val ast.Value - exprLoc *ast.Location - col int // 0-indexed column -} - -func (v varInfo) Value() string { - if v.val != nil { - return v.val.String() - } - return "undefined" -} - -func (v varInfo) Title() string { - if v.exprLoc != nil && v.exprLoc.Text != nil { - return string(v.exprLoc.Text) - } - return string(v.Name) -} - -func padLocationText(loc *ast.Location) string { - if loc == nil { - return "" - } - - text := string(loc.Text) - - if loc.Col == 0 { - return text - } - - buf := new(bytes.Buffer) - j := 0 - for i := 1; i < loc.Col; i++ { - if len(loc.Tabs) > 0 && j < len(loc.Tabs) && loc.Tabs[j] == i { - buf.WriteString("\t") - j++ - } else { - buf.WriteString(" ") - } - } - - buf.WriteString(text) - return buf.String() -} - -type PrettyEventOpts struct { - PrettyVars bool -} - -func walkTestTerms(x interface{}, f func(*ast.Term) bool) { - var vis *ast.GenericVisitor - vis = ast.NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case ast.Call: - for _, t := range x[1:] { - vis.Walk(t) - } - return true - case *ast.Expr: - if x.IsCall() { - for _, o := range x.Operands() { - vis.Walk(o) - } - for i := range x.With { - vis.Walk(x.With[i]) - } - return true - } - case *ast.Term: - return f(x) - case *ast.With: - vis.Walk(x.Value) - return true - } - return false - }) - vis.Walk(x) -} +type PrettyEventOpts = v1.PrettyEventOpts func PrettyEvent(w io.Writer, e *Event, opts PrettyEventOpts) error { - if 
!opts.PrettyVars { - _, _ = fmt.Fprintln(w, padLocationText(e.Location)) - return nil - } - - buf := new(bytes.Buffer) - exprVars := map[string]varInfo{} - - findVars := func(unknownAreUndefined bool) func(term *ast.Term) bool { - return func(term *ast.Term) bool { - if term.Location == nil { - return false - } - - switch v := term.Value.(type) { - case *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension: - // we don't report on the internals of a comprehension, as it's already evaluated, and we won't have the local vars. - return true - case ast.Var: - var info *varInfo - if meta, ok := e.LocalMetadata[v]; ok { - info = &varInfo{ - VarMetadata: meta, - val: e.Locals.Get(v), - exprLoc: term.Location, - } - } else if unknownAreUndefined { - info = &varInfo{ - VarMetadata: VarMetadata{Name: v}, - exprLoc: term.Location, - col: term.Location.Col, - } - } - - if info != nil { - if v, exists := exprVars[info.Title()]; !exists || v.val == nil { - if term.Location != nil { - info.col = term.Location.Col - } - exprVars[info.Title()] = *info - } - } - } - return false - } - } - - expr, ok := e.Node.(*ast.Expr) - if !ok || expr == nil { - return nil - } - - base := expr.BaseCogeneratedExpr() - exprText := padLocationText(base.Location) - buf.WriteString(exprText) - - e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { - var info *varInfo - switch k := k.(type) { - case ast.Ref: - info = &varInfo{ - VarMetadata: VarMetadata{Name: ast.Var(k.String())}, - val: v, - exprLoc: k[0].Location, - col: k[0].Location.Col, - } - case *ast.ArrayComprehension: - info = &varInfo{ - VarMetadata: VarMetadata{Name: ast.Var(k.String())}, - val: v, - exprLoc: k.Term.Location, - col: k.Term.Location.Col, - } - case *ast.SetComprehension: - info = &varInfo{ - VarMetadata: VarMetadata{Name: ast.Var(k.String())}, - val: v, - exprLoc: k.Term.Location, - col: k.Term.Location.Col, - } - case *ast.ObjectComprehension: - info = &varInfo{ - VarMetadata: VarMetadata{Name: 
ast.Var(k.String())}, - val: v, - exprLoc: k.Key.Location, - col: k.Key.Location.Col, - } - } - - if info != nil { - exprVars[info.Title()] = *info - } - - return false - }) - - // If the expression is negated, we can't confidently assert that vars with unknown values are 'undefined', - // since the compiler might have opted out of the necessary rewrite. - walkTestTerms(expr, findVars(!expr.Negated)) - coExprs := expr.CogeneratedExprs() - for _, coExpr := range coExprs { - // Only the current "co-expr" can have undefined vars, if we don't know the value for a var in any other co-expr, - // it's unknown, not undefined. A var can be unknown if it hasn't been assigned a value yet, because the co-expr - // hasn't been evaluated yet (the fail happened before it). - walkTestTerms(coExpr, findVars(false)) - } - - printPrettyVars(buf, exprVars) - _, _ = fmt.Fprint(w, buf.String()) - return nil -} - -func printPrettyVars(w *bytes.Buffer, exprVars map[string]varInfo) { - containsTabs := false - varRows := make(map[int]interface{}) - for _, info := range exprVars { - if len(info.exprLoc.Tabs) > 0 { - containsTabs = true - } - varRows[info.exprLoc.Row] = nil - } - - if containsTabs && len(varRows) > 1 { - // We can't (currently) reliably point to var locations when they are on different rows that contain tabs. - // So we'll just print them in alphabetical order instead. 
- byName := make([]varInfo, 0, len(exprVars)) - for _, info := range exprVars { - byName = append(byName, info) - } - slices.SortStableFunc(byName, func(a, b varInfo) int { - return strings.Compare(a.Title(), b.Title()) - }) - - w.WriteString("\n\nWhere:\n") - for _, info := range byName { - w.WriteString(fmt.Sprintf("\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth))) - } - - return - } - - byCol := make([]varInfo, 0, len(exprVars)) - for _, info := range exprVars { - byCol = append(byCol, info) - } - slices.SortFunc(byCol, func(a, b varInfo) int { - // sort first by column, then by reverse row (to present vars in the same order they appear in the expr) - if a.col == b.col { - if a.exprLoc.Row == b.exprLoc.Row { - return strings.Compare(a.Title(), b.Title()) - } - return b.exprLoc.Row - a.exprLoc.Row - } - return a.col - b.col - }) - - if len(byCol) == 0 { - return - } - - w.WriteString("\n") - printArrows(w, byCol, -1) - for i := len(byCol) - 1; i >= 0; i-- { - w.WriteString("\n") - printArrows(w, byCol, i) - } -} - -func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) { - prevCol := 0 - var slice []varInfo - if printValueAt >= 0 { - slice = l[:printValueAt+1] - } else { - slice = l - } - isFirst := true - for i, info := range slice { - - isLast := i >= len(slice)-1 - col := info.col - - if !isLast && col == l[i+1].col { - // We're sharing the same column with another, subsequent var - continue - } - - spaces := col - 1 - if i > 0 && !isFirst { - spaces = (col - prevCol) - 1 - } - - for j := 0; j < spaces; j++ { - tab := false - for _, t := range info.exprLoc.Tabs { - if t == j+prevCol+1 { - w.WriteString("\t") - tab = true - break - } - } - if !tab { - w.WriteString(" ") - } - } - - if isLast && printValueAt >= 0 { - valueStr := iStrs.Truncate(info.Value(), maxPrettyExprVarWidth) - if (i > 0 && col == l[i-1].col) || (i < len(l)-1 && col == l[i+1].col) { - // There is another var on this column, so we need to include the 
name to differentiate them. - w.WriteString(fmt.Sprintf("%s: %s", info.Title(), valueStr)) - } else { - w.WriteString(valueStr) - } - } else { - w.WriteString("|") - } - prevCol = col - isFirst = false - } -} - -func init() { - RegisterBuiltinFunc(ast.Trace.Name, builtinTrace) + return v1.PrettyEvent(w, e, opts) } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/type.go b/vendor/github.com/open-policy-agent/opa/topdown/type.go deleted file mode 100644 index dab5c853cd..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/type.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "github.com/open-policy-agent/opa/ast" -) - -func builtinIsNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Number: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.String: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Boolean: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case *ast.Array: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Set: - return 
iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Object: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Null: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func init() { - RegisterBuiltinFunc(ast.IsNumber.Name, builtinIsNumber) - RegisterBuiltinFunc(ast.IsString.Name, builtinIsString) - RegisterBuiltinFunc(ast.IsBoolean.Name, builtinIsBoolean) - RegisterBuiltinFunc(ast.IsArray.Name, builtinIsArray) - RegisterBuiltinFunc(ast.IsSet.Name, builtinIsSet) - RegisterBuiltinFunc(ast.IsObject.Name, builtinIsObject) - RegisterBuiltinFunc(ast.IsNull.Name, builtinIsNull) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/type_name.go b/vendor/github.com/open-policy-agent/opa/topdown/type_name.go deleted file mode 100644 index 0a8b44aed3..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/type_name.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" -) - -func builtinTypeName(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Null: - return iter(ast.StringTerm("null")) - case ast.Boolean: - return iter(ast.StringTerm("boolean")) - case ast.Number: - return iter(ast.StringTerm("number")) - case ast.String: - return iter(ast.StringTerm("string")) - case *ast.Array: - return iter(ast.StringTerm("array")) - case ast.Object: - return iter(ast.StringTerm("object")) - case ast.Set: - return iter(ast.StringTerm("set")) - } - - return fmt.Errorf("illegal value") -} - -func init() { - RegisterBuiltinFunc(ast.TypeNameBuiltin.Name, builtinTypeName) -} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/walk.go b/vendor/github.com/open-policy-agent/opa/topdown/walk.go deleted file mode 100644 index 0f3b3544b5..0000000000 --- a/vendor/github.com/open-policy-agent/opa/topdown/walk.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "github.com/open-policy-agent/opa/ast" -) - -func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - input := operands[0] - - if pathIsWildcard(operands) { - // When the path assignment is a wildcard: walk(input, [_, value]) - // we may skip the path construction entirely, and simply return - // same pointer in each iteration. This is a much more efficient - // path when only the values are needed. 
- return walkNoPath(input, iter) - } - - filter := getOutputPath(operands) - return walk(filter, nil, input, iter) -} - -func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error { - - if filter == nil || filter.Len() == 0 { - if path == nil { - path = ast.NewArray() - } - - if err := iter(ast.ArrayTerm(ast.NewTerm(path.Copy()), input)); err != nil { - return err - } - } - - if filter != nil && filter.Len() > 0 { - key := filter.Elem(0) - filter = filter.Slice(1, -1) - if key.IsGround() { - if term := input.Get(key); term != nil { - path = pathAppend(path, key) - return walk(filter, path, term, iter) - } - return nil - } - } - - switch v := input.Value.(type) { - case *ast.Array: - for i := 0; i < v.Len(); i++ { - path = pathAppend(path, ast.IntNumberTerm(i)) - if err := walk(filter, path, v.Elem(i), iter); err != nil { - return err - } - path = path.Slice(0, path.Len()-1) - } - case ast.Object: - return v.Iter(func(k, v *ast.Term) error { - path = pathAppend(path, k) - if err := walk(filter, path, v, iter); err != nil { - return err - } - path = path.Slice(0, path.Len()-1) - return nil - }) - case ast.Set: - return v.Iter(func(elem *ast.Term) error { - path = pathAppend(path, elem) - if err := walk(filter, path, elem, iter); err != nil { - return err - } - path = path.Slice(0, path.Len()-1) - return nil - }) - } - - return nil -} - -var emptyArr = ast.ArrayTerm() - -func walkNoPath(input *ast.Term, iter func(*ast.Term) error) error { - if err := iter(ast.ArrayTerm(emptyArr, input)); err != nil { - return err - } - - switch v := input.Value.(type) { - case ast.Object: - return v.Iter(func(_, v *ast.Term) error { - return walkNoPath(v, iter) - }) - case *ast.Array: - for i := 0; i < v.Len(); i++ { - if err := walkNoPath(v.Elem(i), iter); err != nil { - return err - } - } - case ast.Set: - return v.Iter(func(elem *ast.Term) error { - return walkNoPath(elem, iter) - }) - } - - return nil -} - -func pathAppend(path *ast.Array, key *ast.Term) 
*ast.Array { - if path == nil { - return ast.NewArray(key) - } - - return path.Append(key) -} - -func getOutputPath(operands []*ast.Term) *ast.Array { - if len(operands) == 2 { - if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 { - if path, ok := arr.Elem(0).Value.(*ast.Array); ok { - return path - } - } - } - return nil -} - -func pathIsWildcard(operands []*ast.Term) bool { - if len(operands) == 2 { - if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 { - if v, ok := arr.Elem(0).Value.(ast.Var); ok { - return v.IsWildcard() - } - } - } - return false -} - -func init() { - RegisterBuiltinFunc(ast.WalkBuiltin.Name, evalWalk) -} diff --git a/vendor/github.com/open-policy-agent/opa/types/decode.go b/vendor/github.com/open-policy-agent/opa/types/decode.go deleted file mode 100644 index a6bd9ea030..0000000000 --- a/vendor/github.com/open-policy-agent/opa/types/decode.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package types - -import ( - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/util" -) - -const ( - typeNull = "null" - typeBoolean = "boolean" - typeNumber = "number" - typeString = "string" - typeArray = "array" - typeSet = "set" - typeObject = "object" - typeAny = "any" - typeFunction = "function" -) - -// Unmarshal deserializes bs and returns the resulting type. 
-func Unmarshal(bs []byte) (result Type, err error) { - - var hint rawtype - - if err = util.UnmarshalJSON(bs, &hint); err == nil { - switch hint.Type { - case typeNull: - result = NewNull() - case typeBoolean: - result = NewBoolean() - case typeNumber: - result = NewNumber() - case typeString: - result = NewString() - case typeArray: - var arr rawarray - if err = util.UnmarshalJSON(bs, &arr); err == nil { - var err error - var static []Type - var dynamic Type - if static, err = unmarshalSlice(arr.Static); err != nil { - return nil, err - } - if len(arr.Dynamic) != 0 { - if dynamic, err = Unmarshal(arr.Dynamic); err != nil { - return nil, err - } - } - result = NewArray(static, dynamic) - } - case typeObject: - var obj rawobject - if err = util.UnmarshalJSON(bs, &obj); err == nil { - var err error - var static []*StaticProperty - var dynamic *DynamicProperty - if static, err = unmarshalStaticPropertySlice(obj.Static); err != nil { - return nil, err - } - if dynamic, err = unmarshalDynamicProperty(obj.Dynamic); err != nil { - return nil, err - } - result = NewObject(static, dynamic) - } - case typeSet: - var set rawset - if err = util.UnmarshalJSON(bs, &set); err == nil { - var of Type - if of, err = Unmarshal(set.Of); err == nil { - result = NewSet(of) - } - } - case typeAny: - var union rawunion - if err = util.UnmarshalJSON(bs, &union); err == nil { - var of []Type - if of, err = unmarshalSlice(union.Of); err == nil { - result = NewAny(of...) 
- } - } - case typeFunction: - var decl rawdecl - if err = util.UnmarshalJSON(bs, &decl); err == nil { - args, err := unmarshalSlice(decl.Args) - if err != nil { - return nil, err - } - var ret Type - if len(decl.Result) > 0 { - ret, err = Unmarshal(decl.Result) - if err != nil { - return nil, err - } - } - if len(decl.Variadic) > 0 { - varargs, err := Unmarshal(decl.Variadic) - if err != nil { - return nil, err - } - result = NewVariadicFunction(args, varargs, ret) - } else { - result = NewFunction(args, ret) - } - } - default: - err = fmt.Errorf("unsupported type '%v'", hint.Type) - } - } - - return result, err -} - -type rawtype struct { - Type string `json:"type"` -} - -type rawarray struct { - Static []json.RawMessage `json:"static"` - Dynamic json.RawMessage `json:"dynamic"` -} - -type rawobject struct { - Static []rawstaticproperty `json:"static"` - Dynamic rawdynamicproperty `json:"dynamic"` -} - -type rawstaticproperty struct { - Key interface{} `json:"key"` - Value json.RawMessage `json:"value"` -} - -type rawdynamicproperty struct { - Key json.RawMessage `json:"key"` - Value json.RawMessage `json:"value"` -} - -type rawset struct { - Of json.RawMessage `json:"of"` -} - -type rawunion struct { - Of []json.RawMessage `json:"of"` -} - -type rawdecl struct { - Args []json.RawMessage `json:"args"` - Result json.RawMessage `json:"result"` - Variadic json.RawMessage `json:"variadic"` -} - -func unmarshalSlice(elems []json.RawMessage) (result []Type, err error) { - result = make([]Type, len(elems)) - for i := range elems { - if result[i], err = Unmarshal(elems[i]); err != nil { - return nil, err - } - } - return result, err -} - -func unmarshalStaticPropertySlice(elems []rawstaticproperty) (result []*StaticProperty, err error) { - result = make([]*StaticProperty, len(elems)) - for i := range elems { - value, err := Unmarshal(elems[i].Value) - if err != nil { - return nil, err - } - result[i] = NewStaticProperty(elems[i].Key, value) - } - return result, err -} - 
-func unmarshalDynamicProperty(x rawdynamicproperty) (result *DynamicProperty, err error) { - if len(x.Key) == 0 { - return nil, nil - } - var key Type - if key, err = Unmarshal(x.Key); err == nil { - var value Type - if value, err = Unmarshal(x.Value); err == nil { - return NewDynamicProperty(key, value), nil - } - } - return nil, err -} diff --git a/vendor/github.com/open-policy-agent/opa/util/backoff.go b/vendor/github.com/open-policy-agent/opa/util/backoff.go deleted file mode 100644 index 6fbf63ef77..0000000000 --- a/vendor/github.com/open-policy-agent/opa/util/backoff.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "math/rand" - "time" -) - -func init() { - // NOTE(sr): We don't need good random numbers here; it's used for jittering - // the backup timing a bit. But anyways, let's make it random enough; without - // a call to rand.Seed() we'd get the same stream of numbers for each program - // run. (Or not, if some other packages happens to seed the global randomness - // source.) - // Note(philipc): rand.Seed() was deprecated in Go 1.20, so we've switched to - // using the recommended rand.New(rand.NewSource(seed)) style. - rand.New(rand.NewSource(time.Now().UnixNano())) -} - -// DefaultBackoff returns a delay with an exponential backoff based on the -// number of retries. -func DefaultBackoff(base, max float64, retries int) time.Duration { - return Backoff(base, max, .2, 1.6, retries) -} - -// Backoff returns a delay with an exponential backoff based on the number of -// retries. Same algorithm used in gRPC. 
-func Backoff(base, max, jitter, factor float64, retries int) time.Duration { - if retries == 0 { - return 0 - } - - backoff, max := base, max - for backoff < max && retries > 0 { - backoff *= factor - retries-- - } - if backoff > max { - backoff = max - } - - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + jitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - - return time.Duration(backoff) -} diff --git a/vendor/github.com/open-policy-agent/opa/util/compare.go b/vendor/github.com/open-policy-agent/opa/util/compare.go deleted file mode 100644 index 8ae7753690..0000000000 --- a/vendor/github.com/open-policy-agent/opa/util/compare.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "encoding/json" - "fmt" - "math/big" - "sort" -) - -// Compare returns 0 if a equals b, -1 if a is less than b, and 1 if b is than a. -// -// For comparison between values of different types, the following ordering is used: -// nil < bool < int, float64 < string < []interface{} < map[string]interface{}. Slices and maps -// are compared recursively. If one slice or map is a subset of the other slice or map -// it is considered "less than". Nil is always equal to nil. 
-func Compare(a, b interface{}) int { - aSortOrder := sortOrder(a) - bSortOrder := sortOrder(b) - if aSortOrder < bSortOrder { - return -1 - } else if bSortOrder < aSortOrder { - return 1 - } - switch a := a.(type) { - case nil: - return 0 - case bool: - switch b := b.(type) { - case bool: - if a == b { - return 0 - } - if !a { - return -1 - } - return 1 - } - case json.Number: - switch b := b.(type) { - case json.Number: - return compareJSONNumber(a, b) - } - case int: - switch b := b.(type) { - case int: - if a == b { - return 0 - } else if a < b { - return -1 - } - return 1 - } - case float64: - switch b := b.(type) { - case float64: - if a == b { - return 0 - } else if a < b { - return -1 - } - return 1 - } - case string: - switch b := b.(type) { - case string: - if a == b { - return 0 - } else if a < b { - return -1 - } - return 1 - } - case []interface{}: - switch b := b.(type) { - case []interface{}: - bLen := len(b) - aLen := len(a) - minLen := aLen - if bLen < minLen { - minLen = bLen - } - for i := 0; i < minLen; i++ { - cmp := Compare(a[i], b[i]) - if cmp != 0 { - return cmp - } - } - if aLen == bLen { - return 0 - } else if aLen < bLen { - return -1 - } - return 1 - } - case map[string]interface{}: - switch b := b.(type) { - case map[string]interface{}: - var aKeys []string - for k := range a { - aKeys = append(aKeys, k) - } - var bKeys []string - for k := range b { - bKeys = append(bKeys, k) - } - sort.Strings(aKeys) - sort.Strings(bKeys) - aLen := len(aKeys) - bLen := len(bKeys) - minLen := aLen - if bLen < minLen { - minLen = bLen - } - for i := 0; i < minLen; i++ { - if aKeys[i] < bKeys[i] { - return -1 - } else if bKeys[i] < aKeys[i] { - return 1 - } - aVal := a[aKeys[i]] - bVal := b[bKeys[i]] - cmp := Compare(aVal, bVal) - if cmp != 0 { - return cmp - } - } - if aLen == bLen { - return 0 - } else if aLen < bLen { - return -1 - } - return 1 - } - } - - panic(fmt.Sprintf("illegal arguments of type %T and type %T", a, b)) -} - -const ( - nilSort = 
iota - boolSort = iota - numberSort = iota - stringSort = iota - arraySort = iota - objectSort = iota -) - -func compareJSONNumber(a, b json.Number) int { - bigA, ok := new(big.Float).SetString(string(a)) - if !ok { - panic("illegal value") - } - bigB, ok := new(big.Float).SetString(string(b)) - if !ok { - panic("illegal value") - } - return bigA.Cmp(bigB) -} - -func sortOrder(v interface{}) int { - switch v.(type) { - case nil: - return nilSort - case bool: - return boolSort - case json.Number: - return numberSort - case int: - return numberSort - case float64: - return numberSort - case string: - return stringSort - case []interface{}: - return arraySort - case map[string]interface{}: - return objectSort - } - panic(fmt.Sprintf("illegal argument of type %T", v)) -} diff --git a/vendor/github.com/open-policy-agent/opa/util/hashmap.go b/vendor/github.com/open-policy-agent/opa/util/hashmap.go deleted file mode 100644 index 8875a6323e..0000000000 --- a/vendor/github.com/open-policy-agent/opa/util/hashmap.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "fmt" - "strings" -) - -// T is a concise way to refer to T. -type T interface{} - -type hashEntry struct { - k T - v T - next *hashEntry -} - -// HashMap represents a key/value map. -type HashMap struct { - eq func(T, T) bool - hash func(T) int - table map[int]*hashEntry - size int -} - -// NewHashMap returns a new empty HashMap. -func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap { - return &HashMap{ - eq: eq, - hash: hash, - table: make(map[int]*hashEntry), - size: 0, - } -} - -// Copy returns a shallow copy of this HashMap. 
-func (h *HashMap) Copy() *HashMap { - cpy := NewHashMap(h.eq, h.hash) - h.Iter(func(k, v T) bool { - cpy.Put(k, v) - return false - }) - return cpy -} - -// Equal returns true if this HashMap equals the other HashMap. -// Two hash maps are equal if they contain the same key/value pairs. -func (h *HashMap) Equal(other *HashMap) bool { - if h.Len() != other.Len() { - return false - } - return !h.Iter(func(k, v T) bool { - ov, ok := other.Get(k) - if !ok { - return true - } - return !h.eq(v, ov) - }) -} - -// Get returns the value for k. -func (h *HashMap) Get(k T) (T, bool) { - hash := h.hash(k) - for entry := h.table[hash]; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - return entry.v, true - } - } - return nil, false -} - -// Delete removes the key k. -func (h *HashMap) Delete(k T) { - hash := h.hash(k) - var prev *hashEntry - for entry := h.table[hash]; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - if prev != nil { - prev.next = entry.next - } else { - h.table[hash] = entry.next - } - h.size-- - return - } - prev = entry - } -} - -// Hash returns the hash code for this hash map. -func (h *HashMap) Hash() int { - var hash int - h.Iter(func(k, v T) bool { - hash += h.hash(k) + h.hash(v) - return false - }) - return hash -} - -// Iter invokes the iter function for each element in the HashMap. -// If the iter function returns true, iteration stops and the return value is true. -// If the iter function never returns true, iteration proceeds through all elements -// and the return value is false. -func (h *HashMap) Iter(iter func(T, T) bool) bool { - for _, entry := range h.table { - for ; entry != nil; entry = entry.next { - if iter(entry.k, entry.v) { - return true - } - } - } - return false -} - -// Len returns the current size of this HashMap. -func (h *HashMap) Len() int { - return h.size -} - -// Put inserts a key/value pair into this HashMap. If the key is already present, the existing -// value is overwritten. 
-func (h *HashMap) Put(k T, v T) { - hash := h.hash(k) - head := h.table[hash] - for entry := head; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - entry.v = v - return - } - } - h.table[hash] = &hashEntry{k: k, v: v, next: head} - h.size++ -} - -func (h *HashMap) String() string { - var buf []string - h.Iter(func(k T, v T) bool { - buf = append(buf, fmt.Sprintf("%v: %v", k, v)) - return false - }) - return "{" + strings.Join(buf, ", ") + "}" -} - -// Update returns a new HashMap with elements from the other HashMap put into this HashMap. -// If the other HashMap contains elements with the same key as this HashMap, the value -// from the other HashMap overwrites the value from this HashMap. -func (h *HashMap) Update(other *HashMap) *HashMap { - updated := h.Copy() - other.Iter(func(k, v T) bool { - updated.Put(k, v) - return false - }) - return updated -} diff --git a/vendor/github.com/open-policy-agent/opa/util/json.go b/vendor/github.com/open-policy-agent/opa/util/json.go deleted file mode 100644 index 0b7fd2ed64..0000000000 --- a/vendor/github.com/open-policy-agent/opa/util/json.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - "sigs.k8s.io/yaml" - - "github.com/open-policy-agent/opa/loader/extension" -) - -// UnmarshalJSON parses the JSON encoded data and stores the result in the value -// pointed to by x. -// -// This function is intended to be used in place of the standard json.Marshal -// function when json.Number is required. 
-func UnmarshalJSON(bs []byte, x interface{}) error { - return unmarshalJSON(bs, x, true) -} - -func unmarshalJSON(bs []byte, x interface{}, ext bool) error { - buf := bytes.NewBuffer(bs) - decoder := NewJSONDecoder(buf) - if err := decoder.Decode(x); err != nil { - if handler := extension.FindExtension(".json"); handler != nil && ext { - return handler(bs, x) - } - return err - } - - // Since decoder.Decode validates only the first json structure in bytes, - // check if decoder has more bytes to consume to validate whole input bytes. - tok, err := decoder.Token() - if tok != nil { - return fmt.Errorf("error: invalid character '%s' after top-level value", tok) - } - if err != nil && err != io.EOF { - return err - } - return nil -} - -// NewJSONDecoder returns a new decoder that reads from r. -// -// This function is intended to be used in place of the standard json.NewDecoder -// when json.Number is required. -func NewJSONDecoder(r io.Reader) *json.Decoder { - decoder := json.NewDecoder(r) - decoder.UseNumber() - return decoder -} - -// MustUnmarshalJSON parse the JSON encoded data and returns the result. -// -// If the data cannot be decoded, this function will panic. This function is for -// test purposes. -func MustUnmarshalJSON(bs []byte) interface{} { - var x interface{} - if err := UnmarshalJSON(bs, &x); err != nil { - panic(err) - } - return x -} - -// MustMarshalJSON returns the JSON encoding of x -// -// If the data cannot be encoded, this function will panic. This function is for -// test purposes. -func MustMarshalJSON(x interface{}) []byte { - bs, err := json.Marshal(x) - if err != nil { - panic(err) - } - return bs -} - -// RoundTrip encodes to JSON, and decodes the result again. -// -// Thereby, it is converting its argument to the representation expected by -// rego.Input and inmem's Write operations. Works with both references and -// values. 
-func RoundTrip(x *interface{}) error { - bs, err := json.Marshal(x) - if err != nil { - return err - } - return UnmarshalJSON(bs, x) -} - -// Reference returns a pointer to its argument unless the argument already is -// a pointer. If the argument is **t, or ***t, etc, it will return *t. -// -// Used for preparing Go types (including pointers to structs) into values to be -// put through util.RoundTrip(). -func Reference(x interface{}) *interface{} { - var y interface{} - rv := reflect.ValueOf(x) - if rv.Kind() == reflect.Ptr { - return Reference(rv.Elem().Interface()) - } - if rv.Kind() != reflect.Invalid { - y = rv.Interface() - return &y - } - return &x -} - -// Unmarshal decodes a YAML, JSON or JSON extension value into the specified type. -func Unmarshal(bs []byte, v interface{}) error { - if len(bs) > 2 && bs[0] == 0xef && bs[1] == 0xbb && bs[2] == 0xbf { - bs = bs[3:] // Strip UTF-8 BOM, see https://www.rfc-editor.org/rfc/rfc8259#section-8.1 - } - - if json.Valid(bs) { - return unmarshalJSON(bs, v, false) - } - nbs, err := yaml.YAMLToJSON(bs) - if err == nil { - return unmarshalJSON(nbs, v, false) - } - // not json or yaml: try extensions - if handler := extension.FindExtension(".json"); handler != nil { - return handler(bs, v) - } - return err -} diff --git a/vendor/github.com/open-policy-agent/opa/util/maps.go b/vendor/github.com/open-policy-agent/opa/util/maps.go deleted file mode 100644 index d943b4d0a8..0000000000 --- a/vendor/github.com/open-policy-agent/opa/util/maps.go +++ /dev/null @@ -1,10 +0,0 @@ -package util - -// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps. 
-func Values[M ~map[K]V, K comparable, V any](m M) []V { - r := make([]V, 0, len(m)) - for _, v := range m { - r = append(r, v) - } - return r -} diff --git a/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go b/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go deleted file mode 100644 index 217638b363..0000000000 --- a/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go +++ /dev/null @@ -1,81 +0,0 @@ -package util - -import ( - "bytes" - "compress/gzip" - "encoding/binary" - "fmt" - "io" - "net/http" - "strings" - "sync" - - "github.com/open-policy-agent/opa/util/decoding" -) - -var gzipReaderPool = sync.Pool{ - New: func() interface{} { - reader := new(gzip.Reader) - return reader - }, -} - -// Note(philipc): Originally taken from server/server.go -// The DecodingLimitHandler handles validating that the gzip payload is within the -// allowed max size limit. Thus, in the event of a forged payload size trailer, -// the worst that can happen is that we waste memory up to the allowed max gzip -// payload size, but not an unbounded amount of memory, as was potentially -// possible before. -func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) { - var content *bytes.Buffer - // Note(philipc): If the request body is of unknown length (such as what - // happens when 'Transfer-Encoding: chunked' is set), we have to do an - // incremental read of the body. In this case, we can't be too clever, we - // just do the best we can with whatever is streamed over to us. - // Fetch gzip payload size limit from request context. - if maxLength, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok { - bs, err := io.ReadAll(io.LimitReader(r.Body, maxLength)) - if err != nil { - return bs, err - } - content = bytes.NewBuffer(bs) - } else { - // Read content from the request body into a buffer of known size. 
- content = bytes.NewBuffer(make([]byte, 0, r.ContentLength)) - if _, err := io.CopyN(content, r.Body, r.ContentLength); err != nil { - return content.Bytes(), err - } - } - - // Decompress gzip content by reading from the buffer. - if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") { - // Fetch gzip payload size limit from request context. - gzipMaxLength, _ := decoding.GetServerDecodingGzipMaxLen(r.Context()) - - // Note(philipc): The last 4 bytes of a well-formed gzip blob will - // always be a little-endian uint32, representing the decompressed - // content size, modulo 2^32. We validate that the size is safe, - // earlier in DecodingLimitHandler. - sizeTrailerField := binary.LittleEndian.Uint32(content.Bytes()[content.Len()-4:]) - if sizeTrailerField > uint32(gzipMaxLength) { - return content.Bytes(), fmt.Errorf("gzip payload too large") - } - // Pull a gzip decompressor from the pool, and assign it to the current - // buffer, using Reset(). Later, return it back to the pool for another - // request to use. - gzReader := gzipReaderPool.Get().(*gzip.Reader) - if err := gzReader.Reset(content); err != nil { - return nil, err - } - defer gzReader.Close() - defer gzipReaderPool.Put(gzReader) - decompressedContent := bytes.NewBuffer(make([]byte, 0, sizeTrailerField)) - if _, err := io.CopyN(decompressedContent, gzReader, int64(sizeTrailerField)); err != nil { - return decompressedContent.Bytes(), err - } - return decompressedContent.Bytes(), nil - } - - // Request was not compressed; return the content bytes. - return content.Bytes(), nil -} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go new file mode 100644 index 0000000000..603ab5cd77 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go @@ -0,0 +1,1020 @@ +// Copyright 2022 The OPA Authors. All rights reserved. 
+// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "encoding/json" + "fmt" + "net/url" + "slices" + "strings" + + "github.com/open-policy-agent/opa/internal/deepcopy" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + annotationScopePackage = "package" + annotationScopeRule = "rule" + annotationScopeDocument = "document" + annotationScopeSubpackages = "subpackages" +) + +type ( + // Annotations represents metadata attached to other AST nodes such as rules. + Annotations struct { + Scope string `json:"scope"` + Title string `json:"title,omitempty"` + Entrypoint bool `json:"entrypoint,omitempty"` + Description string `json:"description,omitempty"` + Organizations []string `json:"organizations,omitempty"` + RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"` + Authors []*AuthorAnnotation `json:"authors,omitempty"` + Schemas []*SchemaAnnotation `json:"schemas,omitempty"` + Compile *CompileAnnotation `json:"compile,omitempty"` + Custom map[string]any `json:"custom,omitempty"` + Location *Location `json:"location,omitempty"` + + comments []*Comment + node Node + } + + // SchemaAnnotation contains a schema declaration for the document identified by the path. 
+ SchemaAnnotation struct { + Path Ref `json:"path"` + Schema Ref `json:"schema,omitempty"` + Definition *any `json:"definition,omitempty"` + } + + CompileAnnotation struct { + Unknowns []Ref `json:"unknowns,omitempty"` + MaskRule Ref `json:"mask_rule,omitempty"` // NOTE: This doesn't need to start with "data.package", it can be relative + } + + AuthorAnnotation struct { + Name string `json:"name"` + Email string `json:"email,omitempty"` + } + + RelatedResourceAnnotation struct { + Ref url.URL `json:"ref"` + Description string `json:"description,omitempty"` + } + + AnnotationSet struct { + byRule map[*Rule][]*Annotations + byPackage map[int]*Annotations + byPath *annotationTreeNode + modules []*Module // Modules this set was constructed from + } + + annotationTreeNode struct { + Value *Annotations + Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!) + } + + AnnotationsRef struct { + Path Ref `json:"path"` // The path of the node the annotations are applied to + Annotations *Annotations `json:"annotations,omitempty"` + Location *Location `json:"location,omitempty"` // The location of the node the annotations are applied to + + node Node // The node the annotations are applied to + } + + AnnotationsRefSet []*AnnotationsRef + + FlatAnnotationsRefSet AnnotationsRefSet +) + +func (a *Annotations) String() string { + bs, _ := a.MarshalJSON() + return string(bs) +} + +// Loc returns the location of this annotation. +func (a *Annotations) Loc() *Location { + return a.Location +} + +// SetLoc updates the location of this annotation. +func (a *Annotations) SetLoc(l *Location) { + a.Location = l +} + +// EndLoc returns the location of this annotation's last comment line. +func (a *Annotations) EndLoc() *Location { + count := len(a.comments) + if count == 0 { + return a.Location + } + return a.comments[count-1].Location +} + +// Compare returns an integer indicating if a is less than, equal to, or greater +// than other. 
+func (a *Annotations) Compare(other *Annotations) int { + + if a == nil && other == nil { + return 0 + } + + if a == nil { + return -1 + } + + if other == nil { + return 1 + } + + if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Title, other.Title); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Description, other.Description); cmp != 0 { + return cmp + } + + if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 { + return cmp + } + + if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 { + return cmp + } + + if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 { + return cmp + } + + if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 { + return cmp + } + + if cmp := a.Compile.Compare(other.Compile); cmp != 0 { + return cmp + } + + if a.Entrypoint != other.Entrypoint { + if a.Entrypoint { + return 1 + } + return -1 + } + + if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 { + return cmp + } + + return 0 +} + +// GetTargetPath returns the path of the node these Annotations are applied to (the target) +func (a *Annotations) GetTargetPath() Ref { + switch n := a.node.(type) { + case *Package: + return n.Path + case *Rule: + return n.Ref().GroundPrefix() + default: + return nil + } +} + +func (a *Annotations) MarshalJSON() ([]byte, error) { + if a == nil { + return []byte(`{"scope":""}`), nil + } + + data := map[string]any{ + "scope": a.Scope, + } + + if a.Title != "" { + data["title"] = a.Title + } + + if a.Description != "" { + data["description"] = a.Description + } + + if a.Entrypoint { + data["entrypoint"] = a.Entrypoint + } + + if len(a.Organizations) > 0 { + data["organizations"] = a.Organizations + } + + if len(a.RelatedResources) > 0 { + data["related_resources"] = a.RelatedResources + } + + if len(a.Authors) > 0 { + data["authors"] = a.Authors + } + + if len(a.Schemas) > 0 { + data["schemas"] = a.Schemas + } 
+ + if len(a.Custom) > 0 { + data["custom"] = a.Custom + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Annotations { + if a.Location != nil { + data["location"] = a.Location + } + } + + return json.Marshal(data) +} + +func NewAnnotationsRef(a *Annotations) *AnnotationsRef { + var loc *Location + if a.node != nil { + loc = a.node.Loc() + } + + return &AnnotationsRef{ + Location: loc, + Path: a.GetTargetPath(), + Annotations: a, + node: a.node, + } +} + +func (ar *AnnotationsRef) GetPackage() *Package { + switch n := ar.node.(type) { + case *Package: + return n + case *Rule: + return n.Module.Package + default: + return nil + } +} + +func (ar *AnnotationsRef) GetRule() *Rule { + switch n := ar.node.(type) { + case *Rule: + return n + default: + return nil + } +} + +func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) { + data := map[string]any{ + "path": ar.Path, + } + + if ar.Annotations != nil { + data["annotations"] = ar.Annotations + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.AnnotationsRef { + if ar.Location != nil { + data["location"] = ar.Location + } + + // The location set for the schema ref terms is wrong (always set to + // row 1) and not really useful anyway.. 
so strip it out before marshalling + for _, schema := range ar.Annotations.Schemas { + if schema.Path != nil { + for _, term := range schema.Path { + term.Location = nil + } + } + } + } + + return json.Marshal(data) +} + +func scopeCompare(s1, s2 string) int { + o1 := scopeOrder(s1) + o2 := scopeOrder(s2) + + if o2 < o1 { + return 1 + } else if o2 > o1 { + return -1 + } + + if s1 < s2 { + return -1 + } else if s2 < s1 { + return 1 + } + + return 0 +} + +func scopeOrder(s string) int { + if s == annotationScopeRule { + return 1 + } + return 0 +} + +func compareAuthors(a, b []*AuthorAnnotation) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := range a { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + + return 0 +} + +func compareRelatedResources(a, b []*RelatedResourceAnnotation) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := range a { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + + return 0 +} + +func compareSchemas(a, b []*SchemaAnnotation) int { + maxLen := min(len(b), len(a)) + + for i := range maxLen { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + return 0 +} + +func compareStringLists(a, b []string) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := range a { + if cmp := strings.Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + + return 0 +} + +// Copy returns a deep copy of s. 
+func (a *Annotations) Copy(node Node) *Annotations { + cpy := *a + + cpy.Organizations = make([]string, len(a.Organizations)) + copy(cpy.Organizations, a.Organizations) + + cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources)) + for i := range a.RelatedResources { + cpy.RelatedResources[i] = a.RelatedResources[i].Copy() + } + + cpy.Authors = make([]*AuthorAnnotation, len(a.Authors)) + for i := range a.Authors { + cpy.Authors[i] = a.Authors[i].Copy() + } + + cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas)) + for i := range a.Schemas { + cpy.Schemas[i] = a.Schemas[i].Copy() + } + + cpy.Compile = a.Compile.Copy() + + if a.Custom != nil { + cpy.Custom = deepcopy.Map(a.Custom) + } + + cpy.node = node + + return &cpy +} + +// toObject constructs an AST Object from the annotation. +func (a *Annotations) toObject() (*Object, *Error) { + obj := NewObject() + + if a == nil { + return &obj, nil + } + + if len(a.Scope) > 0 { + obj.Insert(InternedTerm("scope"), InternedTerm(a.Scope)) + } + + if len(a.Title) > 0 { + obj.Insert(InternedTerm("title"), StringTerm(a.Title)) + } + + if a.Entrypoint { + obj.Insert(InternedTerm("entrypoint"), InternedTerm(true)) + } + + if len(a.Description) > 0 { + obj.Insert(InternedTerm("description"), StringTerm(a.Description)) + } + + if len(a.Organizations) > 0 { + orgs := make([]*Term, 0, len(a.Organizations)) + for _, org := range a.Organizations { + orgs = append(orgs, StringTerm(org)) + } + obj.Insert(InternedTerm("organizations"), ArrayTerm(orgs...)) + } + + if len(a.RelatedResources) > 0 { + rrs := make([]*Term, 0, len(a.RelatedResources)) + for _, rr := range a.RelatedResources { + rrObj := NewObject(Item(InternedTerm("ref"), StringTerm(rr.Ref.String()))) + if len(rr.Description) > 0 { + rrObj.Insert(InternedTerm("description"), StringTerm(rr.Description)) + } + rrs = append(rrs, NewTerm(rrObj)) + } + obj.Insert(InternedTerm("related_resources"), ArrayTerm(rrs...)) + } + + if len(a.Authors) > 0 { + as 
:= make([]*Term, 0, len(a.Authors)) + for _, author := range a.Authors { + aObj := NewObject() + if len(author.Name) > 0 { + aObj.Insert(InternedTerm("name"), StringTerm(author.Name)) + } + if len(author.Email) > 0 { + aObj.Insert(InternedTerm("email"), StringTerm(author.Email)) + } + as = append(as, NewTerm(aObj)) + } + obj.Insert(InternedTerm("authors"), ArrayTerm(as...)) + } + + if len(a.Schemas) > 0 { + ss := make([]*Term, 0, len(a.Schemas)) + for _, s := range a.Schemas { + sObj := NewObject() + if len(s.Path) > 0 { + sObj.Insert(InternedTerm("path"), NewTerm(s.Path.toArray())) + } + if len(s.Schema) > 0 { + sObj.Insert(InternedTerm("schema"), NewTerm(s.Schema.toArray())) + } + if s.Definition != nil { + def, err := InterfaceToValue(s.Definition) + if err != nil { + return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error()) + } + sObj.Insert(InternedTerm("definition"), NewTerm(def)) + } + ss = append(ss, NewTerm(sObj)) + } + obj.Insert(InternedTerm("schemas"), ArrayTerm(ss...)) + } + + if len(a.Custom) > 0 { + c, err := InterfaceToValue(a.Custom) + if err != nil { + return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error()) + } + obj.Insert(InternedTerm("custom"), NewTerm(c)) + } + + return &obj, nil +} + +func attachRuleAnnotations(mod *Module) { + // make a copy of the annotations + cpy := make([]*Annotations, len(mod.Annotations)) + for i, a := range mod.Annotations { + cpy[i] = a.Copy(a.node) + } + + for _, rule := range mod.Rules { + var j int + var found bool + for i, a := range cpy { + if rule.Ref().GroundPrefix().Equal(a.GetTargetPath()) { + if a.Scope == annotationScopeDocument { + rule.Annotations = append(rule.Annotations, a) + } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row { + j = i + found = true + rule.Annotations = append(rule.Annotations, a) + } + } + } + + if found && j < len(cpy) { + cpy = slices.Delete(cpy, j, j+1) + } + } +} + +func 
attachAnnotationsNodes(mod *Module) Errors { + var errs Errors + + // Find first non-annotation statement following each annotation and attach + // the annotation to that statement. + for _, a := range mod.Annotations { + for _, stmt := range mod.stmts { + _, ok := stmt.(*Annotations) + if !ok { + if stmt.Loc().Row > a.Location.Row { + a.node = stmt + break + } + } + } + + if a.Scope == "" { + switch a.node.(type) { + case *Rule: + if a.Entrypoint { + a.Scope = annotationScopeDocument + } else { + a.Scope = annotationScopeRule + } + case *Package: + a.Scope = annotationScopePackage + case *Import: + // Note that this isn't a valid scope, but set here so that the + // validate function called below can print an error message with + // a context that makes sense ("invalid scope: 'import'" instead of + // "invalid scope: '') + a.Scope = "import" + } + } + + if err := validateAnnotationScopeAttachment(a); err != nil { + errs = append(errs, err) + } + + if err := validateAnnotationEntrypointAttachment(a); err != nil { + errs = append(errs, err) + } + } + + return errs +} + +func validateAnnotationScopeAttachment(a *Annotations) *Error { + + switch a.Scope { + case annotationScopeRule, annotationScopeDocument: + if _, ok := a.node.(*Rule); ok { + return nil + } + return newScopeAttachmentErr(a, "rule") + case annotationScopePackage, annotationScopeSubpackages: + if _, ok := a.node.(*Package); ok { + return nil + } + return newScopeAttachmentErr(a, "package") + } + + return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. 
Use one of '%s', '%s', '%s', or '%s'", + a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages) +} + +func validateAnnotationEntrypointAttachment(a *Annotations) *Error { + if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) { + return NewError( + ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope) + } + return nil +} + +// Copy returns a deep copy of a. +func (a *AuthorAnnotation) Copy() *AuthorAnnotation { + cpy := *a + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. +func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int { + if cmp := strings.Compare(a.Name, other.Name); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Email, other.Email); cmp != 0 { + return cmp + } + + return 0 +} + +func (a *AuthorAnnotation) String() string { + if len(a.Email) == 0 { + return a.Name + } else if len(a.Name) == 0 { + return fmt.Sprintf("<%s>", a.Email) + } + return fmt.Sprintf("%s <%s>", a.Name, a.Email) +} + +// Copy returns a deep copy of rr. +func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation { + cpy := *rr + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. 
+func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int { + if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 { + return cmp + } + + return 0 +} + +func (rr *RelatedResourceAnnotation) String() string { + bs, _ := json.Marshal(rr) + return string(bs) +} + +func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) { + d := map[string]any{ + "ref": rr.Ref.String(), + } + + if len(rr.Description) > 0 { + d["description"] = rr.Description + } + + return json.Marshal(d) +} + +// Copy returns a deep copy of s. +func (s *SchemaAnnotation) Copy() *SchemaAnnotation { + cpy := *s + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. +func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int { + if cmp := s.Path.Compare(other.Path); cmp != 0 { + return cmp + } + + if cmp := s.Schema.Compare(other.Schema); cmp != 0 { + return cmp + } + + if s.Definition != nil && other.Definition == nil { + return -1 + } else if s.Definition == nil && other.Definition != nil { + return 1 + } else if s.Definition != nil && other.Definition != nil { + return util.Compare(*s.Definition, *other.Definition) + } + + return 0 +} + +func (s *SchemaAnnotation) String() string { + bs, _ := json.Marshal(s) + return string(bs) +} + +// Copy returns a deep copy of s. +func (c *CompileAnnotation) Copy() *CompileAnnotation { + if c == nil { + return nil + } + cpy := *c + for i := range c.Unknowns { + cpy.Unknowns[i] = c.Unknowns[i].Copy() + } + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. 
+func (c *CompileAnnotation) Compare(other *CompileAnnotation) int { + switch { + case c == nil && other == nil: + return 0 + case c != nil && other == nil: + return 1 + case c == nil && other != nil: + return -1 + } + + if cmp := slices.CompareFunc(c.Unknowns, other.Unknowns, RefCompare); cmp != 0 { + return cmp + } + return c.MaskRule.Compare(other.MaskRule) +} + +func (c *CompileAnnotation) String() string { + bs, _ := json.Marshal(c) + return string(bs) +} + +func newAnnotationSet() *AnnotationSet { + return &AnnotationSet{ + byRule: map[*Rule][]*Annotations{}, + byPackage: map[int]*Annotations{}, + byPath: newAnnotationTree(), + } +} + +func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) { + as := newAnnotationSet() + var errs Errors + for _, m := range modules { + for _, a := range m.Annotations { + if err := as.add(a); err != nil { + errs = append(errs, err) + } + } + } + if len(errs) > 0 { + return nil, errs + } + as.modules = modules + return as, nil +} + +// NOTE(philipc): During copy propagation, the underlying Nodes can be +// stripped away from the annotations, leading to nil deref panics. We +// silently ignore these cases for now, as a workaround. 
+func (as *AnnotationSet) add(a *Annotations) *Error { + switch a.Scope { + case annotationScopeRule: + if rule, ok := a.node.(*Rule); ok { + as.byRule[rule] = append(as.byRule[rule], a) + } + case annotationScopePackage: + if pkg, ok := a.node.(*Package); ok { + hash := pkg.Path.Hash() + if exist, ok := as.byPackage[hash]; ok { + return errAnnotationRedeclared(a, exist.Location) + } + as.byPackage[hash] = a + } + case annotationScopeDocument: + if rule, ok := a.node.(*Rule); ok { + path := rule.Ref().GroundPrefix() + x := as.byPath.get(path) + if x != nil { + return errAnnotationRedeclared(a, x.Value.Location) + } + as.byPath.insert(path, a) + } + case annotationScopeSubpackages: + if pkg, ok := a.node.(*Package); ok { + x := as.byPath.get(pkg.Path) + if x != nil && x.Value != nil { + return errAnnotationRedeclared(a, x.Value.Location) + } + as.byPath.insert(pkg.Path, a) + } + } + return nil +} + +func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations { + if as == nil { + return nil + } + return as.byRule[r] +} + +func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations { + if as == nil { + return nil + } + return as.byPath.ancestors(path) +} + +func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations { + if as == nil { + return nil + } + if node := as.byPath.get(path); node != nil { + return node.Value + } + return nil +} + +func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations { + if as == nil { + return nil + } + return as.byPackage[pkg.Path.Hash()] +} + +// Flatten returns a flattened list view of this AnnotationSet. +// The returned slice is sorted, first by the annotations' target path, then by their target location +func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet { + // This preallocation often won't be optimal, but it's superior to starting with a nil slice. 
+ refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage)) + + refs = as.byPath.flatten(refs) + + for _, a := range as.byPackage { + refs = append(refs, NewAnnotationsRef(a)) + } + + for _, as := range as.byRule { + for _, a := range as { + refs = append(refs, NewAnnotationsRef(a)) + } + } + + // Sort by path, then annotation location, for stable output + slices.SortStableFunc(refs, (*AnnotationsRef).Compare) + + return refs +} + +// Chain returns the chain of annotations leading up to the given rule. +// The returned slice is ordered as follows +// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry) +// 1. The 'document' scope entry, if any +// 2. The 'package' scope entry, if any +// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the fartest. E.g.: 'do.re.mi', 'do.re', 'do' +// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule. 
+func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet { + var refs []*AnnotationsRef + + ruleAnnots := as.GetRuleScope(rule) + + if len(ruleAnnots) >= 1 { + for _, a := range ruleAnnots { + refs = append(refs, NewAnnotationsRef(a)) + } + } else { + // Make sure there is always a leading entry representing the passed rule, even if it has no annotations + refs = append(refs, &AnnotationsRef{ + Location: rule.Location, + Path: rule.Ref().GroundPrefix(), + node: rule, + }) + } + + if len(refs) > 1 { + // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward + slices.SortStableFunc(refs, func(a, b *AnnotationsRef) int { + return -a.Annotations.Location.Compare(b.Annotations.Location) + }) + } + + docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix()) + if docAnnots != nil { + refs = append(refs, NewAnnotationsRef(docAnnots)) + } + + pkg := rule.Module.Package + pkgAnnots := as.GetPackageScope(pkg) + if pkgAnnots != nil { + refs = append(refs, NewAnnotationsRef(pkgAnnots)) + } + + subPkgAnnots := as.GetSubpackagesScope(pkg.Path) + // We need to reverse the order, as subPkgAnnots ordering will start at the root, + // whereas we want to end at the root. + for i := len(subPkgAnnots) - 1; i >= 0; i-- { + refs = append(refs, NewAnnotationsRef(subPkgAnnots[i])) + } + + return refs +} + +func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet { + result := make(FlatAnnotationsRefSet, 0, len(ars)+1) + + // insertion sort, first by path, then location + for i, current := range ars { + if ar.Compare(current) < 0 { + result = append(result, ar) + result = append(result, ars[i:]...) 
+ break + } + result = append(result, current) + } + + if len(result) < len(ars)+1 { + result = append(result, ar) + } + + return result +} + +func newAnnotationTree() *annotationTreeNode { + return &annotationTreeNode{ + Value: nil, + Children: map[Value]*annotationTreeNode{}, + } +} + +func (t *annotationTreeNode) insert(path Ref, value *Annotations) { + node := t + for _, k := range path { + child, ok := node.Children[k.Value] + if !ok { + child = newAnnotationTree() + node.Children[k.Value] = child + } + node = child + } + node.Value = value +} + +func (t *annotationTreeNode) get(path Ref) *annotationTreeNode { + node := t + for _, k := range path { + if node == nil { + return nil + } + child, ok := node.Children[k.Value] + if !ok { + return nil + } + node = child + } + return node +} + +// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'. +func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) { + node := t + for _, k := range path { + if node == nil { + return result + } + child, ok := node.Children[k.Value] + if !ok { + return result + } + if child.Value != nil { + result = append(result, child.Value) + } + node = child + } + return result +} + +func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef { + if a := t.Value; a != nil { + refs = append(refs, NewAnnotationsRef(a)) + } + for _, c := range t.Children { + refs = c.flatten(refs) + } + return refs +} + +func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int { + if c := ar.Path.Compare(other.Path); c != 0 { + return c + } + + if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 { + return c + } + + return ar.Annotations.Compare(other.Annotations) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go new file mode 100644 index 0000000000..7e30a8051c --- /dev/null +++ 
b/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go @@ -0,0 +1,3663 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "strings" + + "github.com/open-policy-agent/opa/v1/types" +) + +// Builtins is the registry of built-in functions supported by OPA. +// Call RegisterBuiltin to add a new built-in. +var Builtins []*Builtin + +// RegisterBuiltin adds a new built-in function to the registry. +// NOTE: The underlying map storing built-ins is **not** thread-safe, +// and it's recommended to call this only during initialization, and never +// later. Registering built-ins after that point is unsupported and will +// likely lead to concurrent map read/write panics. +func RegisterBuiltin(b *Builtin) { + Builtins = append(Builtins, b) + BuiltinMap[b.Name] = b + if len(b.Infix) > 0 { + BuiltinMap[b.Infix] = b + + InternStringTerm(b.Infix) + InternVarValue(b.Infix) + } + + if strings.Contains(b.Name, ".") { + parts := strings.Split(b.Name, ".") + InternStringTerm(parts...) + InternVarValue(parts[0]) + } else { + InternStringTerm(b.Name) + InternVarValue(b.Name) + } +} + +// DefaultBuiltins is the registry of built-in functions supported in OPA +// by default. When adding a new built-in function to OPA, update this +// list. 
+var DefaultBuiltins = [...]*Builtin{ + // Unification/equality ("=") + Equality, + + // Assignment (":=") + Assign, + + // Membership, infix "in": `x in xs` + Member, + MemberWithKey, + + // Comparisons + GreaterThan, + GreaterThanEq, + LessThan, + LessThanEq, + NotEqual, + Equal, + + // Arithmetic + Plus, + Minus, + Multiply, + Divide, + Ceil, + Floor, + Round, + Abs, + Rem, + + // Bitwise Arithmetic + BitsOr, + BitsAnd, + BitsNegate, + BitsXOr, + BitsShiftLeft, + BitsShiftRight, + + // Binary + And, + Or, + + // Aggregates + Count, + Sum, + Product, + Max, + Min, + Any, + All, + + // Arrays + ArrayConcat, + ArrayFlatten, + ArraySlice, + ArrayReverse, + + // Conversions + ToNumber, + + // Casts (DEPRECATED) + CastObject, + CastNull, + CastBoolean, + CastString, + CastSet, + CastArray, + + // Regular Expressions + RegexIsValid, + RegexMatch, + RegexMatchDeprecated, + RegexSplit, + GlobsMatch, + RegexTemplateMatch, + RegexFind, + RegexFindAllStringSubmatch, + RegexReplace, + + // Sets + SetDiff, + Intersection, + Union, + + // Strings + AnyPrefixMatch, + AnySuffixMatch, + Concat, + FormatInt, + IndexOf, + IndexOfN, + Substring, + Lower, + Upper, + Contains, + StringCount, + StartsWith, + EndsWith, + Split, + Replace, + ReplaceN, + Trim, + TrimLeft, + TrimPrefix, + TrimRight, + TrimSuffix, + TrimSpace, + Sprintf, + StringReverse, + RenderTemplate, + InternalTemplateString, + + // Numbers + NumbersRange, + NumbersRangeStep, + RandIntn, + + // Encoding + JSONMarshal, + JSONMarshalWithOptions, + JSONUnmarshal, + JSONIsValid, + Base64Encode, + Base64Decode, + Base64IsValid, + Base64UrlEncode, + Base64UrlEncodeNoPad, + Base64UrlDecode, + URLQueryDecode, + URLQueryEncode, + URLQueryEncodeObject, + URLQueryDecodeObject, + YAMLMarshal, + YAMLUnmarshal, + YAMLIsValid, + HexEncode, + HexDecode, + + // Object Manipulation + ObjectUnion, + ObjectUnionN, + ObjectRemove, + ObjectFilter, + ObjectGet, + ObjectKeys, + ObjectSubset, + + // JSON Object Manipulation + JSONFilter, + 
JSONRemove, + JSONPatch, + + // Tokens + JWTDecode, + JWTVerifyRS256, + JWTVerifyRS384, + JWTVerifyRS512, + JWTVerifyPS256, + JWTVerifyPS384, + JWTVerifyPS512, + JWTVerifyES256, + JWTVerifyES384, + JWTVerifyES512, + JWTVerifyEdDSA, + JWTVerifyHS256, + JWTVerifyHS384, + JWTVerifyHS512, + JWTDecodeVerify, + JWTEncodeSignRaw, + JWTEncodeSign, + + // Time + NowNanos, + ParseNanos, + ParseRFC3339Nanos, + ParseDurationNanos, + Format, + Date, + Clock, + Weekday, + AddDate, + Diff, + + // Crypto + CryptoX509ParseCertificates, + CryptoX509ParseAndVerifyCertificates, + CryptoX509ParseAndVerifyCertificatesWithOptions, + CryptoMd5, + CryptoSha1, + CryptoSha256, + CryptoX509ParseCertificateRequest, + CryptoX509ParseRSAPrivateKey, + CryptoX509ParseKeyPair, + CryptoParsePrivateKeys, + CryptoHmacMd5, + CryptoHmacSha1, + CryptoHmacSha256, + CryptoHmacSha512, + CryptoHmacEqual, + + // Graphs + WalkBuiltin, + ReachableBuiltin, + ReachablePathsBuiltin, + + // Sort + Sort, + + // Types + IsNumber, + IsString, + IsBoolean, + IsArray, + IsSet, + IsObject, + IsNull, + TypeNameBuiltin, + + // HTTP + HTTPSend, + + // GraphQL + GraphQLParse, + GraphQLParseAndVerify, + GraphQLParseQuery, + GraphQLParseSchema, + GraphQLIsValid, + GraphQLSchemaIsValid, + + // JSON Schema + JSONSchemaVerify, + JSONMatchSchema, + + // Cloud Provider Helpers + ProvidersAWSSignReqObj, + + // Rego + RegoParseModule, + RegoMetadataChain, + RegoMetadataRule, + + // OPA + OPARuntime, + + // Tracing + Trace, + + // Networking + NetCIDROverlap, + NetCIDRIntersects, + NetCIDRContains, + NetCIDRContainsMatches, + NetCIDRExpand, + NetCIDRMerge, + NetLookupIPAddr, + NetCIDRIsValid, + + // Glob + GlobMatch, + GlobQuoteMeta, + + // Units + UnitsParse, + UnitsParseBytes, + + // UUIDs + UUIDRFC4122, + UUIDParse, + + // SemVers + SemVerIsValid, + SemVerCompare, + + // Printing + Print, + InternalPrint, + + // Testing + InternalTestCase, +} + +// BuiltinMap provides a convenient mapping of built-in names to +// built-in 
definitions. +var BuiltinMap map[string]*Builtin + +// Deprecated: Builtins can now be directly annotated with the +// Nondeterministic property, and when set to true, will be ignored +// for partial evaluation. +var IgnoreDuringPartialEval = []*Builtin{ + RandIntn, + UUIDRFC4122, + JWTDecodeVerify, + JWTEncodeSignRaw, + JWTEncodeSign, + NowNanos, + HTTPSend, + OPARuntime, + NetLookupIPAddr, +} + +/** + * Unification + */ + +// Equality represents the "=" operator. +var Equality = &Builtin{ + Name: "eq", + Infix: "=", + Decl: types.NewFunction( + types.Args(types.A, types.A), + types.B, + ), + CanSkipBctx: true, +} + +/** + * Assignment + */ + +// Assign represents the assignment (":=") operator. +var Assign = &Builtin{ + Name: "assign", + Infix: ":=", + Decl: types.NewFunction( + types.Args(types.A, types.A), + types.B, + ), + CanSkipBctx: true, +} + +// Member represents the `in` (infix) operator. +var Member = &Builtin{ + Name: "internal.member_2", + Infix: "in", + Decl: types.NewFunction( + types.Args( + types.A, + types.A, + ), + types.B, + ), + CanSkipBctx: true, +} + +// MemberWithKey represents the `in` (infix) operator when used +// with two terms on the lhs, i.e., `k, v in obj`. 
+var MemberWithKey = &Builtin{ + Name: "internal.member_3", + Infix: "in", + Decl: types.NewFunction( + types.Args( + types.A, + types.A, + types.A, + ), + types.B, + ), + CanSkipBctx: true, +} + +/** + * Comparisons + */ +var comparison = category("comparison") + +var GreaterThan = &Builtin{ + Name: "gt", + Infix: ">", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"), + ), + CanSkipBctx: true, +} + +var GreaterThanEq = &Builtin{ + Name: "gte", + Infix: ">=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"), + ), + CanSkipBctx: true, +} + +// LessThan represents the "<" comparison operator. +var LessThan = &Builtin{ + Name: "lt", + Infix: "<", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"), + ), + CanSkipBctx: true, +} + +var LessThanEq = &Builtin{ + Name: "lte", + Infix: "<=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"), + ), + CanSkipBctx: true, +} + +var NotEqual = &Builtin{ + Name: "neq", + Infix: "!=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"), + ), + CanSkipBctx: true, +} + +// Equal represents the "==" comparison operator. 
+var Equal = &Builtin{ + Name: "equal", + Infix: "==", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"), + ), + CanSkipBctx: true, +} + +/** + * Arithmetic + */ +var number = category("numbers") + +var Plus = &Builtin{ + Name: "plus", + Infix: "+", + Description: "Plus adds two numbers together.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N), + types.Named("y", types.N), + ), + types.Named("z", types.N).Description("the sum of `x` and `y`"), + ), + Categories: number, + CanSkipBctx: true, +} + +var Minus = &Builtin{ + Name: "minus", + Infix: "-", + Description: "Minus subtracts the second number from the first number or computes the difference between two sets.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny(types.N, types.SetOfAny)), + types.Named("y", types.NewAny(types.N, types.SetOfAny)), + ), + types.Named("z", types.NewAny(types.N, types.SetOfAny)).Description("the difference of `x` and `y`"), + ), + Categories: category("sets", "numbers"), + CanSkipBctx: true, +} + +var Multiply = &Builtin{ + Name: "mul", + Infix: "*", + Description: "Multiplies two numbers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N), + types.Named("y", types.N), + ), + types.Named("z", types.N).Description("the product of `x` and `y`"), + ), + Categories: number, + CanSkipBctx: true, +} + +var Divide = &Builtin{ + Name: "div", + Infix: "/", + Description: "Divides the first number by the second number.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the dividend"), + types.Named("y", types.N).Description("the divisor"), + ), + types.Named("z", types.N).Description("the result of `x` divided by `y`"), + ), + Categories: number, + CanSkipBctx: true, +} + +var Round = &Builtin{ + Name: "round", + Description: 
"Rounds the number to the nearest integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the number to round"), + ), + types.Named("y", types.N).Description("the result of rounding `x`"), + ), + Categories: number, + CanSkipBctx: true, +} + +var Ceil = &Builtin{ + Name: "ceil", + Description: "Rounds the number _up_ to the nearest integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the number to round"), + ), + types.Named("y", types.N).Description("the result of rounding `x` _up_"), + ), + Categories: number, + CanSkipBctx: true, +} + +var Floor = &Builtin{ + Name: "floor", + Description: "Rounds the number _down_ to the nearest integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the number to round"), + ), + types.Named("y", types.N).Description("the result of rounding `x` _down_"), + ), + Categories: number, + CanSkipBctx: true, +} + +var Abs = &Builtin{ + Name: "abs", + Description: "Returns the number without its sign.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the number to take the absolute value of"), + ), + types.Named("y", types.N).Description("the absolute value of `x`"), + ), + Categories: number, + CanSkipBctx: true, +} + +var Rem = &Builtin{ + Name: "rem", + Infix: "%", + Description: "Returns the remainder for of `x` divided by `y`, for `y != 0`.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N), + types.Named("y", types.N), + ), + types.Named("z", types.N).Description("the remainder"), + ), + Categories: number, + CanSkipBctx: true, +} + +/** + * Bitwise + */ + +var BitsOr = &Builtin{ + Name: "bits.or", + Description: "Returns the bitwise \"OR\" of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", 
types.N).Description("the bitwise OR of `x` and `y`"), + ), + CanSkipBctx: true, +} + +var BitsAnd = &Builtin{ + Name: "bits.and", + Description: "Returns the bitwise \"AND\" of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", types.N).Description("the bitwise AND of `x` and `y`"), + ), + CanSkipBctx: true, +} + +var BitsNegate = &Builtin{ + Name: "bits.negate", + Description: "Returns the bitwise negation (flip) of an integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to negate"), + ), + types.Named("z", types.N).Description("the bitwise negation of `x`"), + ), + CanSkipBctx: true, +} + +var BitsXOr = &Builtin{ + Name: "bits.xor", + Description: "Returns the bitwise \"XOR\" (exclusive-or) of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", types.N).Description("the bitwise XOR of `x` and `y`"), + ), + CanSkipBctx: true, +} + +var BitsShiftLeft = &Builtin{ + Name: "bits.lsh", + Description: "Returns a new integer with its bits shifted `s` bits to the left.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to shift"), + types.Named("s", types.N).Description("the number of bits to shift"), + ), + types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the left"), + ), + CanSkipBctx: true, +} + +var BitsShiftRight = &Builtin{ + Name: "bits.rsh", + Description: "Returns a new integer with its bits shifted `s` bits to the right.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to shift"), + types.Named("s", types.N).Description("the number of bits to shift"), + ), + types.Named("z", 
types.N).Description("the result of shifting `x` `s` bits to the right"), + ), + CanSkipBctx: true, +} + +/** + * Sets + */ + +var sets = category("sets") + +var And = &Builtin{ + Name: "and", + Infix: "&", + Description: "Returns the intersection of two sets.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.SetOfAny).Description("the first set"), + types.Named("y", types.SetOfAny).Description("the second set"), + ), + types.Named("z", types.SetOfAny).Description("the intersection of `x` and `y`"), + ), + Categories: sets, + CanSkipBctx: true, +} + +// Or performs a union operation on sets. +var Or = &Builtin{ + Name: "or", + Infix: "|", + Description: "Returns the union of two sets.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.SetOfAny), + types.Named("y", types.SetOfAny), + ), + types.Named("z", types.SetOfAny).Description("the union of `x` and `y`"), + ), + Categories: sets, + CanSkipBctx: true, +} + +var Intersection = &Builtin{ + Name: "intersection", + Description: "Returns the intersection of the given input sets.", + Decl: types.NewFunction( + types.Args( + types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to intersect"), + ), + types.Named("y", types.SetOfAny).Description("the intersection of all `xs` sets"), + ), + Categories: sets, + CanSkipBctx: true, +} + +var Union = &Builtin{ + Name: "union", + Description: "Returns the union of the given input sets.", + Decl: types.NewFunction( + types.Args( + types.Named("xs", types.NewSet(types.SetOfAny)).Description("set of sets to merge"), + ), + types.Named("y", types.SetOfAny).Description("the union of all `xs` sets"), + ), + Categories: sets, + CanSkipBctx: true, +} + +/** + * Aggregates + */ + +var aggregates = category("aggregates") + +var Count = &Builtin{ + Name: "count", + Description: "Count takes a collection or string and returns the number of elements (or characters) in it.", + Decl: types.NewFunction( + types.Args( + 
types.Named("collection", types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.S, + )).Description("the set/array/object/string to be counted"), + ), + types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."), + ), + Categories: aggregates, + CanSkipBctx: true, +} + +var Sum = &Builtin{ + Name: "sum", + Description: "Sums elements of an array or set of numbers.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfNum, + types.NewArray(nil, types.N), + )).Description("the set or array of numbers to sum"), + ), + types.Named("n", types.N).Description("the sum of all elements"), + ), + Categories: aggregates, + CanSkipBctx: true, +} + +var Product = &Builtin{ + Name: "product", + Description: "Multiplies elements of an array or set of numbers", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfNum, + types.NewArray(nil, types.N), + )).Description("the set or array of numbers to multiply"), + ), + types.Named("n", types.N).Description("the product of all elements"), + ), + Categories: aggregates, + CanSkipBctx: true, +} + +var Max = &Builtin{ + Name: "max", + Description: "Returns the maximum value in a collection.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A), + )).Description("the set or array to be searched"), + ), + types.Named("n", types.A).Description("the maximum of all elements"), + ), + Categories: aggregates, + CanSkipBctx: true, +} + +var Min = &Builtin{ + Name: "min", + Description: "Returns the minimum value in a collection.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A), + )).Description("the set or array to be searched"), + ), + types.Named("n", 
types.A).Description("the minimum of all elements"), + ), + Categories: aggregates, + CanSkipBctx: true, +} + +/** + * Sorting + */ + +var Sort = &Builtin{ + Name: "sort", + Description: "Returns a sorted array.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.NewArray(nil, types.A), + types.SetOfAny, + )).Description("the array or set to be sorted"), + ), + types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"), + ), + Categories: aggregates, + CanSkipBctx: true, +} + +/** + * Arrays + */ + +var ArrayConcat = &Builtin{ + Name: "array.concat", + Description: "Concatenates two arrays.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewArray(nil, types.A)).Description("the first array"), + types.Named("y", types.NewArray(nil, types.A)).Description("the second array"), + ), + types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"), + ), + CanSkipBctx: true, +} + +var ArrayFlatten = &Builtin{ + Name: "array.flatten", + Description: "Non-recursively unpacks array items in arr into the flattened array. Other types are appended as-is.", + Decl: types.NewFunction( + types.Args( + types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be flattened"), + ), + types.Named("flattened", types.NewArray(nil, types.A)).Description("array flattened one level"), + ), + CanSkipBctx: true, +} + +var ArraySlice = &Builtin{ + Name: "array.slice", + Description: "Returns a slice of a given array. 
If `start` is greater or equal than `stop`, `slice` is `[]`.", + Decl: types.NewFunction( + types.Args( + types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"), + types.Named("start", types.N).Description("the start index of the returned slice; if less than zero, it's clamped to 0"), + types.Named("stop", types.N).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"), + ), + types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"), + ), + CanSkipBctx: true, +} // NOTE(sr): this function really needs examples + +var ArrayReverse = &Builtin{ + Name: "array.reverse", + Description: "Returns the reverse of a given array.", + Decl: types.NewFunction( + types.Args( + types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be reversed"), + ), + types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"), + ), + CanSkipBctx: true, +} + +/** + * Conversions + */ +var conversions = category("conversions") + +var ToNumber = &Builtin{ + Name: "to_number", + Description: "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.S, + types.B, + types.Nl, + )).Description("value to convert"), + ), + types.Named("num", types.N).Description("the numeric representation of `x`"), + ), + Categories: conversions, + CanSkipBctx: true, +} + +/** + * Regular Expressions + */ + +var RegexMatch = &Builtin{ + Name: "regex.match", + Description: "Matches a string against a regular expression.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + 
types.Named("value", types.S).Description("value to match against `pattern`"), + ), + types.Named("result", types.B).Description("true if `value` matches `pattern`"), + ), +} + +var RegexIsValid = &Builtin{ + Name: "regex.is_valid", + Description: "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + ), + types.Named("result", types.B).Description("true if `pattern` is a valid regular expression"), + ), + CanSkipBctx: true, +} + +var RegexFindAllStringSubmatch = &Builtin{ + Name: "regex.find_all_string_submatch_n", + Description: "Returns all successive matches of the expression.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string to match"), + types.Named("number", types.N).Description("number of matches to return; `-1` means all matches"), + ), + types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))).Description("array of all matches"), + ), + CanSkipBctx: false, +} + +var RegexTemplateMatch = &Builtin{ + Name: "regex.template_match", + Description: "Matches a string against a pattern, where there pattern may be glob-like", + Decl: types.NewFunction( + types.Args( + types.Named("template", types.S).Description("template expression containing `0..n` regular expressions"), + types.Named("value", types.S).Description("string to match"), + types.Named("delimiter_start", types.S).Description("start delimiter of the regular expression in `template`"), + types.Named("delimiter_end", types.S).Description("end delimiter of the regular expression in `template`"), + ), + types.Named("result", types.B).Description("true if `value` matches the `template`"), + ), + CanSkipBctx: true, +} // TODO(sr): example:`regex.template_match("urn:foo:{.*}", 
"urn:foo:bar:baz", "{", "}")`` returns ``true``. + +var RegexSplit = &Builtin{ + Name: "regex.split", + Description: "Splits the input string by the occurrences of the given pattern.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string to match"), + ), + types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"), + ), + CanSkipBctx: false, +} + +// RegexFind takes two strings and a number, the pattern, the value and number of match values to +// return, -1 means all match values. +var RegexFind = &Builtin{ + Name: "regex.find_n", + Description: "Returns the specified number of matches when matching the input against the pattern.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string to match"), + types.Named("number", types.N).Description("number of matches to return, if `-1`, returns all matches"), + ), + types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"), + ), + CanSkipBctx: false, +} + +// GlobsMatch takes two strings regexp-style strings and evaluates to true if their +// intersection matches a non-empty set of non-empty strings. +// Examples: +// - "a.a." and ".b.b" -> true. +// - "[a-z]*" and [0-9]+" -> not true. +var GlobsMatch = &Builtin{ + Name: "regex.globs_match", + Description: `Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings. 
+The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.", + Decl: types.NewFunction( + types.Args( + types.Named("glob1", types.S).Description("first glob-style regular expression"), + types.Named("glob2", types.S).Description("second glob-style regular expression"), + ), + types.Named("result", types.B).Description("true if the intersection of `glob1` and `glob2` matches a non-empty set of non-empty strings"), + ), + CanSkipBctx: true, +} + +/** + * Strings + */ +var stringsCat = category("strings") + +var AnyPrefixMatch = &Builtin{ + Name: "strings.any_prefix_match", + Description: "Returns true if any of the search strings begins with any of the base strings.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.NewAny( + types.S, + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("search string(s)"), + types.Named("base", types.NewAny( + types.S, + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("base string(s)"), + ), + types.Named("result", types.B).Description("result of the prefix check"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var AnySuffixMatch = &Builtin{ + Name: "strings.any_suffix_match", + Description: "Returns true if any of the search strings ends with any of the base strings.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.NewAny( + types.S, + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("search string(s)"), + types.Named("base", types.NewAny( + types.S, + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("base string(s)"), + ), + types.Named("result", types.B).Description("result of the suffix check"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var Concat = &Builtin{ + Name: "concat", + Description: "Joins a set or array of strings with a delimiter.", + Decl: types.NewFunction( + types.Args( + types.Named("delimiter", 
types.S).Description("string to use as a delimiter"), + types.Named("collection", types.NewAny( + types.SetOfStr, + types.NewArray(nil, types.S), + )).Description("strings to join"), + ), + types.Named("output", types.S).Description("the joined string"), + ), + Categories: stringsCat, + CanSkipBctx: false, +} + +var FormatInt = &Builtin{ + Name: "format_int", + Description: "Returns the string representation of the number in the given base after rounding it down to an integer value.", + Decl: types.NewFunction( + types.Args( + types.Named("number", types.N).Description("number to format"), + types.Named("base", types.N).Description("base of number representation to use"), + ), + types.Named("output", types.S).Description("formatted number"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var IndexOf = &Builtin{ + Name: "indexof", + Description: "Returns the index of a substring contained inside a string.", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var IndexOfN = &Builtin{ + Name: "indexof_n", + Description: "Returns a list of all the indexes of a substring contained inside a string.", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("output", types.NewArray(nil, types.N)).Description("all indices at which `needle` occurs in `haystack`, may be empty"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var Substring = &Builtin{ + Name: "substring", + Description: "Returns the portion of a string for a given `offset` and a `length`. 
If `length < 0`, `output` is the remainder of the string.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to extract substring from"), + types.Named("offset", types.N).Description("offset, must be positive"), + types.Named("length", types.N).Description("length of the substring starting from `offset`"), + ), + types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var Contains = &Builtin{ + Name: "contains", + Description: "Returns `true` if the search string is included in the base string", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("result", types.B).Description("result of the containment check"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var StringCount = &Builtin{ + Name: "strings.count", + Description: "Returns the number of non-overlapping instances of a substring in a string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("string to search in"), + types.Named("substring", types.S).Description("substring to look for"), + ), + types.Named("output", types.N).Description("count of occurrences, `0` if not found"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var StartsWith = &Builtin{ + Name: "startswith", + Description: "Returns true if the search string begins with the base string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("search string"), + types.Named("base", types.S).Description("base string"), + ), + types.Named("result", types.B).Description("result of the prefix check"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var EndsWith = &Builtin{ + Name: "endswith", + Description: "Returns true if the search string ends with the 
base string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("search string"), + types.Named("base", types.S).Description("base string"), + ), + types.Named("result", types.B).Description("result of the suffix check"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var Lower = &Builtin{ + Name: "lower", + Description: "Returns the input string but with all characters in lower-case.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is converted to lower-case"), + ), + types.Named("y", types.S).Description("lower-case of x"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var Upper = &Builtin{ + Name: "upper", + Description: "Returns the input string but with all characters in upper-case.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is converted to upper-case"), + ), + types.Named("y", types.S).Description("upper-case of x"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var Split = &Builtin{ + Name: "split", + Description: "Split returns an array containing elements of the input string split on a delimiter.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is split"), + types.Named("delimiter", types.S).Description("delimiter used for splitting"), + ), + types.Named("ys", types.NewArray(nil, types.S)).Description("split parts"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var Replace = &Builtin{ + Name: "replace", + Description: "Replace replaces all instances of a sub-string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string being processed"), + types.Named("old", types.S).Description("substring to replace"), + types.Named("new", types.S).Description("string to replace `old` with"), + ), + types.Named("y", types.S).Description("string with replaced substrings"), + ), + Categories: stringsCat, + 
CanSkipBctx: false, +} + +var ReplaceN = &Builtin{ + Name: "strings.replace_n", + Description: `Replaces a string from a list of old, new string pairs. +Replacements are performed in the order they appear in the target string, without overlapping matches. +The old string comparisons are done in argument order.`, + Decl: types.NewFunction( + types.Args( + types.Named("patterns", types.NewObject( + nil, + types.NewDynamicProperty( + types.S, + types.S)), + ).Description("replacement pairs"), + types.Named("value", types.S).Description("string to replace substring matches in"), + ), + types.Named("output", types.S).Description("string with replaced substrings"), + ), + CanSkipBctx: false, +} + +var RegexReplace = &Builtin{ + Name: "regex.replace", + Description: `Find and replaces the text using the regular expression pattern.`, + Decl: types.NewFunction( + types.Args( + types.Named("s", types.S).Description("string being processed"), + types.Named("pattern", types.S).Description("regex pattern to be applied"), + types.Named("value", types.S).Description("regex value"), + ), + types.Named("output", types.S).Description("string with replaced substrings"), + ), + CanSkipBctx: false, +} + +var Trim = &Builtin{ + Name: "trim", + Description: "Returns `value` with all leading or trailing instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of characters that are cut off"), + ), + types.Named("output", types.S).Description("string trimmed of `cutset` characters"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var TrimLeft = &Builtin{ + Name: "trim_left", + Description: "Returns `value` with all leading instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of 
characters that are cut off on the left"), + ), + types.Named("output", types.S).Description("string left-trimmed of `cutset` characters"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var TrimPrefix = &Builtin{ + Name: "trim_prefix", + Description: "Returns `value` without the prefix. If `value` doesn't start with `prefix`, it is returned unchanged.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("prefix", types.S).Description("prefix to cut off"), + ), + types.Named("output", types.S).Description("string with `prefix` cut off"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var TrimRight = &Builtin{ + Name: "trim_right", + Description: "Returns `value` with all trailing instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of characters that are cut off on the right"), + ), + types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var TrimSuffix = &Builtin{ + Name: "trim_suffix", + Description: "Returns `value` without the suffix. 
If `value` doesn't end with `suffix`, it is returned unchanged.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("suffix", types.S).Description("suffix to cut off"), + ), + types.Named("output", types.S).Description("string with `suffix` cut off"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var TrimSpace = &Builtin{ + Name: "trim_space", + Description: "Return the given string with all leading and trailing white space removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + ), + types.Named("output", types.S).Description("string leading and trailing white space cut off"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var Sprintf = &Builtin{ + Name: "sprintf", + Description: "Returns the given string, formatted.", + Decl: types.NewFunction( + types.Args( + types.Named("format", types.S).Description("string with formatting verbs"), + types.Named("values", types.NewArray(nil, types.A)).Description("arguments to format into formatting verbs"), + ), + types.Named("output", types.S).Description("`format` formatted by the values in `values`"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var StringReverse = &Builtin{ + Name: "strings.reverse", + Description: "Reverses a given string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to reverse"), + ), + types.Named("y", types.S).Description("reversed string"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +var RenderTemplate = &Builtin{ + Name: "strings.render_template", + Description: `Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key. 
+ For examples of templating syntax, see https://pkg.go.dev/text/template`, + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("a templated string"), + types.Named("vars", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("a mapping of template variable keys to values"), + ), + types.Named("result", types.S).Description("rendered template with template variables injected"), + ), + Categories: stringsCat, + CanSkipBctx: true, +} + +/** + * Numbers + */ + +// RandIntn returns a random number 0 - n +// Marked non-deterministic because it relies on RNG internally. +var RandIntn = &Builtin{ + Name: "rand.intn", + Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.", + Decl: types.NewFunction( + types.Args( + types.Named("str", types.S).Description("seed string for the random number"), + types.Named("n", types.N).Description("upper bound of the random number (exclusive)"), + ), + types.Named("y", types.N).Description("random integer in the range `[0, abs(n))`"), + ), + Categories: number, + Nondeterministic: true, + CanSkipBctx: false, +} + +var NumbersRange = &Builtin{ + Name: "numbers.range", + Description: "Returns an array of numbers in the given (inclusive) range. 
If `a==b`, then `range == [a]`; if `a > b`, then `range` is in descending order.", + Decl: types.NewFunction( + types.Args( + types.Named("a", types.N).Description("the start of the range"), + types.Named("b", types.N).Description("the end of the range (inclusive)"), + ), + types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"), + ), + CanSkipBctx: false, // needed for context timeout check +} + +var NumbersRangeStep = &Builtin{ + Name: "numbers.range_step", + Description: `Returns an array of numbers in the given (inclusive) range incremented by a positive step. + If "a==b", then "range == [a]"; if "a > b", then "range" is in descending order. + If the provided "step" is less then 1, an error will be thrown. + If "b" is not in the range of the provided "step", "b" won't be included in the result. + `, + Decl: types.NewFunction( + types.Args( + types.Named("a", types.N).Description("the start of the range"), + types.Named("b", types.N).Description("the end of the range (inclusive)"), + types.Named("step", types.N).Description("the step between numbers in the range"), + ), + types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"), + ), + CanSkipBctx: false, // needed for context timeout check +} + +/** + * Units + */ + +var UnitsParse = &Builtin{ + Name: "units.parse", + Description: `Converts strings like "10G", "5K", "4M", "1500m", and the like into a number. +This number can be a non-integer, such as 1.5, 0.22, etc. Scientific notation is supported, +allowing values such as "1e-3K" (1) or "2.5e6M" (2.5 million M). + +Supports standard metric decimal and binary SI units (e.g., K, Ki, M, Mi, G, Gi, etc.) where +m, K, M, G, T, P, and E are treated as decimal units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as +binary units. + +Note that 'm' and 'M' are case-sensitive to allow distinguishing between "milli" and "mega" units +respectively. 
Other units are case-insensitive.`, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the unit to parse"), + ), + types.Named("y", types.N).Description("the parsed number"), + ), + CanSkipBctx: true, +} + +var UnitsParseBytes = &Builtin{ + Name: "units.parse_bytes", + Description: `Converts strings like "10GB", "5K", "4mb", or "1e6KB" into an integer number of bytes. + +Supports standard byte units (e.g., KB, KiB, etc.) where KB, MB, GB, and TB are treated as decimal +units, and KiB, MiB, GiB, and TiB are treated as binary units. Scientific notation is supported, +enabling values like "1.5e3MB" (1500MB) or "2e6GiB" (2 million GiB). + +The bytes symbol (b/B) in the unit is optional; omitting it will yield the same result (e.g., "Mi" +and "MiB" are equivalent).`, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the byte unit to parse"), + ), + types.Named("y", types.N).Description("the parsed number"), + ), + CanSkipBctx: true, +} + +// +/** + * Type + */ + +// UUIDRFC4122 returns a version 4 UUID string. +// Marked non-deterministic because it relies on RNG internally. 
+var UUIDRFC4122 = &Builtin{ + Name: "uuid.rfc4122", + Description: "Returns a new UUIDv4.", + Decl: types.NewFunction( + types.Args( + types.Named("k", types.S).Description("seed string"), + ), + types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"), + ), + Nondeterministic: true, + CanSkipBctx: false, +} + +var UUIDParse = &Builtin{ + Name: "uuid.parse", + Description: "Parses the string value as an UUID and returns an object with the well-defined fields of the UUID if valid.", + Categories: nil, + Decl: types.NewFunction( + types.Args( + types.Named("uuid", types.S).Description("UUID string to parse"), + ), + types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."), + ), + Relation: false, + CanSkipBctx: true, +} + +/** + * JSON + */ + +var objectCat = category("object") + +var JSONFilter = &Builtin{ + Name: "json.filter", + Description: "Filters the object. " + + "For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`). 
" + + "Paths are not filtered in-order and are deduplicated before being evaluated.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("object to filter"), + types.Named("paths", types.NewAny( + types.NewArray( + nil, + types.NewAny( + types.S, + types.NewArray( + nil, + types.A, + ), + ), + ), + types.NewSet( + types.NewAny( + types.S, + types.NewArray( + nil, + types.A, + ), + ), + ), + )).Description("JSON string paths"), + ), + types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"), + ), + Categories: objectCat, + CanSkipBctx: true, +} + +var JSONRemove = &Builtin{ + Name: "json.remove", + Description: "Removes paths from an object. " + + "For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. " + + "Paths are not removed in-order and are deduplicated before being evaluated.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("object to remove paths from"), + types.Named("paths", types.NewAny( + types.NewArray( + nil, + types.NewAny( + types.S, + types.NewArray( + nil, + types.A, + ), + ), + ), + types.NewSet( + types.NewAny( + types.S, + types.NewArray( + nil, + types.A, + ), + ), + ), + )).Description("JSON string paths"), + ), + types.Named("output", types.A).Description("result of removing all keys specified in `paths`"), + ), + Categories: objectCat, + CanSkipBctx: true, +} + +var JSONPatch = &Builtin{ + Name: "json.patch", + Description: "Patches an object according to RFC6902. " + + "For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}`. " + + "The patches are applied atomically: if any of them fails, the result will be undefined. 
" + + "Additionally works on sets, where a value contained in the set is considered to be its path.", + Decl: types.NewFunction( + types.Args( + types.Named("target", types.A).Description("the object, array or set to patch"), + types.Named("patches", types.NewArray( + nil, + types.NewObject( + []*types.StaticProperty{ + {Key: "op", Value: types.S}, + {Key: "path", Value: types.A}, + }, + types.NewDynamicProperty(types.A, types.A), + ), + )).Description("the JSON patches to apply"), + ), + types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"), + ), + Categories: objectCat, + CanSkipBctx: true, +} + +var ObjectSubset = &Builtin{ + Name: "object.subset", + Description: "Determines if an object `sub` is a subset of another object `super`." + + "Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, " + + "**and** for all keys which `sub` and `super` share, they have the same value. " + + "This function works with objects, sets, arrays and a set of array and set." + + "If both arguments are objects, then the operation is recursive, e.g. " + + "`{\"c\": {\"x\": {10, 15, 20}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}`. " + + "If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, " + + "but does not attempt to recurse. If both arguments are arrays, " + + "then this function checks if `sub` appears contiguously in order within `super`, " + + "and also does not attempt to recurse. 
If `super` is array and `sub` is set, " + + "then this function checks if `super` contains every element of `sub` with no consideration of ordering, " + + "and also does not attempt to recurse.", + Decl: types.NewFunction( + types.Args( + types.Named("super", types.NewAny(types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + ), types.SetOfAny, + types.NewArray(nil, types.A), + )).Description("object to test if sub is a subset of"), + types.Named("sub", types.NewAny(types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + ), types.SetOfAny, + types.NewArray(nil, types.A), + )).Description("object to test if super is a superset of"), + ), + types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"), + ), + CanSkipBctx: true, +} + +var ObjectUnion = &Builtin{ + Name: "object.union", + Description: "Creates a new object of the asymmetric union of two objects. " + + "For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.", + Decl: types.NewFunction( + types.Args( + types.Named("a", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("left-hand object"), + types.Named("b", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("right-hand object"), + ), + types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"), + ), // TODO(sr): types.A? ^^^^^^^ (also below) + CanSkipBctx: true, +} + +var ObjectUnionN = &Builtin{ + Name: "object.union_n", + Description: "Creates a new object that is the asymmetric union of all objects merged from left to right. 
" + + "For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.", + Decl: types.NewFunction( + types.Args( + types.Named("objects", types.NewArray( + nil, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + )).Description("list of objects to merge"), + ), + types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"), + ), + CanSkipBctx: true, +} + +var ObjectRemove = &Builtin{ + Name: "object.remove", + Description: "Removes specified keys from an object.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("object to remove keys from"), + types.Named("keys", types.NewAny( + types.NewArray(nil, types.A), + types.SetOfAny, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + )).Description("keys to remove from x"), + ), + types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"), + ), + CanSkipBctx: true, +} + +var ObjectFilter = &Builtin{ + Name: "object.filter", + Description: "Filters the object by keeping only specified keys. 
" + + "For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`).", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("object to filter keys"), + types.Named("keys", types.NewAny( + types.NewArray(nil, types.A), + types.SetOfAny, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + )).Description("keys to keep in `object`"), + ), + types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"), + ), + CanSkipBctx: true, +} + +var ObjectGet = &Builtin{ + Name: "object.get", + Description: "Returns value of an object's key if present, otherwise a default. " + + "If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. " + + "For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get `key` from"), + types.Named("key", types.A).Description("key to lookup in `object`"), + types.Named("default", types.A).Description("default to use when lookup fails"), + ), + types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"), + ), + CanSkipBctx: true, +} + +var ObjectKeys = &Builtin{ + Name: "object.keys", + Description: "Returns a set of an object's keys. 
" + + "For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\")` results in `{\"a\", \"b\", \"c\"}`.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"), + ), + types.Named("value", types.SetOfAny).Description("set of `object`'s keys"), + ), + CanSkipBctx: true, +} + +/* + * Encoding + */ +// Not using 'encoding' to avoid having to alias stdlib "encoding" imports +var catEncoding = category("encoding") + +var JSONMarshal = &Builtin{ + Name: "json.marshal", + Description: "Serializes the input term to JSON.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("the term to serialize"), + ), + types.Named("y", types.S).Description("the JSON string representation of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var JSONMarshalWithOptions = &Builtin{ + Name: "json.marshal_with_options", + Description: "Serializes the input term JSON, with additional formatting options via the `opts` parameter. 
" + + "`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("the term to serialize"), + types.Named("opts", types.NewObject( + []*types.StaticProperty{ + types.NewStaticProperty("pretty", types.B), + types.NewStaticProperty("indent", types.S), + types.NewStaticProperty("prefix", types.S), + }, + types.NewDynamicProperty(types.S, types.A), + )).Description("encoding options"), + ), + types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var JSONUnmarshal = &Builtin{ + Name: "json.unmarshal", + Description: "Deserializes the input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a JSON string"), + ), + types.Named("y", types.A).Description("the term deserialized from `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var JSONIsValid = &Builtin{ + Name: "json.is_valid", + Description: "Verifies the input string is a valid JSON document.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a JSON string"), + ), + types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var Base64Encode = &Builtin{ + Name: "base64.encode", + Description: "Serializes the input string into base64 encoding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("base64 serialization of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var Base64Decode = &Builtin{ + Name: "base64.decode", + Description: "Deserializes the base64 encoded input string.", + 
Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to decode"), + ), + types.Named("y", types.S).Description("base64 deserialization of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var Base64IsValid = &Builtin{ + Name: "base64.is_valid", + Description: "Verifies the input string is base64 encoded.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to check"), + ), + types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var Base64UrlEncode = &Builtin{ + Name: "base64url.encode", + Description: "Serializes the input string into base64url encoding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("base64url serialization of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var Base64UrlEncodeNoPad = &Builtin{ + Name: "base64url.encode_no_pad", + Description: "Serializes the input string into base64url encoding without padding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("base64url serialization of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var Base64UrlDecode = &Builtin{ + Name: "base64url.decode", + Description: "Deserializes the base64url encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to decode"), + ), + types.Named("y", types.S).Description("base64url deserialization of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var URLQueryDecode = &Builtin{ + Name: "urlquery.decode", + Description: "Decodes a URL-encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the URL-encoded string"), + 
), + types.Named("y", types.S).Description("URL-encoding deserialization of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var URLQueryEncode = &Builtin{ + Name: "urlquery.encode", + Description: "Encodes the input string into a URL-encoded string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the string to encode"), + ), + types.Named("y", types.S).Description("URL-encoding serialization of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var URLQueryEncodeObject = &Builtin{ + Name: "urlquery.encode_object", + Description: "Encodes the given object into a URL encoded query string.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty( + types.S, + types.NewAny( + types.S, + types.NewArray(nil, types.S), + types.SetOfStr, + ), + ), + ), + ).Description("the object to encode"), + ), + types.Named("y", types.S).Description("the URL-encoded serialization of `object`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var URLQueryDecodeObject = &Builtin{ + Name: "urlquery.decode_object", + Description: "Decodes the given URL query string into an object.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the query string"), + ), + types.Named("object", types.NewObject(nil, types.NewDynamicProperty( + types.S, + types.NewArray(nil, types.S)))).Description("the resulting object"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var YAMLMarshal = &Builtin{ + Name: "yaml.marshal", + Description: "Serializes the input term to YAML.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("the term to serialize"), + ), + types.Named("y", types.S).Description("the YAML string representation of `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var YAMLUnmarshal = &Builtin{ + Name: "yaml.unmarshal", + Description: "Deserializes the input string.", + 
Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a YAML string"), + ), + types.Named("y", types.A).Description("the term deserialized from `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +// YAMLIsValid verifies the input string is a valid YAML document. +var YAMLIsValid = &Builtin{ + Name: "yaml.is_valid", + Description: "Verifies the input string is a valid YAML document.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a YAML string"), + ), + types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var HexEncode = &Builtin{ + Name: "hex.encode", + Description: "Serializes the input string using hex-encoding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("serialization of `x` using hex-encoding"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +var HexDecode = &Builtin{ + Name: "hex.decode", + Description: "Deserializes the hex-encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a hex-encoded string"), + ), + types.Named("y", types.S).Description("deserialized from `x`"), + ), + Categories: catEncoding, + CanSkipBctx: true, +} + +/** + * Tokens + */ +var tokensCat = category("tokens") + +var JWTDecode = &Builtin{ + Name: "io.jwt.decode", + Description: "Decodes a JSON Web Token and outputs it as an object.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token to decode"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.S, + }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the 
hexadecimal representation of the signature on the token."), + ), + Categories: tokensCat, + CanSkipBctx: true, +} + +var JWTVerifyRS256 = &Builtin{ + Name: "io.jwt.verify_rs256", + Description: "Verifies if a RS256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyRS384 = &Builtin{ + Name: "io.jwt.verify_rs384", + Description: "Verifies if a RS384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyRS512 = &Builtin{ + Name: "io.jwt.verify_rs512", + Description: "Verifies if a RS512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyPS256 = &Builtin{ + Name: "io.jwt.verify_ps256", + Description: "Verifies if a PS256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( 
+ types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyPS384 = &Builtin{ + Name: "io.jwt.verify_ps384", + Description: "Verifies if a PS384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyPS512 = &Builtin{ + Name: "io.jwt.verify_ps512", + Description: "Verifies if a PS512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyES256 = &Builtin{ + Name: "io.jwt.verify_es256", + Description: "Verifies if a ES256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", 
types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyES384 = &Builtin{ + Name: "io.jwt.verify_es384", + Description: "Verifies if a ES384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyES512 = &Builtin{ + Name: "io.jwt.verify_es512", + Description: "Verifies if a ES512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyEdDSA = &Builtin{ + Name: "io.jwt.verify_eddsa", + Description: "Verifies if an EdDSA JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyHS256 = &Builtin{ + Name: "io.jwt.verify_hs256", + Description: "Verifies if a HS256 (secret) JWT signature is valid.", + Decl: 
types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyHS384 = &Builtin{ + Name: "io.jwt.verify_hs384", + Description: "Verifies if a HS384 (secret) JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +var JWTVerifyHS512 = &Builtin{ + Name: "io.jwt.verify_hs512", + Description: "Verifies if a HS512 (secret) JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, + CanSkipBctx: false, +} + +// Marked non-deterministic because it relies on time internally. +var JWTDecodeVerify = &Builtin{ + Name: "io.jwt.decode_verify", + Description: `Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid. 
+Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384, PS512, and EdDSA.`, + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified and whose claims are to be checked"), + types.Named("constraints", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("claim verification constraints"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description("`[valid, header, payload]`: if the input token is verified and meets the requirements of `constraints` then `valid` is `true`; `header` and `payload` are objects containing the JOSE header and the JWT claim set; otherwise, `valid` is `false`, `header` and `payload` are `{}`"), + ), + Categories: tokensCat, + Nondeterministic: true, + CanSkipBctx: false, +} + +var tokenSign = category("tokensign") + +// Marked non-deterministic because it relies on RNG internally. +var JWTEncodeSignRaw = &Builtin{ + Name: "io.jwt.encode_sign_raw", + Description: "Encodes and optionally signs a JSON Web Token.", + Decl: types.NewFunction( + types.Args( + types.Named("headers", types.S).Description("JWS Protected Header"), + types.Named("payload", types.S).Description("JWS Payload"), + types.Named("key", types.S).Description("JSON Web Key (RFC7517)"), + ), + types.Named("output", types.S).Description("signed JWT"), + ), + Categories: tokenSign, + Nondeterministic: true, + CanSkipBctx: false, +} + +// Marked non-deterministic because it relies on RNG internally. +var JWTEncodeSign = &Builtin{ + Name: "io.jwt.encode_sign", + Description: "Encodes and optionally signs a JSON Web Token. 
Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).", + Decl: types.NewFunction( + types.Args( + types.Named("headers", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Protected Header"), + types.Named("payload", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Payload"), + types.Named("key", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JSON Web Key (RFC7517)"), + ), + types.Named("output", types.S).Description("signed JWT"), + ), + Categories: tokenSign, + Nondeterministic: true, + CanSkipBctx: false, +} + +/** + * Time + */ + +// Marked non-deterministic because it relies on time directly. +var NowNanos = &Builtin{ + Name: "time.now_ns", + Description: "Returns the current time since epoch in nanoseconds.", + Decl: types.NewFunction( + nil, + types.Named("now", types.N).Description("nanoseconds since epoch"), + ), + Nondeterministic: true, + CanSkipBctx: false, +} + +var ParseNanos = &Builtin{ + Name: "time.parse_ns", + Description: "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", + Decl: types.NewFunction( + types.Args( + types.Named("layout", types.S).Description("format used for parsing, see the [Go `time` package documentation](https://golang.org/pkg/time/#Parse) for more details"), + types.Named("value", types.S).Description("input to parse according to `layout`"), + ), + types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), + ), + CanSkipBctx: true, +} + +var ParseRFC3339Nanos = &Builtin{ + Name: "time.parse_rfc3339_ns", + Description: "Returns the time in nanoseconds parsed from the string in RFC3339 format. 
`undefined` if the result would be outside the valid time range that can fit within an `int64`.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("input string to parse in RFC3339 format"), + ), + types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), + ), + CanSkipBctx: true, +} + +var ParseDurationNanos = &Builtin{ + Name: "time.parse_duration_ns", + Description: "Returns the duration in nanoseconds represented by a string.", + Decl: types.NewFunction( + types.Args( + types.Named("duration", types.S).Description("a duration like \"3m\"; see the [Go `time` package documentation](https://golang.org/pkg/time/#ParseDuration) for more details"), + ), + types.Named("ns", types.N).Description("the `duration` in nanoseconds"), + ), + CanSkipBctx: true, +} + +var Format = &Builtin{ + Name: "time.format", + Description: "Returns the formatted timestamp for the nanoseconds since epoch.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.NewArray([]types.Type{types.N, types.S}, nil), + types.NewArray([]types.Type{types.N, types.S, types.S}, nil), + )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"), + ), + types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"), + ), + CanSkipBctx: true, +} + +var Date = &Builtin{ + Name: "time.date", + Description: "Returns the `[year, month, day]` for the nanoseconds since epoch.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.NewArray([]types.Type{types.N, types.S}, nil), + )).Description("a number representing the nanoseconds since the epoch (UTC); or 
a two-element array of the nanoseconds, and a timezone string"), + ), + types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"), + ), + CanSkipBctx: true, +} + +var Clock = &Builtin{ + Name: "time.clock", + Description: "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.NewArray([]types.Type{types.N, types.S}, nil), + )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), + ), + types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)). + Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"), + ), + CanSkipBctx: true, +} + +var Weekday = &Builtin{ + Name: "time.weekday", + Description: "Returns the day of the week (Monday, Tuesday, ...) for the nanoseconds since epoch.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.NewArray([]types.Type{types.N, types.S}, nil), + )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), + ), + types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"), + ), + CanSkipBctx: true, +} + +var AddDate = &Builtin{ + Name: "time.add_date", + Description: "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month & day values outside their usual ranges after the operation and will be normalized - for example, October 32 would become November 1. 
`undefined` if the result would be outside the valid time range that can fit within an `int64`.", + Decl: types.NewFunction( + types.Args( + types.Named("ns", types.N).Description("nanoseconds since the epoch"), + types.Named("years", types.N).Description("number of years to add"), + types.Named("months", types.N).Description("number of months to add"), + types.Named("days", types.N).Description("number of days to add"), + ), + types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"), + ), + CanSkipBctx: true, +} + +var Diff = &Builtin{ + Name: "time.diff", + Description: "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).", + Decl: types.NewFunction( + types.Args( + types.Named("ns1", types.NewAny( + types.N, + types.NewArray([]types.Type{types.N, types.S}, nil), + )).Description("nanoseconds since the epoch; or a two-element array of the nanoseconds, and a timezone string"), + types.Named("ns2", types.NewAny( + types.N, + types.NewArray([]types.Type{types.N, types.S}, nil), + )).Description("nanoseconds since the epoch; or a two-element array of the nanoseconds, and a timezone string"), + ), + types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"), + ), + CanSkipBctx: true, +} + +/** + * Crypto. + */ + +var CryptoX509ParseCertificates = &Builtin{ + Name: "crypto.x509.parse_certificates", + Description: `Returns zero or more certificates from the given encoded string containing +DER certificate data. + +If the input is empty, the function will return null. The input string should be a list of one or more +concatenated PEM blocks. 
The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing one or more certificates or a PEM string of one or more certificates"), + ), + types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"), + ), + CanSkipBctx: true, +} + +var CryptoX509ParseAndVerifyCertificates = &Builtin{ + Name: "crypto.x509.parse_and_verify_certificates", + Description: `Returns one or more certificates from the given string containing PEM +or base64 encoded DER certificates after verifying the supplied certificates form a complete +certificate chain back to a trusted root. + +The first certificate is treated as the root and the last is treated as the leaf, +with all others being treated as intermediates.`, + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), + }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), + ), + CanSkipBctx: true, +} + +var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{ + Name: "crypto.x509.parse_and_verify_certificates_with_options", + Description: `Returns one or more certificates from the given string containing PEM +or base64 encoded DER certificates after verifying the supplied certificates form a 
complete +certificate chain back to a trusted root. A config option passed as the second argument can +be used to configure the validation options used. + +The first certificate is treated as the root and the last is treated as the leaf, +with all others being treated as intermediates.`, + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), + types.Named("options", types.NewObject( + nil, + types.NewDynamicProperty(types.S, types.A), + )).Description("object containing extra configs to verify the validity of certificates. `options` object supports four fields which maps to same fields in [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions). `DNSName`, `CurrentTime`: Nanoseconds since the Unix Epoch as a number, `MaxConstraintComparisons` and `KeyUsages`. `KeyUsages` is list and can have possible values as in: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"` "), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), + }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), + ), + CanSkipBctx: true, +} + +var 
CryptoX509ParseCertificateRequest = &Builtin{ + Name: "crypto.x509.parse_certificate_request", + Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.", + Decl: types.NewFunction( + types.Args( + types.Named("csr", types.S).Description("base64 string containing either a PEM encoded or DER CSR or a string containing a PEM CSR"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"), + ), + CanSkipBctx: true, +} + +var CryptoX509ParseKeyPair = &Builtin{ + Name: "crypto.x509.parse_keypair", + Description: "Returns a valid key pair", + Decl: types.NewFunction( + types.Args( + types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"), + types.Named("pem", types.S).Description("string containing PEM or base64 encoded DER keys"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."), + ), + CanSkipBctx: true, +} + +var CryptoX509ParseRSAPrivateKey = &Builtin{ + Name: "crypto.x509.parse_rsa_private_key", + Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.", + Decl: types.NewFunction( + types.Args( + types.Named("pem", types.S).Description("base64 string containing a PEM encoded RSA private key"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"), + ), + CanSkipBctx: true, +} + +var CryptoParsePrivateKeys = &Builtin{ + Name: "crypto.parse_private_keys", + Description: `Returns zero or more private keys from the given encoded string containing DER certificate data. + +If the input is empty, the function will return null. 
The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, + Decl: types.NewFunction( + types.Args( + types.Named("keys", types.S).Description("PEM encoded data containing one or more private keys as concatenated blocks. Optionally Base64 encoded."), + ), + types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"), + ), + CanSkipBctx: true, +} + +var CryptoMd5 = &Builtin{ + Name: "crypto.md5", + Description: "Returns a string representing the input string hashed with the MD5 function", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("input string"), + ), + types.Named("y", types.S).Description("MD5-hash of `x`"), + ), + CanSkipBctx: true, +} + +var CryptoSha1 = &Builtin{ + Name: "crypto.sha1", + Description: "Returns a string representing the input string hashed with the SHA1 function", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("input string"), + ), + types.Named("y", types.S).Description("SHA1-hash of `x`"), + ), + CanSkipBctx: true, +} + +var CryptoSha256 = &Builtin{ + Name: "crypto.sha256", + Description: "Returns a string representing the input string hashed with the SHA256 function", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("input string"), + ), + types.Named("y", types.S).Description("SHA256-hash of `x`"), + ), + CanSkipBctx: true, +} + +var CryptoHmacMd5 = &Builtin{ + Name: "crypto.hmac.md5", + Description: "Returns a string representing the MD5 HMAC of the input message using the input key.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("input string"), + types.Named("key", types.S).Description("key to use"), + ), + types.Named("y", types.S).Description("MD5-HMAC of `x`"), + ), + CanSkipBctx: true, +} + +var 
CryptoHmacSha1 = &Builtin{ + Name: "crypto.hmac.sha1", + Description: "Returns a string representing the SHA1 HMAC of the input message using the input key.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("input string"), + types.Named("key", types.S).Description("key to use"), + ), + types.Named("y", types.S).Description("SHA1-HMAC of `x`"), + ), + CanSkipBctx: true, +} + +var CryptoHmacSha256 = &Builtin{ + Name: "crypto.hmac.sha256", + Description: "Returns a string representing the SHA256 HMAC of the input message using the input key.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("input string"), + types.Named("key", types.S).Description("key to use"), + ), + types.Named("y", types.S).Description("SHA256-HMAC of `x`"), + ), + CanSkipBctx: true, +} + +var CryptoHmacSha512 = &Builtin{ + Name: "crypto.hmac.sha512", + Description: "Returns a string representing the SHA512 HMAC of the input message using the input key.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("input string"), + types.Named("key", types.S).Description("key to use"), + ), + types.Named("y", types.S).Description("SHA512-HMAC of `x`"), + ), + CanSkipBctx: true, +} + +var CryptoHmacEqual = &Builtin{ + Name: "crypto.hmac.equal", + Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.", + Decl: types.NewFunction( + types.Args( + types.Named("mac1", types.S).Description("mac1 to compare"), + types.Named("mac2", types.S).Description("mac2 to compare"), + ), + types.Named("result", types.B).Description("`true` if the MACs are equals, `false` otherwise"), + ), + CanSkipBctx: true, +} + +/** + * Graphs. + */ +var graphs = category("graph") + +var WalkBuiltin = &Builtin{ + Name: "walk", + Relation: true, + Description: "Generates `[path, value]` tuples for all nested documents of `x` (recursively). 
Queries can use `walk` to traverse documents nested under `x`.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("value to walk"), + ), + types.Named("output", types.NewArray( + []types.Type{ + types.NewArray(nil, types.A), + types.A, + }, + nil, + )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."), + ), + Categories: graphs, + CanSkipBctx: true, +} + +var ReachableBuiltin = &Builtin{ + Name: "graph.reachable", + Description: "Computes the set of reachable nodes in the graph from a set of starting nodes.", + Decl: types.NewFunction( + types.Args( + types.Named("graph", types.NewObject( + nil, + types.NewDynamicProperty( + types.A, + types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A)), + )), + ).Description("object containing a set or array of neighboring vertices"), + types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("set or array of root vertices"), + ), + types.Named("output", types.SetOfAny).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"), + ), + CanSkipBctx: true, +} + +var ReachablePathsBuiltin = &Builtin{ + Name: "graph.reachable_paths", + Description: "Computes the set of reachable paths in the graph from a set of starting nodes.", + Decl: types.NewFunction( + types.Args( + types.Named("graph", types.NewObject( + nil, + types.NewDynamicProperty( + types.A, + types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A)), + )), + ).Description("object containing a set or array of root vertices"), + types.Named("initial", types.NewAny(types.SetOfAny, types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct? 
+ ), + types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"), + ), + CanSkipBctx: true, +} + +/** + * Type + */ +var typesCat = category("types") + +var IsNumber = &Builtin{ + Name: "is_number", + Description: "Returns `true` if the input value is a number.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."), + ), + Categories: typesCat, + CanSkipBctx: true, +} + +var IsString = &Builtin{ + Name: "is_string", + Description: "Returns `true` if the input value is a string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."), + ), + Categories: typesCat, + CanSkipBctx: true, +} + +var IsBoolean = &Builtin{ + Name: "is_boolean", + Description: "Returns `true` if the input value is a boolean.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is an boolean, `false` otherwise."), + ), + Categories: typesCat, + CanSkipBctx: true, +} + +var IsArray = &Builtin{ + Name: "is_array", + Description: "Returns `true` if the input value is an array.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."), + ), + Categories: typesCat, + CanSkipBctx: true, +} + +var IsSet = &Builtin{ + Name: "is_set", + Description: "Returns `true` if the input value is a set.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."), 
+ ), + Categories: typesCat, + CanSkipBctx: true, +} + +var IsObject = &Builtin{ + Name: "is_object", + Description: "Returns true if the input value is an object", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."), + ), + Categories: typesCat, + CanSkipBctx: true, +} + +var IsNull = &Builtin{ + Name: "is_null", + Description: "Returns `true` if the input value is null.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."), + ), + Categories: typesCat, + CanSkipBctx: true, +} + +/** + * Type Name + */ + +// TypeNameBuiltin returns the type of the input. +var TypeNameBuiltin = &Builtin{ + Name: "type_name", + Description: "Returns the type of its input value.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`), + ), + Categories: typesCat, + CanSkipBctx: true, +} + +/** + * HTTP Request + */ + +// Marked non-deterministic because HTTP request results can be non-deterministic. +var HTTPSend = &Builtin{ + Name: "http.send", + Description: "Returns a HTTP response to the given HTTP request.", + Decl: types.NewFunction( + types.Args( + types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("the HTTP request object"), + ), + types.Named("response", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))). + Description("the HTTP response object"), + ), + Nondeterministic: true, + CanSkipBctx: false, +} + +/** + * GraphQL + */ + +// GraphQLParse returns a pair of AST objects from parsing/validation. 
+var GraphQLParse = &Builtin{ + Name: "graphql.parse", + Description: "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL query"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL schema"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."), + ), + CanSkipBctx: false, +} + +// GraphQLParseAndVerify returns a boolean and a pair of AST object from parsing/validation. +var GraphQLParseAndVerify = &Builtin{ + Name: "graphql.parse_and_verify", + Description: "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL query"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). 
+ Description("the GraphQL schema"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description(" `output` is of the form `[valid, query_ast, schema_ast]`. If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."), + ), + CanSkipBctx: false, +} + +// GraphQLParseQuery parses the input GraphQL query and returns a JSON +// representation of its AST. +var GraphQLParseQuery = &Builtin{ + Name: "graphql.parse_query", + Description: "Returns an AST object for a GraphQL query.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.S).Description("GraphQL query string"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."), + ), + CanSkipBctx: true, +} + +// GraphQLParseSchema parses the input GraphQL schema and returns a JSON +// representation of its AST. +var GraphQLParseSchema = &Builtin{ + Name: "graphql.parse_schema", + Description: "Returns an AST object for a GraphQL schema.", + Decl: types.NewFunction( + types.Args( + types.Named("schema", types.S).Description("GraphQL schema string"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."), + ), + CanSkipBctx: false, +} + +// GraphQLIsValid returns true if a GraphQL query is valid with a given +// schema, and returns false for all other inputs. +var GraphQLIsValid = &Builtin{ + Name: "graphql.is_valid", + Description: "Checks that a GraphQL query is valid against a given schema. 
The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL query"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL schema"), + ), + types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."), + ), + CanSkipBctx: false, +} + +// GraphQLSchemaIsValid returns true if the input is valid GraphQL schema, +// and returns false for all other inputs. +var GraphQLSchemaIsValid = &Builtin{ + Name: "graphql.schema_is_valid", + Description: "Checks that the input is a valid GraphQL schema. The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the schema to verify"), + ), + types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."), + ), + CanSkipBctx: false, +} + +/** + * JSON Schema + */ + +// JSONSchemaVerify returns empty string if the input is valid JSON schema +// and returns error string for all other inputs. +var JSONSchemaVerify = &Builtin{ + Name: "json.verify_schema", + Description: "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or an JSON object.", + Decl: types.NewFunction( + types.Args( + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the schema to verify"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewAny(types.S, types.Null{}), + }, nil)). 
+ Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. Otherwise, `valid` is `false` and `error` is a string describing the error."), + ), + Categories: objectCat, + CanSkipBctx: true, +} + +// JSONMatchSchema returns empty array if the document matches the JSON schema, +// and returns non-empty array with error objects otherwise. +var JSONMatchSchema = &Builtin{ + Name: "json.match_schema", + Description: "Checks that the document matches the JSON schema.", + Decl: types.NewFunction( + types.Args( + types.Named("document", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("document to verify by schema"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("schema to verify document by"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray( + nil, types.NewObject( + []*types.StaticProperty{ + {Key: "error", Value: types.S}, + {Key: "type", Value: types.S}, + {Key: "field", Value: types.S}, + {Key: "desc", Value: types.S}, + }, + nil, + ), + ), + }, nil)). + Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."), + ), + Categories: objectCat, + CanSkipBctx: false, +} + +/** + * Cloud Provider Helper Functions + */ +var providersAWSCat = category("providers.aws") + +var ProvidersAWSSignReqObj = &Builtin{ + Name: "providers.aws.sign_req", + Description: "Signs an HTTP request object for Amazon Web Services. 
Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.", + Decl: types.NewFunction( + types.Args( + types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("HTTP request object"), + types.Named("aws_config", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("AWS configuration object"), + types.Named("time_ns", types.N).Description("nanoseconds since the epoch"), + ), + types.Named("signed_request", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))). + Description("HTTP request object with `Authorization` header"), + ), + Categories: providersAWSCat, + CanSkipBctx: true, +} + +/** + * Rego + */ + +var RegoParseModule = &Builtin{ + Name: "rego.parse_module", + Description: "Parses the input Rego string and returns an object representation of the AST.", + Decl: types.NewFunction( + types.Args( + types.Named("filename", types.S).Description("file name to attach to AST nodes' locations"), + types.Named("rego", types.S).Description("Rego module"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("AST object for the Rego module"), + ), + CanSkipBctx: true, +} + +var RegoMetadataChain = &Builtin{ + Name: "rego.metadata.chain", + Description: `Returns the chain of metadata for the active rule. +Ordered starting at the active rule, going outward to the most distant node in its package ancestry. +A chain entry is a JSON document with two members: "path", an array representing the path of the node; and "annotations", a JSON document containing the annotations declared for the node. 
+The first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the "annotations" member is not present).`, + Decl: types.NewFunction( + types.Args(), + types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"), + ), + CanSkipBctx: true, +} + +// RegoMetadataRule returns the metadata for the active rule +var RegoMetadataRule = &Builtin{ + Name: "rego.metadata.rule", + Description: "Returns annotations declared for the active rule and using the _rule_ scope.", + Decl: types.NewFunction( + types.Args(), + types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"), + ), + CanSkipBctx: true, +} + +/** + * OPA + */ + +// Marked non-deterministic because of unpredictable config/environment-dependent results. +var OPARuntime = &Builtin{ + Name: "opa.runtime", + Description: "Returns an object that describes the runtime environment where OPA is deployed.", + Decl: types.NewFunction( + nil, + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("includes a `config` key if OPA was started with a configuration file; an `env` key containing the environment variables that the OPA process was started with; includes `version` and `commit` keys containing the version and build commit of OPA."), + ), + Nondeterministic: true, + CanSkipBctx: false, +} + +/** + * Trace + */ +var tracing = category("tracing") + +var Trace = &Builtin{ + Name: "trace", + Description: "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. 
For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.", + Decl: types.NewFunction( + types.Args( + types.Named("note", types.S).Description("the note to include"), + ), + types.Named("result", types.B).Description("always `true`"), + ), + Categories: tracing, + CanSkipBctx: false, +} + +/** + * Glob + */ + +var GlobMatch = &Builtin{ + Name: "glob.match", + Description: "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("glob pattern"), + types.Named("delimiters", types.NewAny( + types.NewArray(nil, types.S), + types.Nl, + )).Description("glob pattern delimiters, e.g. `[\".\", \":\"]`, defaults to `[\".\"]` if unset. If `delimiters` is `null`, glob match without delimiter."), + types.Named("match", types.S).Description("string to match against `pattern`"), + ), + types.Named("result", types.B).Description("true if `match` can be found in `pattern` which is separated by `delimiters`"), + ), + CanSkipBctx: false, +} + +var GlobQuoteMeta = &Builtin{ + Name: "glob.quote_meta", + Description: "Returns a string which represents a version of the pattern where all asterisks have been escaped.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("glob pattern"), + ), + types.Named("output", types.S).Description("the escaped string of `pattern`"), + ), + CanSkipBctx: true, + // TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``. +} + +/** + * Networking + */ + +var NetCIDRIntersects = &Builtin{ + Name: "net.cidr_intersects", + Description: "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). 
Supports both IPv4 and IPv6 notations.", + Decl: types.NewFunction( + types.Args( + types.Named("cidr1", types.S).Description("first CIDR"), + types.Named("cidr2", types.S).Description("second CIDR"), + ), + types.Named("result", types.B).Description("`true` if `cidr1` intersects with `cidr2`"), + ), + CanSkipBctx: true, +} + +var NetCIDRExpand = &Builtin{ + Name: "net.cidr_expand", + Description: "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).", + Decl: types.NewFunction( + types.Args( + types.Named("cidr", types.S).Description("CIDR to expand"), + ), + types.Named("hosts", types.SetOfStr).Description("set of IP addresses the CIDR `cidr` expands to"), + ), + CanSkipBctx: false, +} + +var NetCIDRContains = &Builtin{ + Name: "net.cidr_contains", + Description: "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. Supports both IPv4 and IPv6 notations.", + Decl: types.NewFunction( + types.Args( + types.Named("cidr", types.S).Description("CIDR to check against"), + types.Named("cidr_or_ip", types.S).Description("CIDR or IP to check"), + ), + types.Named("result", types.B).Description("`true` if `cidr_or_ip` is contained within `cidr`"), + ), + CanSkipBctx: true, +} + +var NetCIDRContainsMatches = &Builtin{ + Name: "net.cidr_contains_matches", + Description: "Checks if collections of cidrs or ips are contained within another collection of cidrs and returns matches. 
" + + "This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).", + Decl: types.NewFunction( + types.Args( + types.Named("cidrs", netCidrContainsMatchesOperandType).Description("CIDRs to check against"), + types.Named("cidrs_or_ips", netCidrContainsMatchesOperandType).Description("CIDRs or IPs to check"), + ), + types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"), + ), + CanSkipBctx: true, +} + +var NetCIDRMerge = &Builtin{ + Name: "net.cidr_merge", + Description: "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`." + + `This function merges adjacent subnets where possible, those contained within others and also removes any duplicates. +Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. 
"/128").`, + Decl: types.NewFunction( + types.Args( + types.Named("addrs", types.NewAny( + types.NewArray(nil, types.NewAny(types.S)), + types.SetOfStr, + )).Description("CIDRs or IP addresses"), + ), + types.Named("output", types.SetOfStr).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"), + ), + CanSkipBctx: true, +} + +var NetCIDRIsValid = &Builtin{ + Name: "net.cidr_is_valid", + Description: "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("cidr", types.S).Description("CIDR to validate"), + ), + types.Named("result", types.B).Description("`true` if `cidr` is a valid CIDR"), + ), + CanSkipBctx: true, +} + +var netCidrContainsMatchesOperandType = types.NewAny( + types.S, + types.NewArray(nil, types.NewAny( + types.S, + types.NewArray(nil, types.A), + )), + types.NewSet(types.NewAny( + types.S, + types.NewArray(nil, types.A), + )), + types.NewObject(nil, types.NewDynamicProperty( + types.S, + types.NewAny( + types.S, + types.NewArray(nil, types.A), + ), + )), +) + +// Marked non-deterministic because DNS resolution results can be non-deterministic. 
+var NetLookupIPAddr = &Builtin{ + Name: "net.lookup_ip_addr", + Description: "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.", + Decl: types.NewFunction( + types.Args( + types.Named("name", types.S).Description("domain name to resolve"), + ), + types.Named("addrs", types.SetOfStr).Description("IP addresses (v4 and v6) that `name` resolves to"), + ), + Nondeterministic: true, + CanSkipBctx: false, +} + +/** + * Semantic Versions + */ + +var SemVerIsValid = &Builtin{ + Name: "semver.is_valid", + Description: "Validates that the input is a valid SemVer string.", + Decl: types.NewFunction( + types.Args( + types.Named("vsn", types.A).Description("input to validate"), + ), + types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"), + ), + CanSkipBctx: true, +} + +var SemVerCompare = &Builtin{ + Name: "semver.compare", + Description: "Compares valid SemVer formatted version strings.", + Decl: types.NewFunction( + types.Args( + types.Named("a", types.S).Description("first version string"), + types.Named("b", types.S).Description("second version string"), + ), + types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"), + ), + CanSkipBctx: true, +} + +/** + * Printing + */ + +// Print is a special built-in function that writes zero or more operands +// to a message buffer. The caller controls how the buffer is displayed. The +// operands may be of any type. Furthermore, unlike other built-in functions, +// undefined operands DO NOT cause the print() function to fail during +// evaluation. +var Print = &Builtin{ + Name: "print", + Decl: types.NewVariadicFunction(nil, types.A, nil), +} + +// InternalPrint represents the internal implementation of the print() function. +// The compiler rewrites print() calls to refer to the internal implementation. 
+var InternalPrint = &Builtin{ + Name: "internal.print", + Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.SetOfAny)}, nil), +} + +var InternalTestCase = &Builtin{ + Name: "internal.test_case", + Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.A)}, nil), +} + +var InternalTemplateString = &Builtin{ + Name: "internal.template_string", + Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.A)}, types.S), + CanSkipBctx: true, // Uses bctx.Location for error reporting, but that is always provided in eval +} + +/** + * Deprecated built-ins. + */ + +// SetDiff has been replaced by the minus built-in. +var SetDiff = &Builtin{ + Name: "set_diff", + Decl: types.NewFunction( + types.Args( + types.SetOfAny, + types.SetOfAny, + ), + types.SetOfAny, + ), + Deprecated: true, + CanSkipBctx: true, +} + +// NetCIDROverlap has been replaced by the `net.cidr_contains` built-in. +var NetCIDROverlap = &Builtin{ + Name: "net.cidr_overlap", + Decl: types.NewFunction( + types.Args( + types.S, + types.S, + ), + types.B, + ), + Deprecated: true, + CanSkipBctx: true, +} + +// CastArray checks the underlying type of the input. If it is array or set, an array +// containing the values is returned. If it is not an array, an error is thrown. +var CastArray = &Builtin{ + Name: "cast_array", + Decl: types.NewFunction( + types.Args(types.A), + types.NewArray(nil, types.A), + ), + Deprecated: true, + CanSkipBctx: true, +} + +// CastSet checks the underlying type of the input. +// If it is a set, the set is returned. +// If it is an array, the array is returned in set form (all duplicates removed) +// If neither, an error is thrown +var CastSet = &Builtin{ + Name: "cast_set", + Decl: types.NewFunction( + types.Args(types.A), + types.SetOfAny, + ), + Deprecated: true, + CanSkipBctx: true, +} + +// CastString returns input if it is a string; if not returns error. 
+// For formatting variables, see sprintf +var CastString = &Builtin{ + Name: "cast_string", + Decl: types.NewFunction( + types.Args(types.A), + types.S, + ), + Deprecated: true, + CanSkipBctx: true, +} + +// CastBoolean returns input if it is a boolean; if not returns error. +var CastBoolean = &Builtin{ + Name: "cast_boolean", + Decl: types.NewFunction( + types.Args(types.A), + types.B, + ), + Deprecated: true, + CanSkipBctx: true, +} + +// CastNull returns null if input is null; if not returns error. +var CastNull = &Builtin{ + Name: "cast_null", + Decl: types.NewFunction( + types.Args(types.A), + types.Nl, + ), + Deprecated: true, + CanSkipBctx: true, +} + +// CastObject returns the given object if it is an object; throws an error otherwise +var CastObject = &Builtin{ + Name: "cast_object", + Decl: types.NewFunction( + types.Args(types.A), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + ), + Deprecated: true, + CanSkipBctx: true, +} + +// RegexMatchDeprecated declares `re_match` which has been Deprecated. Use `regex.match` instead. +var RegexMatchDeprecated = &Builtin{ + Name: "re_match", + Decl: types.NewFunction( + types.Args( + types.S, + types.S, + ), + types.B, + ), + Deprecated: true, + CanSkipBctx: false, +} + +// All takes a list and returns true if all of the items +// are true. A collection of length 0 returns true. +var All = &Builtin{ + Name: "all", + Decl: types.NewFunction( + types.Args( + types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A), + ), + ), + types.B, + ), + Deprecated: true, + CanSkipBctx: true, +} + +// Any takes a collection and returns true if any of the items +// is true. A collection of length 0 returns false. +var Any = &Builtin{ + Name: "any", + Decl: types.NewFunction( + types.Args( + types.NewAny( + types.SetOfAny, + types.NewArray(nil, types.A), + ), + ), + types.B, + ), + Deprecated: true, + CanSkipBctx: true, +} + +// Builtin represents a built-in function supported by OPA. 
Every built-in +// function is uniquely identified by a name. +type Builtin struct { + Name string `json:"name"` // Unique name of built-in function, e.g., (arg1,arg2,...,argN) + Description string `json:"description,omitempty"` // Description of what the built-in function does. + + // Categories of the built-in function. Omitted for namespaced + // built-ins, i.e. "array.concat" is taken to be of the "array" category. + // "minus" for example, is part of two categories: numbers and sets. (NOTE(sr): aspirational) + Categories []string `json:"categories,omitempty"` + + Decl *types.Function `json:"decl"` // Built-in function type declaration. + Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset. + Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation. + Deprecated bool `json:"deprecated,omitempty"` // Indicates if the built-in has been deprecated. + CanSkipBctx bool `json:"-"` // Built-in needs no data from the built-in context. + Nondeterministic bool `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results. +} + +// category is a helper for specifying a Builtin's Categories +func category(cs ...string) []string { + return cs +} + +// Minimal returns a shallow copy of b with the descriptions and categories and +// named arguments stripped out. +func (b *Builtin) Minimal() *Builtin { + cpy := *b + fargs := b.Decl.FuncArgs() + if fargs.Variadic != nil { + cpy.Decl = types.NewVariadicFunction(fargs.Args, fargs.Variadic, b.Decl.Result()) + } else { + cpy.Decl = types.NewFunction(fargs.Args, b.Decl.Result()) + } + cpy.Categories = nil + cpy.Description = "" + return &cpy +} + +// IsDeprecated returns true if the Builtin function is Deprecated and will be removed in a future release. +func (b *Builtin) IsDeprecated() bool { + return b.Deprecated +} + +// IsNondeterministic returns true if the Builtin function returns non-deterministic results. 
+func (b *Builtin) IsNondeterministic() bool { + return b.Nondeterministic +} + +// Expr creates a new expression for the built-in with the given operands. +func (b *Builtin) Expr(operands ...*Term) *Expr { + ts := make([]*Term, len(operands)+1) + ts[0] = NewTerm(b.Ref()) + for i := range operands { + ts[i+1] = operands[i] + } + return &Expr{ + Terms: ts, + } +} + +// Call creates a new term for the built-in with the given operands. +func (b *Builtin) Call(operands ...*Term) *Term { + call := make(Call, len(operands)+1) + call[0] = NewTerm(b.Ref()) + for i := range operands { + call[i+1] = operands[i] + } + return NewTerm(call) +} + +// Ref returns a Ref that refers to the built-in function. +func (b *Builtin) Ref() Ref { + parts := strings.Split(b.Name, ".") + ref := make(Ref, len(parts)) + ref[0] = VarTerm(parts[0]) + for i := 1; i < len(parts); i++ { + ref[i] = InternedTerm(parts[i]) + } + return ref +} + +// IsTargetPos returns true if a variable in the i-th position will be bound by +// evaluating the call expression. +func (b *Builtin) IsTargetPos(i int) bool { + return b.Decl.Arity() == i +} + +// NeedsBuiltInContext returns true if the built-in depends on the built-in context. +func (b *Builtin) NeedsBuiltInContext() bool { + // Negated, so built-ins we don't know about (and who don't know about this option) + // will get a built-in context provided to them. + return !b.CanSkipBctx +} + +func init() { + BuiltinMap = map[string]*Builtin{} + for _, b := range &DefaultBuiltins { + RegisterBuiltin(b) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go b/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go new file mode 100644 index 0000000000..170f0bf176 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go @@ -0,0 +1,293 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package ast + +import ( + "bytes" + _ "embed" + "encoding/json" + "fmt" + "io" + "os" + "slices" + "sort" + "strings" + "sync" + + "github.com/open-policy-agent/opa/internal/semver" + "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities" + caps "github.com/open-policy-agent/opa/v1/capabilities" + "github.com/open-policy-agent/opa/v1/util" +) + +// VersionIndex contains an index from built-in function name, language feature, +// and future rego keyword to version number. During the build, this is used to +// create an index of the minimum version required for the built-in/feature/kw. +type VersionIndex struct { + Builtins map[string]semver.Version `json:"builtins"` + Features map[string]semver.Version `json:"features"` + Keywords map[string]semver.Version `json:"keywords"` +} + +// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go +// and run as part of go:generate. We generate the version index as part of the +// build process because it's relatively expensive to build (it takes ~500ms on +// my machine) and never changes. +// +//go:embed version_index.json +var versionIndexBs []byte + +// init only on demand, as JSON unmarshalling comes with some cost, and contributes +// noise to things like pprof stats +var minVersionIndexOnce = sync.OnceValue(func() VersionIndex { + var vi VersionIndex + if err := json.Unmarshal(versionIndexBs, &vi); err != nil { + panic(err) + } + return vi +}) + +// In the compiler, we used this to check that we're OK working with ref heads. +// If this isn't present, we'll fail. This is to ensure that older versions of +// OPA can work with policies that we're compiling -- if they don't know ref +// heads, they wouldn't be able to parse them. 
+const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes" +const FeatureRefHeads = "rule_head_refs" +const FeatureRegoV1 = "rego_v1" +const FeatureRegoV1Import = "rego_v1_import" +const FeatureKeywordsInRefs = "keywords_in_refs" +const FeatureTemplateStrings = "template_strings" + +// Features carries the default features supported by this version of OPA. +// Use RegisterFeatures to add to them. +var Features = []string{ + FeatureRegoV1, + FeatureKeywordsInRefs, + FeatureTemplateStrings, +} + +// RegisterFeatures lets applications wrapping OPA register features, to be +// included in `ast.CapabilitiesForThisVersion()`. +func RegisterFeatures(fs ...string) { + for i := range fs { + if slices.Contains(Features, fs[i]) { + continue + } + Features = append(Features, fs[i]) + } +} + +// Capabilities defines a structure containing data that describes the capabilities +// or features supported by a particular version of OPA. +type Capabilities struct { + Builtins []*Builtin `json:"builtins,omitempty"` + FutureKeywords []string `json:"future_keywords,omitempty"` + WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"` + + // Features is a bit of a mixed bag for checking that an older version of OPA + // is able to do what needs to be done. + // TODO(sr): find better words ^^ + Features []string `json:"features,omitempty"` + + // allow_net is an array of hostnames or IP addresses, that an OPA instance is + // allowed to connect to. + // If omitted, ANY host can be connected to. If empty, NO host can be connected to. + // As of now, this only controls fetching remote refs for using JSON Schemas in + // the type checker. + // TODO(sr): support ports to further restrict connection peers + AllowNet []string `json:"allow_net,omitempty"` +} + +// WasmABIVersion captures the Wasm ABI version. Its `Minor` version is indicating +// backwards-compatible changes. 
+type WasmABIVersion struct { + Version int `json:"version"` + Minor int `json:"minor_version"` +} + +type CapabilitiesOptions struct { + regoVersion RegoVersion +} + +func newCapabilitiesOptions(opts []CapabilitiesOption) CapabilitiesOptions { + co := CapabilitiesOptions{} + for _, opt := range opts { + opt(&co) + } + return co +} + +type CapabilitiesOption func(*CapabilitiesOptions) + +func CapabilitiesRegoVersion(regoVersion RegoVersion) CapabilitiesOption { + return func(o *CapabilitiesOptions) { + o.regoVersion = regoVersion + } +} + +// CapabilitiesForThisVersion returns the capabilities of this version of OPA. +func CapabilitiesForThisVersion(opts ...CapabilitiesOption) *Capabilities { + co := newCapabilitiesOptions(opts) + + f := &Capabilities{} + + for _, vers := range capabilities.ABIVersions() { + f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]}) + } + + f.Builtins = make([]*Builtin, len(Builtins)) + copy(f.Builtins, Builtins) + + slices.SortFunc(f.Builtins, func(a, b *Builtin) int { + return strings.Compare(a.Name, b.Name) + }) + + switch co.regoVersion { + case RegoV0, RegoV0CompatV1: + for kw := range allFutureKeywords { + f.FutureKeywords = append(f.FutureKeywords, kw) + } + + f.Features = []string{ + FeatureRefHeadStringPrefixes, + FeatureRefHeads, + FeatureRegoV1Import, + FeatureRegoV1, // Included in v0 capabilities to allow v1 bundles in --v0-compatible mode + FeatureKeywordsInRefs, + } + default: + for kw := range futureKeywords { + f.FutureKeywords = append(f.FutureKeywords, kw) + } + + f.Features = make([]string, len(Features)) + copy(f.Features, Features) + } + + sort.Strings(f.FutureKeywords) + sort.Strings(f.Features) + + return f +} + +// LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r. 
+func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) { + d := util.NewJSONDecoder(r) + var c Capabilities + return &c, d.Decode(&c) +} + +// LoadCapabilitiesVersion loads a JSON serialized capabilities structure from the specific version. +func LoadCapabilitiesVersion(version string) (*Capabilities, error) { + cvs, err := LoadCapabilitiesVersions() + if err != nil { + return nil, err + } + + for _, cv := range cvs { + if cv == version { + cont, err := caps.FS.ReadFile(cv + ".json") + if err != nil { + return nil, err + } + + return LoadCapabilitiesJSON(bytes.NewReader(cont)) + } + + } + return nil, fmt.Errorf("no capabilities version found %v", version) +} + +// LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file. +func LoadCapabilitiesFile(file string) (*Capabilities, error) { + fd, err := os.Open(file) + if err != nil { + return nil, err + } + defer fd.Close() + return LoadCapabilitiesJSON(fd) +} + +// LoadCapabilitiesVersions loads all capabilities versions +func LoadCapabilitiesVersions() ([]string, error) { + ents, err := caps.FS.ReadDir(".") + if err != nil { + return nil, err + } + + capabilitiesVersions := make([]string, 0, len(ents)) + for _, ent := range ents { + capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1)) + } + + slices.SortStableFunc(capabilitiesVersions, semver.Compare) + + return capabilitiesVersions, nil +} + +// MinimumCompatibleVersion returns the minimum compatible OPA version based on +// the built-ins, features, and keywords in c. 
+func (c *Capabilities) MinimumCompatibleVersion() (string, bool) { + // this is the oldest OPA release that includes capabilities + maxVersion := semver.MustParse("0.17.0") + minVersionIndex := minVersionIndexOnce() + + for _, bi := range c.Builtins { + v, ok := minVersionIndex.Builtins[bi.Name] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + for _, kw := range c.FutureKeywords { + v, ok := minVersionIndex.Keywords[kw] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + for _, feat := range c.Features { + v, ok := minVersionIndex.Features[feat] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + return maxVersion.String(), true +} + +func (c *Capabilities) ContainsFeature(feature string) bool { + return slices.Contains(c.Features, feature) +} + +func (c *Capabilities) ContainsBuiltin(name string) bool { + return slices.ContainsFunc(c.Builtins, func(builtin *Builtin) bool { + return builtin.Name == name + }) +} + +// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name +// will be overwritten. +func (c *Capabilities) addBuiltinSorted(bi *Builtin) { + i := sort.Search(len(c.Builtins), func(x int) bool { + return c.Builtins[x].Name >= bi.Name + }) + if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name { + c.Builtins[i] = bi + return + } + c.Builtins = append(c.Builtins, nil) + copy(c.Builtins[i+1:], c.Builtins[i:]) + c.Builtins[i] = bi +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/check.go b/vendor/github.com/open-policy-agent/opa/v1/ast/check.go new file mode 100644 index 0000000000..6e4d8ddd74 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/check.go @@ -0,0 +1,1325 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package ast + +import ( + "fmt" + "regexp" + "slices" + "strings" + + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +type varRewriter func(Ref) Ref + +// typeChecker implements type checking on queries and rules. Errors are +// accumulated on the typeChecker so that a single run can report multiple +// issues. +type typeChecker struct { + builtins map[string]*Builtin + required *Capabilities + errs Errors + varRewriter varRewriter + ss *SchemaSet + allowNet []string + input types.Type + allowUndefinedFuncs bool + schemaTypes map[string]types.Type +} + +// newTypeChecker returns a new typeChecker object that has no errors. +func newTypeChecker() *typeChecker { + return &typeChecker{} +} + +func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv { + if exist != nil { + return exist.wrap() + } + env := newTypeEnv(tc.copy) + if tc.input != nil { + env.tree.Put(InputRootRef, tc.input) + } + return env +} + +func (tc *typeChecker) copy() *typeChecker { + return newTypeChecker(). + WithVarRewriter(tc.varRewriter). + WithSchemaSet(tc.ss). + WithSchemaTypes(tc.schemaTypes). + WithAllowNet(tc.allowNet). + WithInputType(tc.input). + WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs). + WithBuiltins(tc.builtins). 
+ WithRequiredCapabilities(tc.required) +} + +func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker { + tc.required = c + return tc +} + +func (tc *typeChecker) WithBuiltins(builtins map[string]*Builtin) *typeChecker { + tc.builtins = builtins + return tc +} + +func (tc *typeChecker) WithSchemaSet(ss *SchemaSet) *typeChecker { + tc.ss = ss + return tc +} + +func (tc *typeChecker) WithSchemaTypes(schemaTypes map[string]types.Type) *typeChecker { + tc.schemaTypes = schemaTypes + return tc +} + +func (tc *typeChecker) WithAllowNet(hosts []string) *typeChecker { + tc.allowNet = hosts + return tc +} + +func (tc *typeChecker) WithVarRewriter(f varRewriter) *typeChecker { + tc.varRewriter = f + return tc +} + +func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker { + tc.input = tpe + return tc +} + +// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions. +// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped. +func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker { + tc.allowUndefinedFuncs = allow + return tc +} + +// Env returns a type environment for the specified built-ins with any other +// global types configured on the checker. In practice, this is the default +// environment that other statements will be checked against. +func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv { + env := tc.newEnv(nil) + for _, bi := range builtins { + env.tree.Put(bi.Ref(), bi.Decl) + } + return env +} + +// CheckBody runs type checking on the body and returns a TypeEnv if no errors +// are found. The resulting TypeEnv wraps the provided one. The resulting +// TypeEnv will be able to resolve types of vars contained in the body. 
+func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) { + var errors []*Error + + env = tc.newEnv(env) + vis := newRefChecker(env, tc.varRewriter) + gv := NewGenericVisitor(vis.Visit) + + for _, bexpr := range body { + WalkExprs(bexpr, func(expr *Expr) bool { + closureErrs := tc.checkClosures(env, expr) + errors = append(errors, closureErrs...) + + // reset errors from previous iteration + vis.errs = nil + gv.Walk(expr) + errors = append(errors, vis.errs...) + + if err := tc.checkExpr(env, expr); err != nil { + hasClosureErrors := len(closureErrs) > 0 + hasRefErrors := len(vis.errs) > 0 + // Suppress this error if a more actionable one has occurred. In + // this case, if an error occurred in a ref or closure contained in + // this expression, and the error is due to a nil type, then it's + // likely to be the result of the more specific error. + skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err) + if !skip { + errors = append(errors, err) + } + } + return true + }) + } + + tc.err(errors...) + return env, errors +} + +// CheckTypes runs type checking on the rules returns a TypeEnv if no errors +// are found. The resulting TypeEnv wraps the provided one. The resulting +// TypeEnv will be able to resolve types of refs that refer to rules. 
+func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T, as *AnnotationSet) (*TypeEnv, Errors) { + env = tc.newEnv(env) + for _, s := range sorted { + tc.checkRule(env, as, s.(*Rule)) + } + tc.errs.Sort() + return env, tc.errs +} + +func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors { + var result Errors + WalkClosures(expr, func(x any) bool { + switch x := x.(type) { + case *ArrayComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + case *SetComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + case *ObjectComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + } + return false + }) + return result +} + +func (tc *typeChecker) getSchemaType(schemaAnnot *SchemaAnnotation, rule *Rule) (types.Type, *Error) { + if tc.schemaTypes == nil { + tc.schemaTypes = make(map[string]types.Type) + } + + if len(schemaAnnot.Schema) > 0 { + if refType, exists := tc.schemaTypes[schemaAnnot.Schema.String()]; exists { + return refType, nil + } + } + + refType, err := processAnnotation(tc.ss, schemaAnnot, rule, tc.allowNet) + if err != nil { + return nil, err + } + + if refType == nil { + return nil, nil + } + + // Only add to cache if schema is read from file + if len(schemaAnnot.Schema) > 0 { + tc.schemaTypes[schemaAnnot.Schema.String()] = refType + } + + return refType, nil + +} + +func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { + + env = env.wrap() + + schemaAnnots := getRuleAnnotation(as, rule) + for _, schemaAnnot := range schemaAnnots { + refType, err := tc.getSchemaType(schemaAnnot, rule) + if err != nil { + tc.err(err) + continue + } + + ref := schemaAnnot.Path + // if we do not have a ref or a reftype, we should not evaluate this rule. 
+ if ref == nil || refType == nil { + continue + } + + prefixRef, t := getPrefix(env, ref) + if t == nil || len(prefixRef) == len(ref) { + env.tree.Put(ref, refType) + } else { + newType, err := override(ref[len(prefixRef):], t, refType, rule) + if err != nil { + tc.err(err) + continue + } + env.tree.Put(prefixRef, newType) + } + } + + cpy, err := tc.CheckBody(env, rule.Body) + env = env.next + path := rule.Ref() + + if len(err) > 0 { + // if the rule/function contains an error, add it to the type env so + // that expressions that refer to this rule/function do not encounter + // type errors. + env.tree.Put(path, types.A) + return + } + + var tpe types.Type + + if len(rule.Head.Args) > 0 { + for _, arg := range rule.Head.Args { + // If args are not referred to in body, infer as any. + WalkTerms(arg, func(t *Term) bool { + if _, ok := t.Value.(Var); ok { + if cpy.GetByValue(t.Value) == nil { + cpy.tree.PutOne(t.Value, types.A) + } + } + return false + }) + } + + // Construct function type. + args := make([]types.Type, len(rule.Head.Args)) + for i := range rule.Head.Args { + args[i] = cpy.GetByValue(rule.Head.Args[i].Value) + } + + tpe = types.NewFunction(args, cpy.GetByValue(rule.Head.Value.Value)) + } else { + switch rule.Head.RuleKind() { + case SingleValue: + typeV := cpy.GetByValue(rule.Head.Value.Value) + if !path.IsGround() { + // e.g. 
store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z] + objPath := path.DynamicSuffix() + path = path.GroundPrefix() + + var err error + tpe, err = nestedObject(cpy, objPath, typeV) + if err != nil { + tc.err(NewError(TypeErr, rule.Head.Location, "%s", err.Error())) + tpe = nil + } + } else if typeV != nil { + tpe = typeV + } + case MultiValue: + typeK := cpy.GetByValue(rule.Head.Key.Value) + if typeK != nil { + tpe = types.NewSet(typeK) + } + } + } + + if tpe != nil { + env.tree.Insert(path, tpe, env) + } +} + +// nestedObject creates a nested structure of object types, where each term on path corresponds to a level in the +// nesting. Each term in the path only contributes to the dynamic portion of its corresponding object. +func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) { + if len(path) == 0 { + return tpe, nil + } + + k := path[0] + typeV, err := nestedObject(env, path[1:], tpe) + if err != nil { + return nil, err + } + if typeV == nil { + return nil, nil + } + + var dynamicProperty *types.DynamicProperty + typeK := env.GetByValue(k.Value) + if typeK == nil { + return nil, nil + } + dynamicProperty = types.NewDynamicProperty(typeK, typeV) + + return types.NewObject(nil, dynamicProperty), nil +} + +func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error { + if err := tc.checkExprWith(env, expr, 0); err != nil { + return err + } + if !expr.IsCall() { + return nil + } + + operator := expr.Operator().String() + + // If the type checker wasn't provided with a required capabilities + // structure then just skip. In some cases, type checking might be run + // without the need to record what builtins are required. 
+ if tc.required != nil && tc.builtins != nil { + if bi, ok := tc.builtins[operator]; ok { + tc.required.addBuiltinSorted(bi) + } + } + + if operator == "eq" { + return checkExprEq(env, expr) + } + + return tc.checkExprBuiltin(env, expr) +} + +func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { + // NOTE(tsandall): undefined functions will have been caught earlier in the + // compiler. We check for undefined functions before the safety check so + // that references to non-existent functions result in undefined function + // errors as opposed to unsafe var errors. + // + // We cannot run type checking before the safety check because part of the + // type checker relies on reordering (in particular for references to local + // vars). + name := expr.Operator() + tpe := env.GetByRef(name) + + if tpe == nil { + if tc.allowUndefinedFuncs { + return nil + } + return NewError(TypeErr, expr.Location, "undefined function %v", name) + } + + if t, ok := tpe.(types.Any); ok { + // A type.Any with a len(0) is created by using types.A , this represents a potential non-local reference + // This is the exception when checking if the type represents a function + if len(t) == 0 { + return nil + } + } + + ftpe, ok := tpe.(*types.Function) + if !ok { + return NewError(TypeErr, expr.Location, "undefined function %v", name) + } + + fargs := ftpe.FuncArgs() + namedFargs := ftpe.NamedFuncArgs() + + if ftpe.Result() != nil { + fargs.Args = append(fargs.Args, ftpe.Result()) + namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult()) + } + + args := expr.Operands() + + if len(args) > len(fargs.Args) && fargs.Variadic == nil { + return newArgError(expr.Location, name, "too many arguments", getArgTypes(env, args), namedFargs) + } + + if len(args) < len(ftpe.FuncArgs().Args) { + return newArgError(expr.Location, name, "too few arguments", getArgTypes(env, args), namedFargs) + } + + for i := range args { + if !unify1(env, args[i], fargs.Arg(i), false) { + post := 
make([]types.Type, len(args)) + for i := range args { + post[i] = env.GetByValue(args[i].Value) + } + return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs) + } + } + + return nil +} + +func checkExprEq(env *TypeEnv, expr *Expr) *Error { + + pre := getArgTypes(env, expr.Operands()) + + if len(pre) < Equality.Decl.Arity() { + return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, Equality.Decl.FuncArgs()) + } + + if Equality.Decl.Arity() < len(pre) { + return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, Equality.Decl.FuncArgs()) + } + + a, b := expr.Operand(0), expr.Operand(1) + typeA, typeB := env.GetByValue(a.Value), env.GetByValue(b.Value) + + if !unify2(env, a, typeA, b, typeB) { + err := NewError(TypeErr, expr.Location, "match error") + err.Details = &UnificationErrDetail{ + Left: typeA, + Right: typeB, + } + return err + } + + return nil +} + +func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error { + if i == len(expr.With) { + return nil + } + + target, value := expr.With[i].Target, expr.With[i].Value + targetType, valueType := env.GetByValue(target.Value), env.GetByValue(value.Value) + + if t, ok := targetType.(*types.Function); ok { // built-in function replacement + switch v := valueType.(type) { + case *types.Function: // ...by function + if !unifies(targetType, valueType) { + return newArgError(expr.With[i].Loc(), target.Value.(Ref), "arity mismatch", v.FuncArgs().Args, t.NamedFuncArgs()) + } + default: // ... 
by value, nothing to check + } + } + + return tc.checkExprWith(env, expr, i+1) +} + +func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool { + + nilA := types.Nil(typeA) + nilB := types.Nil(typeB) + + if nilA && !nilB { + return unify1(env, a, typeB, false) + } else if nilB && !nilA { + return unify1(env, b, typeA, false) + } else if !nilA && !nilB { + return unifies(typeA, typeB) + } + + switch a.Value.(type) { + case *Array: + return unify2Array(env, a, b) + case *object: + return unify2Object(env, a, b) + case Var: + switch b.Value.(type) { + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false) + case *Array: + return unify2Array(env, b, a) + case *object: + return unify2Object(env, b, a) + } + } + + return false +} + +func unify2Array(env *TypeEnv, a *Term, b *Term) bool { + arr := a.Value.(*Array) + switch bv := b.Value.(type) { + case *Array: + if arr.Len() == bv.Len() { + for i := range arr.Len() { + if !unify2(env, arr.Elem(i), env.GetByValue(arr.Elem(i).Value), bv.Elem(i), env.GetByValue(bv.Elem(i).Value)) { + return false + } + } + return true + } + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false) + } + return false +} + +func unify2Object(env *TypeEnv, a *Term, b *Term) bool { + obj := a.Value.(Object) + switch bv := b.Value.(type) { + case *object: + cv := obj.Intersect(bv) + if obj.Len() == bv.Len() && bv.Len() == len(cv) { + for i := range cv { + if !unify2(env, cv[i][1], env.GetByValue(cv[i][1].Value), cv[i][2], env.GetByValue(cv[i][2].Value)) { + return false + } + } + return true + } + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.GetByValue(a.Value), false) + } + return false +} + +func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool { + switch v := term.Value.(type) { + case *Array: + switch tpe := tpe.(type) { + case *types.Array: + return unify1Array(env, v, tpe, union) + 
case types.Any: + if types.Compare(tpe, types.A) == 0 { + for i := range v.Len() { + unify1(env, v.Elem(i), types.A, true) + } + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case *object: + switch tpe := tpe.(type) { + case *types.Object: + return unify1Object(env, v, tpe, union) + case types.Any: + if types.Compare(tpe, types.A) == 0 { + v.Foreach(func(key, value *Term) { + unify1(env, key, types.A, true) + unify1(env, value, types.A, true) + }) + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case *set: + switch tpe := tpe.(type) { + case *types.Set: + return unify1Set(env, v, tpe, union) + case types.Any: + if types.Compare(tpe, types.A) == 0 { + v.Foreach(func(elem *Term) { + unify1(env, elem, types.A, true) + }) + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return unifies(env.GetByValue(v), tpe) + case Var: + if !union { + if exist := env.GetByValue(v); exist != nil { + return unifies(exist, tpe) + } + env.tree.PutOne(term.Value, tpe) + } else { + env.tree.PutOne(term.Value, types.Or(env.GetByValue(v), tpe)) + } + return true + default: + if !IsConstant(v) { + panic("unreachable") + } + return unifies(env.GetByValue(term.Value), tpe) + } +} + +func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool { + if val.Len() != tpe.Len() && tpe.Dynamic() == nil { + return false + } + for i := range val.Len() { + if !unify1(env, val.Elem(i), tpe.Select(i), union) { + return false + } + } + return true +} + +func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool { + if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil { + 
return false + } + stop := val.Until(func(k, v *Term) bool { + if IsConstant(k.Value) { + if child := selectConstant(tpe, k); child != nil { + if !unify1(env, v, child, union) { + return true + } + } else { + return true + } + } else { + // Inferring type of value under dynamic key would involve unioning + // with all property values of tpe whose keys unify. For now, type + // these values as Any. We can investigate stricter inference in + // the future. + unify1(env, v, types.A, union) + } + return false + }) + return !stop +} + +func unify1Set(env *TypeEnv, val *set, tpe *types.Set, union bool) bool { + of := types.Values(tpe) + return !val.Until(func(elem *Term) bool { + return !unify1(env, elem, of, union) + }) +} + +func (tc *typeChecker) err(errors ...*Error) { + tc.errs = append(tc.errs, errors...) +} + +type refChecker struct { + env *TypeEnv + errs Errors + varRewriter varRewriter +} + +func rewriteVarsNop(node Ref) Ref { + return node +} + +func newRefChecker(env *TypeEnv, f varRewriter) *refChecker { + if f == nil { + f = rewriteVarsNop + } + + return &refChecker{ + env: env, + varRewriter: f, + } +} + +func (rc *refChecker) Visit(x any) bool { + switch x := x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return true + case *Expr: + switch terms := x.Terms.(type) { + case []*Term: + vis := NewGenericVisitor(rc.Visit) + for i := 1; i < len(terms); i++ { + vis.Walk(terms[i]) + } + return true + case *Term: + NewGenericVisitor(rc.Visit).Walk(terms) + return true + } + case Ref: + if err := rc.checkApply(rc.env, x); err != nil { + rc.errs = append(rc.errs, err) + return true + } + if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil { + rc.errs = append(rc.errs, err) + } + } + return false +} + +func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error { + if tpe, ok := curr.GetByRef(ref).(*types.Function); ok { + // NOTE(sr): We don't support first-class functions, except for `with`. 
+ return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe) + } + + return nil +} + +func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error { + + if idx == len(ref) { + return nil + } + + head := ref[idx] + + // NOTE(sr): as long as package statements are required, this isn't possible: + // the shortest possible rule ref is data.a.b (b is idx 2), idx 1 and 2 need to + // be strings or vars. + if idx == 1 || idx == 2 { + switch head.Value.(type) { + case Var, String: // OK + default: + have := rc.env.GetByValue(head.Value) + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node)) + } + } + + if _, ok := head.Value.(Var); ok && idx != 0 { + tpe := types.Keys(rc.env.getRefRecExtent(node)) + if exist := rc.env.GetByValue(head.Value); exist != nil { + if !unifies(tpe, exist) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node)) + } + } else { + rc.env.tree.PutOne(head.Value, tpe) + } + } + + child := node.Child(head.Value) + if child == nil { + // NOTE(sr): idx is reset on purpose: we start over + switch { + case curr.next != nil: + next := curr.next + return rc.checkRef(next, next.tree, ref, 0) + + case RootDocumentNames.Contains(ref[0]): + if idx != 0 { + node.Children().Iter(func(_ Value, child *typeTreeNode) bool { + _ = rc.checkRef(curr, child, ref, idx+1) // ignore error + return false + }) + return nil + } + return rc.checkRefLeaf(types.A, ref, 1) + + default: + return rc.checkRefLeaf(types.A, ref, 0) + } + } + + if child.Leaf() { + return rc.checkRefLeaf(child.Value(), ref, idx+1) + } + + return rc.checkRef(curr, child, ref, idx+1) +} + +func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error { + if idx == len(ref) { + return nil + } + + head := ref[idx] + + keys := types.Keys(tpe) + if keys == nil { + return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe) + } 
+ + switch value := head.Value.(type) { + + case Var: + if exist := rc.env.GetByValue(head.Value); exist != nil { + if !unifies(exist, keys) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) + } + } else { + rc.env.tree.PutOne(head.Value, types.Keys(tpe)) + } + + case Ref: + if exist := rc.env.GetByRef(value); exist != nil { + if !unifies(exist, keys) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) + } + } + + case *Array, Object, Set: + if !unify1(rc.env, head, keys, false) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil) + } + + default: + child := selectConstant(tpe, head) + if child == nil { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe)) + } + return rc.checkRefLeaf(child, ref, idx+1) + } + + return rc.checkRefLeaf(types.Values(tpe), ref, idx+1) +} + +func unifies(a, b types.Type) bool { + + if a == nil || b == nil { + return false + } + + anyA, ok1 := a.(types.Any) + if ok1 { + if unifiesAny(anyA, b) { + return true + } + } + + anyB, ok2 := b.(types.Any) + if ok2 { + if unifiesAny(anyB, a) { + return true + } + } + + if ok1 || ok2 { + return false + } + + switch a := a.(type) { + case types.Null: + _, ok := b.(types.Null) + return ok + case types.Boolean: + _, ok := b.(types.Boolean) + return ok + case types.Number: + _, ok := b.(types.Number) + return ok + case types.String: + _, ok := b.(types.String) + return ok + case *types.Array: + b, ok := b.(*types.Array) + if !ok { + return false + } + return unifiesArrays(a, b) + case *types.Object: + b, ok := b.(*types.Object) + if !ok { + return false + } + return unifiesObjects(a, b) + case *types.Set: + b, ok := b.(*types.Set) + if !ok { + return false + } + return unifies(types.Values(a), types.Values(b)) + case *types.Function: + // NOTE(sr): variadic functions can only be internal 
ones, and we've forbidden + // their replacement via `with`; so we disregard variadic here + if types.Arity(a) == types.Arity(b) { + b := b.(*types.Function) + for i := range a.FuncArgs().Args { + if !unifies(a.FuncArgs().Arg(i), b.FuncArgs().Arg(i)) { + return false + } + } + return true + } + return false + default: + panic("unreachable") + } +} + +func unifiesAny(a types.Any, b types.Type) bool { + if _, ok := b.(*types.Function); ok { + return false + } + for i := range a { + if unifies(a[i], b) { + return true + } + } + return len(a) == 0 +} + +func unifiesArrays(a, b *types.Array) bool { + + if !unifiesArraysStatic(a, b) { + return false + } + + if !unifiesArraysStatic(b, a) { + return false + } + + return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic()) +} + +func unifiesArraysStatic(a, b *types.Array) bool { + if a.Len() != 0 { + for i := range a.Len() { + if !unifies(a.Select(i), b.Select(i)) { + return false + } + } + } + return true +} + +func unifiesObjects(a, b *types.Object) bool { + if !unifiesObjectsStatic(a, b) { + return false + } + + if !unifiesObjectsStatic(b, a) { + return false + } + + return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue()) +} + +func unifiesObjectsStatic(a, b *types.Object) bool { + for _, k := range a.Keys() { + if !unifies(a.Select(k), b.Select(k)) { + return false + } + } + return true +} + +// typeErrorCause defines an interface to determine the reason for a type +// error. The type error details implement this interface so that type checking +// can report more actionable errors. +type typeErrorCause interface { + nilType() bool +} + +func causedByNilType(err *Error) bool { + cause, ok := err.Details.(typeErrorCause) + if !ok { + return false + } + return cause.nilType() +} + +// ArgErrDetail represents a generic argument error. 
+type ArgErrDetail struct { + Have []types.Type `json:"have"` + Want types.FuncArgs `json:"want"` +} + +// Lines returns the string representation of the detail. +func (d *ArgErrDetail) Lines() []string { + lines := make([]string, 2) + lines[0] = "have: " + formatArgs(d.Have) + lines[1] = "want: " + d.Want.String() + return lines +} + +func (d *ArgErrDetail) nilType() bool { + return slices.ContainsFunc(d.Have, types.Nil) +} + +// UnificationErrDetail describes a type mismatch error when two values are +// unified (e.g., x = [1,2,y]). +type UnificationErrDetail struct { + Left types.Type `json:"a"` + Right types.Type `json:"b"` +} + +func (a *UnificationErrDetail) nilType() bool { + return types.Nil(a.Left) || types.Nil(a.Right) +} + +// Lines returns the string representation of the detail. +func (a *UnificationErrDetail) Lines() []string { + lines := make([]string, 2) + lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left)) + lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right)) + return lines +} + +// RefErrUnsupportedDetail describes an undefined reference error where the +// referenced value does not support dereferencing (e.g., scalars). +type RefErrUnsupportedDetail struct { + Ref Ref `json:"ref"` // invalid ref + Pos int `json:"pos"` // invalid element + Have types.Type `json:"have"` // referenced type +} + +// Lines returns the string representation of the detail. +func (r *RefErrUnsupportedDetail) Lines() []string { + lines := []string{ + r.Ref.String(), + strings.Repeat("^", len(r.Ref[:r.Pos+1].String())), + fmt.Sprintf("have: %v", r.Have), + } + return lines +} + +// RefErrInvalidDetail describes an undefined reference error where the referenced +// value does not support the reference operand (e.g., missing object key, +// invalid key type, etc.) 
+type RefErrInvalidDetail struct { + Ref Ref `json:"ref"` // invalid ref + Pos int `json:"pos"` // invalid element + Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements) + Want types.Type `json:"want"` // allowed type (for non-object values) + OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys) +} + +// Lines returns the string representation of the detail. +func (r *RefErrInvalidDetail) Lines() []string { + lines := []string{r.Ref.String()} + offset := len(r.Ref[:r.Pos].String()) + 1 + pad := strings.Repeat(" ", offset) + lines = append(lines, pad+"^") + if r.Have != nil { + lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have)) + } else { + lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos])) + } + if len(r.OneOf) > 0 { + lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf)) + } else { + lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want)) + } + return lines +} + +func formatArgs(args []types.Type) string { + buf := make([]string, len(args)) + for i := range args { + buf[i] = types.Sprint(args[i]) + } + return "(" + strings.Join(buf, ", ") + ")" +} + +func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error { + err := newRefError(loc, ref) + err.Details = &RefErrInvalidDetail{ + Ref: ref, + Pos: idx, + Have: have, + Want: want, + OneOf: oneOf, + } + return err +} + +func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error { + var err *Error + switch have.(type) { + case *types.Function: + var function string + // drop any trailing references to unidentified parameters (e.g. 
__local1__) + if match, err := regexp.MatchString(`__local[0-9]+__`, ref[len(ref)-1].Value.String()); err == nil && match { + function = ref[:len(ref)-1].String() + } else { + function = ref.String() + } + + err = NewError(TypeErr, loc, "function %s used as reference, not called", function) + default: + err = newRefError(loc, ref) + } + err.Details = &RefErrUnsupportedDetail{ + Ref: ref, + Pos: idx, + Have: have, + } + return err +} + +func newRefError(loc *Location, ref Ref) *Error { + return NewError(TypeErr, loc, "undefined ref: %v", ref) +} + +func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want types.FuncArgs) *Error { + err := NewError(TypeErr, loc, "%v: %v", builtinName, msg) + err.Details = &ArgErrDetail{ + Have: have, + Want: want, + } + return err +} + +func getOneOfForNode(node *typeTreeNode) (result []Value) { + node.Children().Iter(func(k Value, _ *typeTreeNode) bool { + result = append(result, k) + return false + }) + + slices.SortFunc(result, Value.Compare) + return result +} + +func getOneOfForType(tpe types.Type) (result []Value) { + switch tpe := tpe.(type) { + case *types.Object: + for _, k := range tpe.Keys() { + v, err := InterfaceToValue(k) + if err != nil { + panic(err) + } + result = append(result, v) + } + + case types.Any: + for _, object := range tpe { + objRes := getOneOfForType(object) + result = append(result, objRes...) 
+ } + } + + result = removeDuplicate(result) + slices.SortFunc(result, Value.Compare) + return result +} + +func removeDuplicate(list []Value) []Value { + seen := make(map[Value]bool) + var newResult []Value + for _, item := range list { + if !seen[item] { + newResult = append(newResult, item) + seen[item] = true + } + } + return newResult +} + +func getArgTypes(env *TypeEnv, args []*Term) []types.Type { + pre := make([]types.Type, len(args)) + for i := range args { + pre[i] = env.Get(args[i]) + } + return pre +} + +// getPrefix returns the shortest prefix of ref that exists in env +func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) { + if len(ref) == 1 { + t := env.GetByRef(ref) + if t != nil { + return ref, t + } + } + for i := 1; i < len(ref); i++ { + t := env.GetByRef(ref[:i]) + if t != nil { + return ref[:i], t + } + } + return nil, nil +} + +var dynamicAnyAny = types.NewDynamicProperty(types.A, types.A) + +// override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path) +func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) { + var newStaticProps []*types.StaticProperty + obj, ok := t.(*types.Object) + if !ok { + newType, err := getObjectType(ref, o, rule, dynamicAnyAny) + if err != nil { + return nil, err + } + return newType, nil + } + found := false + if ok { + staticProps := obj.StaticProperties() + for _, prop := range staticProps { + valueCopy := prop.Value + key, err := InterfaceToValue(prop.Key) + if err != nil { + return nil, NewError(TypeErr, rule.Location, "unexpected error in override: %s", err.Error()) + } + if len(ref) > 0 && ref[0].Value.Compare(key) == 0 { + found = true + if len(ref) == 1 { + valueCopy = o + } else { + newVal, err := override(ref[1:], valueCopy, o, rule) + if err != nil { + return nil, err + } + valueCopy = newVal + } + } + newStaticProps = append(newStaticProps, types.NewStaticProperty(prop.Key, 
valueCopy)) + } + } + + // ref[0] is not a top-level key in staticProps, so it must be added + if !found { + newType, err := getObjectType(ref, o, rule, obj.DynamicProperties()) + if err != nil { + return nil, err + } + newStaticProps = append(newStaticProps, newType.StaticProperties()...) + } + return types.NewObject(newStaticProps, obj.DynamicProperties()), nil +} + +func getKeys(ref Ref, rule *Rule) ([]any, *Error) { + keys := []any{} + for _, refElem := range ref { + key, err := JSON(refElem.Value) + if err != nil { + return nil, NewError(TypeErr, rule.Location, "error getting key from value: %s", err.Error()) + } + keys = append(keys, key) + } + return keys, nil +} + +func getObjectTypeRec(keys []any, o types.Type, d *types.DynamicProperty) *types.Object { + if len(keys) == 1 { + staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], o)} + return types.NewObject(staticProps, d) + } + + staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], getObjectTypeRec(keys[1:], o, d))} + return types.NewObject(staticProps, d) +} + +func getObjectType(ref Ref, o types.Type, rule *Rule, d *types.DynamicProperty) (*types.Object, *Error) { + keys, err := getKeys(ref, rule) + if err != nil { + return nil, err + } + return getObjectTypeRec(keys, o, d), nil +} + +func getRuleAnnotation(as *AnnotationSet, rule *Rule) (result []*SchemaAnnotation) { + + for _, x := range as.GetSubpackagesScope(rule.Module.Package.Path) { + result = append(result, x.Schemas...) + } + + if x := as.GetPackageScope(rule.Module.Package); x != nil { + result = append(result, x.Schemas...) + } + + if x := as.GetDocumentScope(rule.Ref().GroundPrefix()); x != nil { + result = append(result, x.Schemas...) + } + + for _, x := range as.GetRuleScope(rule) { + result = append(result, x.Schemas...) 
+ } + + return result +} + +func processAnnotation(ss *SchemaSet, annot *SchemaAnnotation, rule *Rule, allowNet []string) (types.Type, *Error) { + + var schema any + + if annot.Schema != nil { + if ss == nil { + return nil, nil + } + schema = ss.Get(annot.Schema) + if schema == nil { + return nil, NewError(TypeErr, rule.Location, "undefined schema: %v", annot.Schema) + } + } else if annot.Definition != nil { + schema = *annot.Definition + } + + tpe, err := loadSchema(schema, allowNet) + if err != nil { + return nil, NewError(TypeErr, rule.Location, "%s", err.Error()) + } + + return tpe, nil +} + +func errAnnotationRedeclared(a *Annotations, other *Location) *Error { + return NewError(TypeErr, a.Location, "%v annotation redeclared: %v", a.Scope, other) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go new file mode 100644 index 0000000000..8cd2bf9dc4 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go @@ -0,0 +1,439 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "cmp" + "fmt" + "math/big" + "strings" +) + +// Compare returns an integer indicating whether two AST values are less than, +// equal to, or greater than each other. +// +// If a is less than b, the return value is negative. If a is greater than b, +// the return value is positive. If a is equal to b, the return value is zero. +// +// Different types are never equal to each other. For comparison purposes, types +// are sorted as follows: +// +// nil < Null < Boolean < Number < String < Var < Ref < Array < Object < Set < +// ArrayComprehension < ObjectComprehension < SetComprehension < Expr < SomeDecl +// < With < Body < Rule < Import < Package < Module. 
+// +// Arrays and Refs are equal if and only if both a and b have the same length +// and all corresponding elements are equal. If one element is not equal, the +// return value is the same as for the first differing element. If all elements +// are equal but a and b have different lengths, the shorter is considered less +// than the other. +// +// Objects are considered equal if and only if both a and b have the same sorted +// (key, value) pairs and are of the same length. Other comparisons are +// consistent but not defined. +// +// Sets are considered equal if and only if the symmetric difference of a and b +// is empty. +// Other comparisons are consistent but not defined. +func Compare(a, b any) int { + + if t, ok := a.(*Term); ok { + if t == nil { + a = nil + } else { + a = t.Value + } + } + + if t, ok := b.(*Term); ok { + if t == nil { + b = nil + } else { + b = t.Value + } + } + + if a == nil { + if b == nil { + return 0 + } + return -1 + } + if b == nil { + return 1 + } + + sortA := sortOrder(a) + sortB := sortOrder(b) + + if sortA < sortB { + return -1 + } else if sortB < sortA { + return 1 + } + + switch a := a.(type) { + case Null: + return 0 + case Boolean: + if a == b.(Boolean) { + return 0 + } + if !a { + return -1 + } + return 1 + case Number: + return NumberCompare(a, b.(Number)) + case String: + b := b.(String) + if a == b { + return 0 + } + if a < b { + return -1 + } + return 1 + case *TemplateString: + b := b.(*TemplateString) + return a.Compare(b) + case Var: + return VarCompare(a, b.(Var)) + case Ref: + return termSliceCompare(a, b.(Ref)) + case *Array: + b := b.(*Array) + return termSliceCompare(a.elems, b.elems) + case *lazyObj: + return Compare(a.force(), b) + case *object: + if x, ok := b.(*lazyObj); ok { + b = x.force() + } + return a.Compare(b.(*object)) + case Set: + return a.Compare(b.(Set)) + case *ArrayComprehension: + b := b.(*ArrayComprehension) + if cmp := Compare(a.Term, b.Term); cmp != 0 { + return cmp + } + return 
a.Body.Compare(b.Body) + case *ObjectComprehension: + b := b.(*ObjectComprehension) + if cmp := Compare(a.Key, b.Key); cmp != 0 { + return cmp + } + if cmp := Compare(a.Value, b.Value); cmp != 0 { + return cmp + } + return a.Body.Compare(b.Body) + case *SetComprehension: + b := b.(*SetComprehension) + if cmp := Compare(a.Term, b.Term); cmp != 0 { + return cmp + } + return a.Body.Compare(b.Body) + case Call: + return termSliceCompare(a, b.(Call)) + case *Expr: + return a.Compare(b.(*Expr)) + case *SomeDecl: + return a.Compare(b.(*SomeDecl)) + case *Every: + return a.Compare(b.(*Every)) + case *With: + return a.Compare(b.(*With)) + case Body: + return a.Compare(b.(Body)) + case *Head: + return a.Compare(b.(*Head)) + case *Rule: + return a.Compare(b.(*Rule)) + case Args: + return termSliceCompare(a, b.(Args)) + case *Import: + return a.Compare(b.(*Import)) + case *Package: + return a.Compare(b.(*Package)) + case *Annotations: + return a.Compare(b.(*Annotations)) + case *Module: + return a.Compare(b.(*Module)) + } + panic(fmt.Sprintf("illegal value: %T", a)) +} + +type termSlice []*Term + +func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 } +func (s termSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s termSlice) Len() int { return len(s) } + +func sortOrder(x any) int { + switch x.(type) { + case Null: + return 0 + case Boolean: + return 1 + case Number: + return 2 + case String: + return 3 + case *TemplateString: + return 4 + case Var: + return 5 + case Ref: + return 6 + case *Array: + return 7 + case Object: + return 8 + case Set: + return 9 + case *ArrayComprehension: + return 10 + case *ObjectComprehension: + return 11 + case *SetComprehension: + return 12 + case Call: + return 13 + case Args: + return 14 + case *Expr: + return 100 + case *SomeDecl: + return 101 + case *Every: + return 102 + case *With: + return 110 + case *Head: + return 120 + case Body: + return 200 + case *Rule: + return 1000 + case *Import: + return 
1001 + case *Package: + return 1002 + case *Annotations: + return 1003 + case *Module: + return 10000 + } + panic(fmt.Sprintf("illegal value: %T", x)) +} + +func importsCompare(a, b []*Import) int { + minLen := min(len(b), len(a)) + for i := range minLen { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } + if len(b) < len(a) { + return 1 + } + return 0 +} + +func annotationsCompare(a, b []*Annotations) int { + minLen := min(len(b), len(a)) + for i := range minLen { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } + if len(b) < len(a) { + return 1 + } + return 0 +} + +func rulesCompare(a, b []*Rule) int { + minLen := min(len(b), len(a)) + for i := range minLen { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } + if len(b) < len(a) { + return 1 + } + return 0 +} + +func termSliceCompare(a, b []*Term) int { + minLen := min(len(b), len(a)) + for i := range minLen { + if cmp := Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } else if len(b) < len(a) { + return 1 + } + return 0 +} + +func withSliceCompare(a, b []*With) int { + minLen := min(len(b), len(a)) + for i := range minLen { + if cmp := Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } else if len(b) < len(a) { + return 1 + } + return 0 +} + +func VarCompare(a, b Var) int { + if a == b { + return 0 + } + if a < b { + return -1 + } + return 1 +} + +func TermValueCompare(a, b *Term) int { + return a.Value.Compare(b.Value) +} + +func TermValueEqual(a, b *Term) bool { + return ValueEqual(a.Value, b.Value) +} + +func ValueEqual(a, b Value) bool { + switch v := a.(type) { + case Null: + return v.Equal(b) + case Boolean: + return v.Equal(b) + case Number: + return v.Equal(b) + case String: + return v.Equal(b) + case Var: + return v.Equal(b) + case Ref: + return v.Equal(b) + case 
*Array: + return v.Equal(b) + case *TemplateString: + return v.Equal(b) + } + + return a.Compare(b) == 0 +} + +func RefCompare(a, b Ref) int { + return termSliceCompare(a, b) +} + +func RefEqual(a, b Ref) bool { + return termSliceEqual(a, b) +} + +func NumberCompare(x, y Number) int { + xs, ys := string(x), string(y) + + var xIsF, yIsF bool + + // Treat "1" and "1.0", "1.00", etc as "1" + if strings.Contains(xs, ".") { + if tx := strings.TrimRight(xs, ".0"); tx != xs { + // Still a float after trimming? + xIsF = strings.Contains(tx, ".") + xs = tx + } + } + if strings.Contains(ys, ".") { + if ty := strings.TrimRight(ys, ".0"); ty != ys { + yIsF = strings.Contains(ty, ".") + ys = ty + } + } + if xs == ys { + return 0 + } + + var xi, yi int64 + var xf, yf float64 + var xiOK, yiOK, xfOK, yfOK bool + + if xi, xiOK = x.Int64(); xiOK { + if yi, yiOK = y.Int64(); yiOK { + return cmp.Compare(xi, yi) + } + } + + if xIsF && yIsF { + if xf, xfOK = x.Float64(); xfOK { + if yf, yfOK = y.Float64(); yfOK { + if xf == yf { + return 0 + } + // could still be "equal" depending on precision, so we continue? 
+ } + } + } + + var a *big.Rat + fa, ok := new(big.Float).SetString(string(x)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + a = new(big.Rat).SetInt64(0) + } + } + if a == nil { + a, ok = new(big.Rat).SetString(string(x)) + if !ok { + panic("illegal value") + } + } + + var b *big.Rat + fb, ok := new(big.Float).SetString(string(y)) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + b = new(big.Rat).SetInt64(0) + } + } + if b == nil { + b, ok = new(big.Rat).SetString(string(y)) + if !ok { + panic("illegal value") + } + } + + return a.Cmp(b) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go new file mode 100644 index 0000000000..a9455145ef --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go @@ -0,0 +1,6704 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "errors" + "fmt" + "io" + "maps" + "slices" + "sort" + "strings" + "sync" + + "github.com/open-policy-agent/opa/internal/debug" + "github.com/open-policy-agent/opa/internal/gojsonschema" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// CompileErrorLimitDefault is the default number errors a compiler will allow before +// exiting. +const CompileErrorLimitDefault = 10 + +var ( + errLimitReached = newErrorString(CompileErr, nil, "error limit reached") + + doubleEq = Equal.Ref() +) + +// Compiler contains the state of a compilation process. +type Compiler struct { + + // Errors contains errors that occurred during the compilation process. + // If there are one or more errors, the compilation process is considered + // "failed". 
+ Errors Errors + + // Modules contains the compiled modules. The compiled modules are the + // output of the compilation process. If the compilation process failed, + // there is no guarantee about the state of the modules. + Modules map[string]*Module + + // ModuleTree organizes the modules into a tree where each node is keyed by + // an element in the module's package path. E.g., given modules containing + // the following package directives: "a", "a.b", "a.c", and "a.b", the + // resulting module tree would be: + // + // root + // | + // +--- data (no modules) + // | + // +--- a (1 module) + // | + // +--- b (2 modules) + // | + // +--- c (1 module) + // + ModuleTree *ModuleTreeNode + + // RuleTree organizes rules into a tree where each node is keyed by an + // element in the rule's path. The rule path is the concatenation of the + // containing package and the stringified rule name. E.g., given the + // following module: + // + // package ex + // p[1] { true } + // p[2] { true } + // q = true + // a.b.c = 3 + // + // root + // | + // +--- data (no rules) + // | + // +--- ex (no rules) + // | + // +--- p (2 rules) + // | + // +--- q (1 rule) + // | + // +--- a + // | + // +--- b + // | + // +--- c (1 rule) + // + // Another example with general refs containing vars at arbitrary locations: + // + // package ex + // a.b[x].d { x := "c" } # R1 + // a.b.c[x] { x := "d" } # R2 + // a.b[x][y] { x := "c"; y := "d" } # R3 + // p := true # R4 + // + // root + // | + // +--- data (no rules) + // | + // +--- ex (no rules) + // | + // +--- a + // | | + // | +--- b (R1, R3) + // | | + // | +--- c (R2) + // | + // +--- p (R4) + RuleTree *TreeNode + + // Graph contains dependencies between rules. An edge (u,v) is added to the + // graph if rule 'u' refers to the virtual document defined by 'v'. + Graph *Graph + + // TypeEnv holds type information for values inferred by the compiler. 
+ TypeEnv *TypeEnv + + // RewrittenVars is a mapping of variables that have been rewritten + // with the key being the generated name and value being the original. + RewrittenVars map[Var]Var + + // Capabilities required by the modules that were compiled. + Required *Capabilities + + localvargen *localVarGenerator + moduleLoader ModuleLoader + stages []stage + maxErrs int + errCount uint32 + mu *sync.Mutex // Mutex to protect both 'errCount' and 'Errors' + sorted []string // list of sorted module names + pathExists func([]string) (bool, error) + pathConflictCheckRoots []string + after map[string][]CompilerStageDefinition + metrics metrics.Metrics + capabilities *Capabilities // user-supplied capabilities + imports map[string][]*Import // saved imports from stripping + builtins map[string]*Builtin // universe of built-in functions + customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities) + unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities) + deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions + enablePrintStatements bool // indicates if print statements should be elided (default) + comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index + initialized bool // indicates if init() has been called + debug debug.Debug // emits debug information produced during compilation + schemaSet *SchemaSet // user-supplied schemas for input and data documents + inputType types.Type // global input type retrieved from schema set + annotationSet *AnnotationSet // hierarchical set of annotations + strict bool // enforce strict compilation checks + keepModules bool // whether to keep the unprocessed, parse modules (below) + parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true + useTypeCheckAnnotations bool // whether to 
provide annotated information (schemas) to the type checker + allowUndefinedFuncCalls bool // don't error on calls to unknown functions. + evalMode CompilerEvalMode // + rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing. + defaultRegoVersion RegoVersion + skipStages map[StageID]struct{} // stages to skip during compilation + plan *executionPlan // computed execution plan (cached) +} + +func (c *Compiler) DefaultRegoVersion() RegoVersion { + return c.defaultRegoVersion +} + +// CompilerStage defines the interface for stages in the compiler. +type CompilerStage func(*Compiler) *Error + +// StageID uniquely identifies a compiler stage. +type StageID string + +// Compiler stage identifiers. +// Please use them when you depend on a compiler stage, like via [ast.Compiler.WithStageAfterID]. +// There is no guarantee that they are stable across OPA versions, but using the identifiers +// at least lets you know what your attention is needed when you depend on the stages. 
+const ( + StageResolveRefs StageID = "ResolveRefs" + StageInitLocalVarGen StageID = "InitLocalVarGen" + StageRewriteRuleHeadRefs StageID = "RewriteRuleHeadRefs" + StageCheckKeywordOverrides StageID = "CheckKeywordOverrides" + StageCheckDuplicateImports StageID = "CheckDuplicateImports" + StageRemoveImports StageID = "RemoveImports" + StageSetModuleTree StageID = "SetModuleTree" + StageSetRuleTree StageID = "SetRuleTree" + StageRewriteLocalVars StageID = "RewriteLocalVars" + StageRewriteTemplateStrings StageID = "RewriteTemplateStrings" + StageCheckVoidCalls StageID = "CheckVoidCalls" + StageRewritePrintCalls StageID = "RewritePrintCalls" + StageRewriteExprTerms StageID = "RewriteExprTerms" + StageParseMetadataBlocks StageID = "ParseMetadataBlocks" + StageSetAnnotationSet StageID = "SetAnnotationSet" + StageRewriteRegoMetadataCalls StageID = "RewriteRegoMetadataCalls" + StageSetGraph StageID = "SetGraph" + StageRewriteComprehensionTerms StageID = "RewriteComprehensionTerms" + StageRewriteRefsInHead StageID = "RewriteRefsInHead" + StageRewriteWithValues StageID = "RewriteWithValues" + StageCheckRuleConflicts StageID = "CheckRuleConflicts" + StageCheckUndefinedFuncs StageID = "CheckUndefinedFuncs" + StageCheckSafetyRuleHeads StageID = "CheckSafetyRuleHeads" + StageCheckSafetyRuleBodies StageID = "CheckSafetyRuleBodies" + StageRewriteEquals StageID = "RewriteEquals" + StageRewriteDynamicTerms StageID = "RewriteDynamicTerms" + StageRewriteTestRulesForTracing StageID = "RewriteTestRulesForTracing" + StageCheckRecursion StageID = "CheckRecursion" + StageCheckTypes StageID = "CheckTypes" + StageCheckUnsafeBuiltins StageID = "CheckUnsafeBuiltins" + StageCheckDeprecatedBuiltins StageID = "CheckDeprecatedBuiltins" + StageBuildRuleIndices StageID = "BuildRuleIndices" + StageBuildComprehensionIndices StageID = "BuildComprehensionIndices" + StageBuildRequiredCapabilities StageID = "BuildRequiredCapabilities" + + // These only exist in the [ast.QueryCompiler]: + StageCheckSafety 
StageID = "CheckSafety" +) + +// AllStages returns the complete list of compiler stages in execution order. +func AllStages() []StageID { + return []StageID{ + StageResolveRefs, + StageInitLocalVarGen, + StageRewriteRuleHeadRefs, + StageCheckKeywordOverrides, + StageCheckDuplicateImports, + StageRemoveImports, + StageSetModuleTree, + StageSetRuleTree, + StageRewriteLocalVars, + StageRewriteTemplateStrings, + StageCheckVoidCalls, + StageRewritePrintCalls, + StageRewriteExprTerms, + StageParseMetadataBlocks, + StageSetAnnotationSet, + StageRewriteRegoMetadataCalls, + StageSetGraph, + StageRewriteComprehensionTerms, + StageRewriteRefsInHead, + StageRewriteWithValues, + StageCheckRuleConflicts, + StageCheckUndefinedFuncs, + StageCheckSafetyRuleHeads, + StageCheckSafetyRuleBodies, + StageRewriteEquals, + StageRewriteDynamicTerms, + StageRewriteTestRulesForTracing, + StageCheckRecursion, + StageCheckTypes, + StageCheckUnsafeBuiltins, + StageCheckDeprecatedBuiltins, + StageBuildRuleIndices, + StageBuildComprehensionIndices, + StageBuildRequiredCapabilities, + } +} + +// CompilerEvalMode allows toggling certain stages that are only +// needed for certain modes, Concretely, only "topdown" mode will +// have the compiler build comprehension and rule indices. +type CompilerEvalMode int + +const ( + // EvalModeTopdown (default) instructs the compiler to build rule + // and comprehension indices used by topdown evaluation. + EvalModeTopdown CompilerEvalMode = iota + + // EvalModeIR makes the compiler skip the stages for comprehension + // and rule indices. + EvalModeIR +) + +// CompilerStageDefinition defines a compiler stage +type CompilerStageDefinition struct { + Name string + MetricName string + Stage CompilerStage +} + +// executionPlan represents the complete ordered list of stages to execute. +type executionPlan struct { + stages []plannedStage +} + +// plannedStage represents a single stage in the execution plan. 
+type plannedStage struct { + name string + metricName string + f func() +} + +// RulesOptions defines the options for retrieving rules by Ref from the +// compiler. +type RulesOptions struct { + // IncludeHiddenModules determines if the result contains hidden modules, + // currently only the "system" namespace, i.e. "data.system.*". + IncludeHiddenModules bool +} + +// QueryContext contains contextual information for running an ad-hoc query. +// +// Ad-hoc queries can be run in the context of a package and imports may be +// included to provide concise access to data. +type QueryContext struct { + Package *Package + Imports []*Import +} + +// NewQueryContext returns a new QueryContext object. +func NewQueryContext() *QueryContext { + return &QueryContext{} +} + +// WithPackage sets the pkg on qc. +func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext { + if qc == nil { + qc = NewQueryContext() + } + qc.Package = pkg + return qc +} + +// WithImports sets the imports on qc. +func (qc *QueryContext) WithImports(imports []*Import) *QueryContext { + if qc == nil { + qc = NewQueryContext() + } + qc.Imports = imports + return qc +} + +// Copy returns a deep copy of qc. +func (qc *QueryContext) Copy() *QueryContext { + if qc == nil { + return nil + } + cpy := *qc + if cpy.Package != nil { + cpy.Package = qc.Package.Copy() + } + cpy.Imports = make([]*Import, len(qc.Imports)) + for i := range qc.Imports { + cpy.Imports[i] = qc.Imports[i].Copy() + } + return &cpy +} + +// QueryCompiler defines the interface for compiling ad-hoc queries. +type QueryCompiler interface { + + // Compile should be called to compile ad-hoc queries. The return value is + // the compiled version of the query. + Compile(q Body) (Body, error) + + // TypeEnv returns the type environment built after running type checking + // on the query. + TypeEnv() *TypeEnv + + // WithContext sets the QueryContext on the QueryCompiler. 
Subsequent calls + // to Compile will take the QueryContext into account. + WithContext(qctx *QueryContext) QueryCompiler + + // WithEnablePrintStatements enables print statements in queries compiled + // with the QueryCompiler. + WithEnablePrintStatements(yes bool) QueryCompiler + + // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not + // allow inside of queries. By default the query compiler inherits the + // compiler's unsafe built-in functions. This function allows callers to + // override that set. If an empty (non-nil) map is provided, all built-ins + // are allowed. + WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler + + // WithStageAfter registers a stage to run during query compilation after + // the named stage. + // + // Caution: Use [ast.QueryCompiler.WithStageAfterID] instead. It provides + // more (Golang) compile-time safety + WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler + + // WithStageAfterID registers a stage to run during query compilation after + // the named stage. + WithStageAfterID(after StageID, stage QueryCompilerStageDefinition) QueryCompiler + + // RewrittenVars maps generated vars in the compiled query to vars from the + // parsed query. For example, given the query "input := 1" the rewritten + // query would be "__local0__ = 1". The mapping would then be {__local0__: input}. + RewrittenVars() map[Var]Var + + // ComprehensionIndex returns an index data structure for the given comprehension + // term. If no index is found, returns nil. + ComprehensionIndex(term *Term) *ComprehensionIndex + + // WithStrict enables strict mode for the query compiler. + WithStrict(strict bool) QueryCompiler +} + +// QueryCompilerStage defines the interface for stages in the query compiler. 
type QueryCompilerStage func(QueryCompiler, Body) (Body, error)

// QueryCompilerStageDefinition defines a QueryCompiler stage
type QueryCompilerStageDefinition struct {
	Name       string
	MetricName string
	Stage      QueryCompilerStage
}

// stage is a built-in compiler stage: its identifier, the metric key recorded
// when timing it, and the compiler method implementing it.
type stage struct {
	name       StageID
	metricName string
	f          func()
}

// NewCompiler returns a new empty compiler.
func NewCompiler() *Compiler {

	c := &Compiler{
		Modules:               map[string]*Module{},
		RewrittenVars:         map[Var]Var{},
		Required:              &Capabilities{},
		maxErrs:               CompileErrorLimitDefault,
		mu:                    &sync.Mutex{},
		after:                 map[string][]CompilerStageDefinition{},
		unsafeBuiltinsMap:     map[string]struct{}{},
		deprecatedBuiltinsMap: map[string]struct{}{},
		comprehensionIndices:  map[*Term]*ComprehensionIndex{},
		debug:                 debug.Discard(),
		defaultRegoVersion:    DefaultRegoVersion,
	}

	c.ModuleTree = NewModuleTree(nil)
	c.RuleTree = NewRuleTree(c.ModuleTree)

	// The order of this table is significant: later stages rely on the
	// invariants established by earlier ones (see the inline notes below).
	c.stages = []stage{
		// Reference resolution should run first as it may be used to lazily
		// load additional modules. If any stages run before resolution, they
		// need to be re-run after resolution.
		{StageResolveRefs, "compile_stage_resolve_refs", c.resolveAllRefs},
		// The local variable generator must be initialized after references are
		// resolved and the dynamic module loader has run but before subsequent
		// stages that need to generate variables.
		{StageInitLocalVarGen, "compile_stage_init_local_var_gen", c.initLocalVarGen},
		{StageRewriteRuleHeadRefs, "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs},
		{StageCheckKeywordOverrides, "compile_stage_check_keyword_overrides", c.checkKeywordOverrides},
		{StageCheckDuplicateImports, "compile_stage_check_imports", c.checkImports},
		{StageRemoveImports, "compile_stage_remove_imports", c.removeImports},
		{StageSetModuleTree, "compile_stage_set_module_tree", c.setModuleTree},
		{StageSetRuleTree, "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs
		{StageRewriteLocalVars, "compile_stage_rewrite_local_vars", c.rewriteLocalVars},
		{StageRewriteTemplateStrings, "compile_stage_rewrite_template_strings", c.rewriteTemplateStrings},
		{StageCheckVoidCalls, "compile_stage_check_void_calls", c.checkVoidCalls},
		{StageRewritePrintCalls, "compile_stage_rewrite_print_calls", c.rewritePrintCalls},
		{StageRewriteExprTerms, "compile_stage_rewrite_expr_terms", c.rewriteExprTerms},
		{StageParseMetadataBlocks, "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks},
		{StageSetAnnotationSet, "compile_stage_set_annotationset", c.setAnnotationSet},
		{StageRewriteRegoMetadataCalls, "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls},
		{StageSetGraph, "compile_stage_set_graph", c.setGraph},
		{StageRewriteComprehensionTerms, "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms},
		{StageRewriteRefsInHead, "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead},
		{StageRewriteWithValues, "compile_stage_rewrite_with_values", c.rewriteWithModifiers},
		{StageCheckRuleConflicts, "compile_stage_check_rule_conflicts", c.checkRuleConflicts},
		{StageCheckUndefinedFuncs, "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs},
		{StageCheckSafetyRuleHeads, "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads},
		{StageCheckSafetyRuleBodies, "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies},
		{StageRewriteEquals, "compile_stage_rewrite_equals", c.rewriteEquals},
		{StageRewriteDynamicTerms, "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms},
		{StageRewriteTestRulesForTracing, "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms
		{StageCheckRecursion, "compile_stage_check_recursion", c.checkRecursion},
		{StageCheckTypes, "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion
		// NOTE: the next two metric names read "compile_state_" (sic); they are
		// kept as-is because they are externally visible metric keys.
		{StageCheckUnsafeBuiltins, "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins},
		{StageCheckDeprecatedBuiltins, "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins},
		{StageBuildRuleIndices, "compile_stage_rebuild_indices", c.buildRuleIndices},
		{StageBuildComprehensionIndices, "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices},
		{StageBuildRequiredCapabilities, "compile_stage_build_required_capabilities", c.buildRequiredCapabilities},
	}

	return c
}

// SetErrorLimit sets the number of errors the compiler can encounter before it
// quits. Zero or a negative number indicates no limit.
func (c *Compiler) SetErrorLimit(limit int) *Compiler {
	c.maxErrs = limit
	return c
}

// WithEnablePrintStatements enables print statements inside of modules compiled
// by the compiler. If print statements are not enabled, calls to print() are
// erased at compile-time.
func (c *Compiler) WithEnablePrintStatements(yes bool) *Compiler {
	c.enablePrintStatements = yes
	return c
}

// WithPathConflictsCheck enables base-virtual document conflict
// detection. The compiler will check that rules don't overlap with
// paths that exist as determined by the provided callable.
+func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler { + c.pathExists = fn + return c +} + +// WithPathConflictsCheckRoots enables checking path conflicts from the specified root instead +// of the top root node. Limiting conflict checks to a known set of roots, such as bundle roots, +// improves performance. Each root has the format of a "/"-delimited string, excluding the "data" +// root document. +func (c *Compiler) WithPathConflictsCheckRoots(rootPaths []string) *Compiler { + c.pathConflictCheckRoots = rootPaths + return c +} + +// WithStageAfter registers a stage to run during compilation after +// the named stage. +// +// Caution: Consider using [ast.QueryCompiler.WithStageAfterID] instead. It provides +// more (Golang) compile-time safety +func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler { + c.after[after] = append(c.after[after], stage) + c.plan = nil // invalidate cached plan + return c +} + +// WithStageAfterID registers a stage to run during compilation after +// the identified stage. +func (c *Compiler) WithStageAfterID(after StageID, stage CompilerStageDefinition) *Compiler { + return c.WithStageAfter(string(after), stage) +} + +// WithSkipStages configures the compiler to skip the specified stages during +// compilation. This invalidates any cached execution plan. +func (c *Compiler) WithSkipStages(stages ...StageID) *Compiler { + if c.skipStages == nil { + c.skipStages = make(map[StageID]struct{}, len(stages)) + } + for _, s := range stages { + c.skipStages[s] = struct{}{} + } + c.plan = nil // invalidate cached plan + return c +} + +// WithOnlyStagesUpTo configures the compiler to run only stages up to and +// including the specified target stage. All stages after the target will be skipped. 
func (c *Compiler) WithOnlyStagesUpTo(target StageID) *Compiler {
	allStages := AllStages()
	i := slices.Index(allStages, target)
	if i == -1 {
		// Unknown target: leave the stage configuration untouched.
		return c
	}
	return c.WithSkipStages(allStages[i+1:]...)
}

// WithMetrics will set a metrics.Metrics and be used for profiling
// the Compiler instance.
func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler {
	c.metrics = metrics
	return c
}

// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller
// to specify the set of built-in functions available to the policy. In the future, capabilities
// may be able to restrict access to other language features. Capabilities allow callers to check
// if policies are compatible with a particular version of OPA. If policies are a compiled for a
// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them
// successfully.
func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler {
	c.capabilities = capabilities
	return c
}

// Capabilities returns the capabilities enabled during compilation.
func (c *Compiler) Capabilities() *Capabilities {
	return c.capabilities
}

// WithDebug sets where debug messages are written to. Passing `nil` has no
// effect.
func (c *Compiler) WithDebug(sink io.Writer) *Compiler {
	if sink != nil {
		c.debug = debug.New(sink)
	}
	return c
}

// WithBuiltins is deprecated.
//
// Deprecated: Use WithCapabilities instead.
func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler {
	c.customBuiltins = maps.Clone(builtins)
	return c
}

// WithUnsafeBuiltins is deprecated.
//
// Deprecated: Use WithCapabilities instead.
func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler {
	maps.Copy(c.unsafeBuiltinsMap, unsafeBuiltins)
	return c
}

// WithStrict toggles strict mode in the compiler.
func (c *Compiler) WithStrict(strict bool) *Compiler {
	c.strict = strict
	return c
}

// WithKeepModules enables retaining unprocessed modules in the compiler.
// Note that the modules aren't copied on the way in or out -- so when
// accessing them via ParsedModules(), mutations will occur in the module
// map that was passed into Compile().
func (c *Compiler) WithKeepModules(y bool) *Compiler {
	c.keepModules = y
	return c
}

// WithUseTypeCheckAnnotations use schema annotations during type checking
func (c *Compiler) WithUseTypeCheckAnnotations(enabled bool) *Compiler {
	c.useTypeCheckAnnotations = enabled
	return c
}

// WithAllowUndefinedFunctionCalls sets whether the compiler tolerates calls to
// undefined functions. NOTE(review): the flag is consumed elsewhere in this
// file; confirm exact semantics against the undefined-function check.
func (c *Compiler) WithAllowUndefinedFunctionCalls(allow bool) *Compiler {
	c.allowUndefinedFuncCalls = allow
	return c
}

// WithEvalMode allows setting the CompilerEvalMode of the compiler
func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler {
	c.evalMode = e
	return c
}

// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables,
// so they can be accessed by tracing.
func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler {
	c.rewriteTestRulesForTracing = rewrite
	return c
}

// ParsedModules returns the parsed, unprocessed modules from the compiler.
// It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`.
// The map includes all modules loaded via the ModuleLoader, if one was used.
func (c *Compiler) ParsedModules() map[string]*Module {
	return c.parsedModules
}

// QueryCompiler returns a QueryCompiler that operates on a shallow copy of c,
// so it can be configured without mutating this compiler.
func (c *Compiler) QueryCompiler() QueryCompiler {
	c.init()
	c0 := *c
	return newQueryCompiler(&c0)
}

// Compile runs the compilation process on the input modules. The compiled
// version of the modules and associated data structures are stored on the
// compiler. If the compilation process fails for any reason, the compiler will
// contain a slice of errors.
+func (c *Compiler) Compile(modules map[string]*Module) { + + c.init() + + c.Modules = make(map[string]*Module, len(modules)) + c.sorted = make([]string, 0, len(modules)) + + if c.keepModules { + c.parsedModules = make(map[string]*Module, len(modules)) + } else { + c.parsedModules = nil + } + + for k, v := range modules { + c.Modules[k] = v.Copy() + c.sorted = append(c.sorted, k) + if c.parsedModules != nil { + c.parsedModules[k] = v + } + } + + sort.Strings(c.sorted) + + c.compile() +} + +// WithSchemas sets a schemaSet to the compiler +func (c *Compiler) WithSchemas(schemas *SchemaSet) *Compiler { + c.schemaSet = schemas + return c +} + +// Failed returns true if a compilation error has been encountered. +func (c *Compiler) Failed() bool { + return len(c.Errors) > 0 +} + +// ComprehensionIndex returns a data structure specifying how to index comprehension +// results so that callers do not have to recompute the comprehension more than once. +// If no index is found, returns nil. +func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex { + return c.comprehensionIndices[term] +} + +// GetArity returns the number of args a function referred to by ref takes. If +// ref refers to built-in function, the built-in declaration is consulted, +// otherwise, the ref is used to perform a ruleset lookup. +func (c *Compiler) GetArity(ref Ref) int { + if bi := c.builtins[ref.String()]; bi != nil { + return bi.Decl.Arity() + } + rules := c.GetRulesExact(ref) + if len(rules) == 0 { + return -1 + } + return len(rules[0].Head.Args) +} + +// GetRulesExact returns a slice of rules referred to by the reference. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[k] = v { ... } # rule1 +// p[k1] = v1 { ... } # rule2 +// +// The following calls yield the rules on the right. 
//	GetRulesExact("data.a.b.c.p") => [rule1, rule2]
//	GetRulesExact("data.a.b.c.p.x") => nil
//	GetRulesExact("data.a.b.c") => nil
func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) {
	node := c.RuleTree

	for _, x := range ref {
		if node = node.Child(x.Value); node == nil {
			return nil
		}
	}

	return node.Values
}

// GetRulesForVirtualDocument returns a slice of rules that produce the virtual
// document referred to by the reference.
//
// E.g., given the following module:
//
//	package a.b.c
//
//	p[k] = v { ... } # rule1
//	p[k1] = v1 { ... } # rule2
//
// The following calls yield the rules on the right.
//
//	GetRulesForVirtualDocument("data.a.b.c.p") => [rule1, rule2]
//	GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2]
//	GetRulesForVirtualDocument("data.a.b.c") => nil
func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) {

	node := c.RuleTree

	for _, x := range ref {
		if node = node.Child(x.Value); node == nil {
			return nil
		}
		// Stop at the first node that defines rules: that rule set produces
		// the virtual document covering the remainder of the ref.
		if len(node.Values) > 0 {
			return node.Values
		}
	}

	return node.Values
}

// GetRulesWithPrefix returns a slice of rules that share the prefix ref.
//
// E.g., given the following module:
//
//	package a.b.c
//
//	p[x] = y { ... } # rule1
//	p[k] = v { ... } # rule2
//	q { ... } # rule3
//
// The following calls yield the rules on the right.
//
//	GetRulesWithPrefix("data.a.b.c.p") => [rule1, rule2]
//	GetRulesWithPrefix("data.a.b.c.p.a") => nil
//	GetRulesWithPrefix("data.a.b.c") => [rule1, rule2, rule3]
func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) {

	node := c.RuleTree

	for _, x := range ref {
		if node = node.Child(x.Value); node == nil {
			return nil
		}
	}

	var acc func(node *TreeNode)

	// Accumulate rules from the subtree rooted at node, skipping hidden
	// children.
	acc = func(node *TreeNode) {
		rules = append(rules, node.Values...)
		for _, child := range node.Children {
			if child.Hide {
				continue
			}
			acc(child)
		}
	}

	acc(node)

	return rules
}

// GetRules returns a slice of rules that are referred to by ref.
//
// E.g., given the following module:
//
//	package a.b.c
//
//	p[x] = y { q[x] = y; ... } # rule1
//	q[x] = y { ... } # rule2
//
// The following calls yield the rules on the right.
//
//	GetRules("data.a.b.c.p") => [rule1]
//	GetRules("data.a.b.c.p.x") => [rule1]
//	GetRules("data.a.b.c.q") => [rule2]
//	GetRules("data.a.b.c") => [rule1, rule2]
//	GetRules("data.a.b.d") => nil
//
// Note: the result is deduplicated via a set, so its order is unspecified.
func (c *Compiler) GetRules(ref Ref) (rules []*Rule) {

	set := map[*Rule]struct{}{}

	for _, rule := range c.GetRulesForVirtualDocument(ref) {
		set[rule] = struct{}{}
	}

	for _, rule := range c.GetRulesWithPrefix(ref) {
		set[rule] = struct{}{}
	}

	for rule := range set {
		rules = append(rules, rule)
	}

	return rules
}

// GetRulesDynamic returns a slice of rules that could be referred to by a ref.
//
// Deprecated: use GetRulesDynamicWithOpts
func (c *Compiler) GetRulesDynamic(ref Ref) []*Rule {
	return c.GetRulesDynamicWithOpts(ref, RulesOptions{})
}

// GetRulesDynamicWithOpts returns a slice of rules that could be referred to by
// a ref.
// When parts of the ref are statically known, we use that information to narrow
// down which rules the ref could refer to, but in the most general case this
// will be an over-approximation.
//
// E.g., given the following modules:
//
//	package a.b.c
//
//	r1 = 1 # rule1
//
// and:
//
//	package a.d.c
//
//	r2 = 2 # rule2
//
// The following calls yield the rules on the right.
//	GetRulesDynamicWithOpts("data.a[x].c[y]", opts) => [rule1, rule2]
//	GetRulesDynamicWithOpts("data.a[x].c.r2", opts) => [rule2]
//	GetRulesDynamicWithOpts("data.a.b[x][y]", opts) => [rule1]
//
// Using the RulesOptions parameter, the inclusion of hidden modules can be
// controlled:
//
// With
//
//	package system.main
//
//	r3 = 3 # rule3
//
// We'd get this result:
//
//	GetRulesDynamicWithOpts("data[x]", RulesOptions{IncludeHiddenModules: true}) => [rule1, rule2, rule3]
//
// Without the options, it would be excluded.
func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule {
	node := c.RuleTree

	set := map[*Rule]struct{}{}
	var walk func(node *TreeNode, i int)
	walk = func(node *TreeNode, i int) {
		switch {
		case i >= len(ref):
			// We've reached the end of the reference and want to collect everything
			// under this "prefix".
			node.DepthFirst(func(descendant *TreeNode) bool {
				insertRules(set, descendant.Values)
				if opts.IncludeHiddenModules {
					return false
				}
				return descendant.Hide
			})

		case i == 0 || IsConstant(ref[i].Value):
			// The head of the ref is always grounded. In case another part of the
			// ref is also grounded, we can lookup the exact child. If it's not found
			// we can immediately return...
			if child := node.Child(ref[i].Value); child != nil {
				if len(child.Values) > 0 {
					// Add any rules at this position
					insertRules(set, child.Values)
				}
				// There might still be "sub-rules" contributing key-value "overrides" for e.g. partial object rules, continue walking
				walk(child, i+1)
			} else {
				return
			}

		default:
			// This part of the ref is a dynamic term. We can't know what it refers
			// to and will just need to try all of the children.
			for _, child := range node.Children {
				if child.Hide && !opts.IncludeHiddenModules {
					continue
				}
				insertRules(set, child.Values)
				walk(child, i+1)
			}
		}
	}

	walk(node, 0)
	rules := make([]*Rule, 0, len(set))
	for rule := range set {
		rules = append(rules, rule)
	}
	return rules
}

// Utility: add all rule values to the set.
func insertRules(set map[*Rule]struct{}, rules []*Rule) {
	for _, rule := range rules {
		set[rule] = struct{}{}
	}
}

// RuleIndex returns a RuleIndex built for the rule set referred to by path.
// The path must refer to the rule set exactly, i.e., given a rule set at path
// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a
// RuleIndex built for the rule.
func (c *Compiler) RuleIndex(path Ref) RuleIndex {
	if node := c.RuleTree.Find(path); node != nil {
		return node.Index
	}
	return nil
}

// PassesTypeCheck determines whether the given body passes type checking
func (c *Compiler) PassesTypeCheck(body Body) bool {
	checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)
	env := c.TypeEnv
	_, errs := checker.CheckBody(env, body)
	return len(errs) == 0
}

// PassesTypeCheckRules determines whether the given rules passes type checking
func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors {
	elems := make([]util.T, 0, len(rules))

	for _, rule := range rules {
		elems = append(elems, rule)
	}

	// Load the global input schema if one was provided.
	if c.schemaSet != nil {
		if schema := c.schemaSet.Get(SchemaRootRef); schema != nil {

			var allowNet []string
			if c.capabilities != nil {
				allowNet = c.capabilities.AllowNet
			}

			tpe, err := loadSchema(schema, allowNet)
			if err != nil {
				return Errors{newErrorString(TypeErr, nil, err.Error())}
			}
			c.inputType = tpe
		}
	}

	var as *AnnotationSet
	if c.useTypeCheckAnnotations {
		as = c.annotationSet
	}

	checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType)

	// Lazily build the type environment (and built-in function table) the
	// first time type checking runs outside a full Compile().
	if c.TypeEnv == nil {
		if c.capabilities == nil {
			c.capabilities = CapabilitiesForThisVersion()
		}

		c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))

		for _, bi := range c.capabilities.Builtins {
			c.builtins[bi.Name] = bi
		}

		maps.Copy(c.builtins, c.customBuiltins)

		c.TypeEnv = checker.Env(c.builtins)
	}

	_, errs := checker.CheckTypes(c.TypeEnv, elems, as)
	return errs
}

// ModuleLoader defines the interface that callers can implement to enable lazy
// loading of modules during compilation.
type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error)

// WithModuleLoader sets f as the ModuleLoader on the compiler.
//
// The compiler will invoke the ModuleLoader after resolving all references in
// the current set of input modules. The ModuleLoader can return a new
// collection of parsed modules that are to be included in the compilation
// process. This process will repeat until the ModuleLoader returns an empty
// collection or an error. If an error is returned, compilation will stop
// immediately.
func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler {
	c.moduleLoader = f
	return c
}

// WithDefaultRegoVersion sets the default Rego version to use when a module doesn't specify one;
// such as when it's hand-crafted instead of parsed.
+func (c *Compiler) WithDefaultRegoVersion(regoVersion RegoVersion) *Compiler { + c.defaultRegoVersion = regoVersion + return c +} + +// buildExecutionPlan creates the unified list of stages to execute, including +// both main stages and "after" stages, with filtering applied. +func (c *Compiler) buildExecutionPlan() *executionPlan { + plan := &executionPlan{ + stages: make([]plannedStage, 0, len(c.stages)*2), + } + + for _, s := range c.stages { + if _, skip := c.skipStages[s.name]; skip { + continue + } + + plan.stages = append(plan.stages, plannedStage{name: string(s.name), metricName: s.metricName, f: s.f}) + + for _, a := range c.after[string(s.name)] { + if _, skip := c.skipStages[StageID(a.Name)]; skip { + continue + } + + afterStage := a // Capture variables in closure properly + plan.stages = append(plan.stages, plannedStage{ + name: afterStage.Name, + metricName: afterStage.MetricName, + f: func() { + if err := afterStage.Stage(c); err != nil { + c.err(err) + } + }, + }) + } + } + + return plan +} + +// getOrBuildPlan ensures we have a valid execution plan. +func (c *Compiler) getOrBuildPlan() *executionPlan { + if c.plan == nil { + c.plan = c.buildExecutionPlan() + } + return c.plan +} + +// StagesToRun returns the list of stage IDs that will be executed during +// compilation, in execution order. This includes both main stages and any +// registered "after" stages. 
func (c *Compiler) StagesToRun() []StageID {
	plan := c.getOrBuildPlan()
	result := make([]StageID, len(plan.stages))
	for i, s := range plan.stages {
		result[i] = StageID(s.name)
	}
	return result
}

// counterAdd increments the named counter by n when metrics collection is
// enabled; it is a no-op otherwise.
func (c *Compiler) counterAdd(name string, n uint64) {
	if c.metrics == nil {
		return
	}
	c.metrics.Counter(name).Add(n)
}

// buildRuleIndices attaches a rule index to each rule-bearing node of the
// rule tree where an index can be built.
func (c *Compiler) buildRuleIndices() {

	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
		if len(node.Values) == 0 {
			return false
		}
		rules := node.Values
		hasNonGroundRef := false
		for _, r := range rules {
			hasNonGroundRef = !r.Head.Ref().IsGround()
			if hasNonGroundRef {
				break
			}
		}

		if hasNonGroundRef {
			// Collect children to ensure that all rules within the extent of a rule with a general ref
			// are found on the same index. E.g. the following rules should be indexed under data.a.b.c:
			//
			// package a
			// b.c[x].e := 1 { x := input.x }
			// b.c.d := 2
			// b.c.d2.e[x] := 3 { x := input.x }
			for _, child := range node.Children {
				child.DepthFirst(func(c *TreeNode) bool {
					rules = append(rules, c.Values...)
					return false
				})
			}
		}

		index := newBaseDocEqIndex(func(ref Ref) bool {
			return isVirtual(c.RuleTree, ref.GroundPrefix())
		})
		if index.Build(rules) {
			node.Index = index
		}
		return hasNonGroundRef // currently, we don't allow those branches to go deeper
	})
}

// buildComprehensionIndices computes comprehension indices for every rule body
// and records how many were built in the compiler metrics.
func (c *Compiler) buildComprehensionIndices() {
	vis := varVisitorPool.Get()

	for _, name := range c.sorted {
		WalkRules(c.Modules[name], func(r *Rule) bool {
			vis = vis.Clear()
			vis.vars.Update(ReservedVars)
			if len(r.Head.Args) > 0 {
				vis.WalkArgs(r.Head.Args)
			}
			n := buildComprehensionIndices(c.debug, c.GetArity, vis.vars, c.RewrittenVars, r.Body, c.comprehensionIndices)
			c.counterAdd(compileStageComprehensionIndexBuild, n)
			return false
		})
	}

	varVisitorPool.Put(vis)
}

// futureKeywordsPrefix is the ref prefix of "future.keywords" imports.
var futureKeywordsPrefix = Ref{FutureRootDocument, InternedTerm("keywords")}

// buildRequiredCapabilities updates the required capabilities on the compiler
// to include any keyword and feature dependencies present in the modules. The
// built-in function dependencies will have already been added by the type
// checker.
+func (c *Compiler) buildRequiredCapabilities() { + + features := map[string]struct{}{} + + // extract required keywords from modules + + keywords := map[string]struct{}{} + + for _, name := range c.sorted { + for _, imp := range c.imports[name] { + path := imp.Path.Value.(Ref) + switch { + case path.Equal(RegoV1CompatibleRef): + if !c.moduleIsRegoV1(c.Modules[name]) { + features[FeatureRegoV1Import] = struct{}{} + } + case path.HasPrefix(futureKeywordsPrefix): + if len(path) == 2 { + if c.moduleIsRegoV1(c.Modules[name]) { + for kw := range futureKeywords { + keywords[kw] = struct{}{} + } + } else { + for kw := range allFutureKeywords { + keywords[kw] = struct{}{} + } + } + } else { + kw := string(path[2].Value.(String)) + if c.moduleIsRegoV1(c.Modules[name]) { + for allowedKw := range futureKeywords { + if kw == allowedKw { + keywords[kw] = struct{}{} + break + } + } + } else { + for allowedKw := range allFutureKeywords { + if kw == allowedKw { + keywords[kw] = struct{}{} + break + } + } + } + } + } + } + } + + c.Required.FutureKeywords = util.KeysSorted(keywords) + + // extract required features from modules + + for _, name := range c.sorted { + mod := c.Modules[name] + + if c.moduleIsRegoV1(mod) { + features[FeatureRegoV1] = struct{}{} + } else { + for _, rule := range mod.Rules { + refLen := len(rule.Head.Reference) + if refLen >= 3 { + if refLen > len(rule.Head.Reference.ConstantPrefix()) { + features[FeatureRefHeads] = struct{}{} + } else { + features[FeatureRefHeadStringPrefixes] = struct{}{} + } + } + } + } + } + + for i, bi := range c.Required.Builtins { + c.Required.Builtins[i] = bi.Minimal() + + if bi.Name == InternalTemplateString.Name { + features[FeatureTemplateStrings] = struct{}{} + } + } + + c.Required.Features = util.KeysSorted(features) +} + +// checkRecursion ensures that there are no recursive definitions, i.e., there are +// no cycles in the Graph. 
func (c *Compiler) checkRecursion() {
	eq := func(a, b util.T) bool {
		return a.(*Rule) == b.(*Rule)
	}

	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
		for _, rule := range node.Values {
			// Check the rule and each of its else-branches for a self-cycle.
			for node := rule; node != nil; node = node.Else {
				c.checkSelfPath(node.Loc(), eq, node, node)
			}
		}
		return false
	})
}

// checkSelfPath records a recursion error if a path exists from a back to b in
// the dependency graph.
func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) {
	tr := NewGraphTraversal(c.Graph)
	if p := util.DFSPath(tr, eq, a, b); len(p) > 0 {
		n := make([]string, 0, len(p))
		for _, x := range p {
			n = append(n, astNodeToString(x))
		}
		if !c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> "))) {
			return
		}
	}
}

// astNodeToString renders a graph node (always a *Rule here) as its ref.
func astNodeToString(x any) string {
	return x.(*Rule).Ref().String()
}

// checkRuleConflicts ensures that rules definitions are not in conflict.
func (c *Compiler) checkRuleConflicts() {
	rw := rewriteVarsInRef(c.RewrittenVars)

	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
		if len(node.Values) == 0 {
			return false // go deeper
		}

		kinds := make(map[RuleKind]struct{}, len(node.Values))
		completeRules := 0
		partialRules := 0
		arities := make(map[int]struct{}, len(node.Values))
		name := ""
		var conflicts []Ref
		defaultRules := make([]*Rule, 0)

		for _, rule := range node.Values {
			r := rule
			ref := r.Ref()
			name = rw(ref.CopyNonGround()).String() // varRewriter operates in-place
			kinds[r.Head.RuleKind()] = struct{}{}
			arities[len(r.Head.Args)] = struct{}{}
			if r.Default {
				defaultRules = append(defaultRules, r)
			}

			// Single-value rules may not have any other rules in their extent.
			// Rules with vars in their ref are allowed to have rules inside their extent.
			// Only the ground portion (terms before the first var term) of a rule's ref is considered when determining
			// whether it's inside the extent of another (c.RuleTree is organized this way already).
			// These pairs are invalid:
			//
			//	data.p.q.r { true }          # data.p.q is { "r": true }
			//	data.p.q.r.s { true }
			//
			//	data.p.q.r { true }
			//	data.p.q.r[s].t { s = input.key }
			//
			// But this is allowed:
			//
			//	data.p.q.r { true }
			//	data.p.q[r].s.t { r = input.key }
			//
			//	data.p[r] := x { r = input.key; x = input.bar }
			//	data.p.q[r] := x { r = input.key; x = input.bar }
			//
			//	data.p.q[r] { r := input.r }
			//	data.p.q.r.s { true }
			//
			//	data.p.q[r] = 1 { r := "r" }
			//	data.p.q.s = 2
			//
			//	data.p[q][r] { q := input.q; r := input.r }
			//	data.p.q.r { true }
			//
			//	data.p.q[r] { r := input.r }
			//	data.p[q].r { q := input.q }
			//
			//	data.p.q[r][s] { r := input.r; s := input.s }
			//	data.p[q].r.s { q := input.q }

			if ref.IsGround() && len(node.Children) > 0 {
				conflicts = node.flattenChildren()
			}

			if r.Head.RuleKind() == SingleValue && r.Head.Ref().IsGround() {
				completeRules++
			} else {
				partialRules++
			}
		}

		switch {
		case conflicts != nil:
			return !c.err(NewError(TypeErr, node.Values[0].Loc(), "rule %v conflicts with %v", name, conflicts))

		case len(kinds) > 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1):
			return !c.err(NewError(TypeErr, node.Values[0].Loc(), "conflicting rules %v found", name))

		case len(defaultRules) > 1:
			buf := append(append(append(make([]byte, 0, 64), "multiple default rules "...), name...), " found at "...)
			buf, _ = defaultRules[0].Loc().AppendText(buf)

			for _, next := range defaultRules[1:] {
				buf, _ = next.Loc().AppendText(append(buf, ", "...))
			}

			return !c.err(&Error{
				Code:     TypeErr,
				Location: defaultRules[0].Module.Package.Loc(),
				Message:  util.ByteSliceToString(buf),
			})
		}

		return false
	})

	if c.pathExists != nil {
		for _, err := range CheckPathConflicts(c, c.pathExists) {
			c.err(err)
		}
	}

	// NOTE(sr): depthfirst might better use sorted for stable errs?
	c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool {
		for _, mod := range node.Modules {
			for _, rule := range mod.Rules {
				ref := rule.Head.Ref().GroundPrefix()
				// Rules with a dynamic portion in their ref are exempted, as a conflict within the dynamic portion
				// can only be detected at eval-time.
				if len(ref) < len(rule.Head.Ref()) {
					continue
				}

				childNode, tail := node.find(ref)
				if childNode != nil && len(tail) == 0 {
					for _, childMod := range childNode.Modules {
						// Avoid recursively checking a module for equality unless we know it's a possible self-match.
						if childMod.Equal(mod) {
							continue // don't self-conflict
						}
						if !c.err(NewError(TypeErr, mod.Package.Loc(), "%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc())) {
							return true
						}
					}
				}
			}
		}
		return false
	})
}

// checkUndefinedFuncs reports calls to undefined functions in every module.
func (c *Compiler) checkUndefinedFuncs() {
	for _, name := range c.sorted {
		m := c.Modules[name]
		c.err(checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars)...)
	}
}

// checkUndefinedFuncs walks all call expressions under x and returns errors
// for calls to undefined functions and for arity mismatches.
func checkUndefinedFuncs(env *TypeEnv, x any, arity func(Ref) int, rwVars map[Var]Var) Errors {

	var errs Errors

	WalkExprs(x, func(expr *Expr) bool {
		if !expr.IsCall() {
			return false
		}
		ref := expr.Operator()
		if arity := arity(ref); arity >= 0 {
			operands := len(expr.Operands())
			if expr.Generated { // an output var was added
				if !expr.IsEquality() && operands != arity+1 {
					ref = rewriteVarsInRef(rwVars)(ref)
					errs = append(errs, arityMismatchError(env, ref, expr, arity, operands-1))
					return true
				}
			} else { // either output var or not
				if operands != arity && operands != arity+1 {
					ref = rewriteVarsInRef(rwVars)(ref)
					errs = append(errs, arityMismatchError(env, ref, expr, arity, operands))
					return true
				}
			}
			return false
		}
		ref = rewriteVarsInRef(rwVars)(ref)
		errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref))
		return true
	})

	return errs
}

// arityMismatchError builds a type error for a call with the wrong number of
// arguments; exp is the declared arity, act the number of arguments supplied.
func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error {
	if want, ok := env.Get(f).(*types.Function); ok { // generate richer error for built-in functions
		have := make([]types.Type, len(expr.Operands()))
		for i, op := range expr.Operands() {
			have[i] = env.Get(op)
		}
		return newArgError(expr.Loc(), f, "arity mismatch", have, want.NamedFuncArgs())
	}
	if act != 1 {
		return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d arguments", f, exp, act)
	}
	return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d argument", f, exp, act)
}

// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target
// positions of built-in expressions will be bound when evaluating the rule from left
// to right, re-ordering as necessary.
+func (c *Compiler) checkSafetyRuleBodies() { + vis := varVisitorPool.Get() + + for _, name := range c.sorted { + m := c.Modules[name] + WalkRules(m, func(r *Rule) bool { + vis = vis.Clear() + // vis.vars == safe + vis.vars.Update(ReservedVars) + if len(r.Head.Args) > 0 { + vis.WalkArgs(r.Head.Args) + } + r.Body = c.checkBodySafety(vis.vars, r.Body) + return false + }) + } + + varVisitorPool.Put(vis) +} + +func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body { + reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b) + if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 { + c.err(errs...) + return b + } + return reordered +} + +// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting +// variables during the safety check. This has to be exported because it's relied on +// by the copy propagation implementation in topdown. +var SafetyCheckVisitorParams = VarVisitorParams{ + SkipRefCallHead: true, + SkipClosures: true, +} + +// checkSafetyRuleHeads ensures that variables appearing in the head of a +// rule also appear in the body. 
+func (c *Compiler) checkSafetyRuleHeads() { + vis := varVisitorPool.Get() + + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(r *Rule) bool { + if headMayHaveVars(r.Head) { + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) + vis.WalkBody(r.Body) + + vis = vis.WithParams(VarVisitorParams{}) + if len(r.Head.Args) > 0 { + vis.WalkArgs(r.Head.Args) + } + + vars := r.Head.Vars() + if vars.DiffCount(vis.vars) > 0 { + unsafe := vars.Diff(vis.vars) + for v := range unsafe { + if w, ok := c.RewrittenVars[v]; ok { + v = w + } + if !v.IsGenerated() { + if !c.err(NewError(UnsafeVarErr, vars[v].Location, "var %v is unsafe", v)) { + return true + } + } + } + } + } + return false + }) + } + + varVisitorPool.Put(vis) +} + +func compileSchema(goSchema any, allowNet []string) (*gojsonschema.Schema, error) { + gojsonschema.SetAllowNet(allowNet) + + var refLoader gojsonschema.JSONLoader + sl := gojsonschema.NewSchemaLoader() + + if goSchema != nil { + refLoader = gojsonschema.NewGoLoader(goSchema) + } else { + return nil, errors.New("no schema as input to compile") + } + schemasCompiled, err := sl.Compile(refLoader) + if err != nil { + return nil, fmt.Errorf("unable to compile the schema: %w", err) + } + return schemasCompiled, nil +} + +func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema, error) { + if len(schemas) == 0 { + return nil, nil + } + var result = schemas[0] + + for i := range schemas { + if len(schemas[i].PropertiesChildren) > 0 { + if !schemas[i].Types.Contains("object") { + if err := schemas[i].Types.Add("object"); err != nil { + return nil, errors.New("unable to set the type in schemas") + } + } + } else if len(schemas[i].ItemsChildren) > 0 { + if !schemas[i].Types.Contains("array") { + if err := schemas[i].Types.Add("array"); err != nil { + return nil, errors.New("unable to set the type in schemas") + } + } + } + } + + for i := 1; i < len(schemas); i++ { + if result.Types.String() != schemas[i].Types.String() { + 
return nil, fmt.Errorf("unable to merge these schemas: type mismatch: %v and %v", result.Types.String(), schemas[i].Types.String()) + } else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 { + result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...) + } else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 { + for j := range len(schemas[i].ItemsChildren) { + if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) { + result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j]) + } + if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() { + return nil, errors.New("unable to merge these schemas") + } + } + } + } + return result, nil +} + +type schemaParser struct { + definitionCache map[string]*cachedDef +} + +type cachedDef struct { + properties []*types.StaticProperty +} + +func newSchemaParser() *schemaParser { + return &schemaParser{ + definitionCache: map[string]*cachedDef{}, + } +} + +func (parser *schemaParser) parseSchema(schema any) (types.Type, error) { + return parser.parseSchemaWithPropertyKey(schema, "") +} + +func (parser *schemaParser) parseSchemaWithPropertyKey(schema any, propertyKey string) (types.Type, error) { + subSchema, ok := schema.(*gojsonschema.SubSchema) + if !ok { + return nil, fmt.Errorf("unexpected schema type %v", subSchema) + } + + // Handle referenced schemas, returns directly when a $ref is found + if subSchema.RefSchema != nil { + if existing, ok := parser.definitionCache[subSchema.Ref.String()]; ok { + return types.NewObject(existing.properties, nil), nil + } + return parser.parseSchemaWithPropertyKey(subSchema.RefSchema, subSchema.Ref.String()) + } + + // Handle anyOf + if subSchema.AnyOf != nil { + var orType types.Type + 
+ + // If there is a core schema, find its type first + if subSchema.Types.IsTyped() { + copySchema := *subSchema + copySchemaRef := &copySchema + copySchemaRef.AnyOf = nil + coreType, err := parser.parseSchema(copySchemaRef) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v: %w", subSchema, err) + } + + // Only add Object type with static props to orType + if objType, ok := coreType.(*types.Object); ok { + if objType.StaticProperties() != nil && objType.DynamicProperties() == nil { + orType = types.Or(orType, coreType) + } + } + } + + // Iterate through every property of AnyOf and add it to orType + for _, pSchema := range subSchema.AnyOf { + newtype, err := parser.parseSchema(pSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) + } + orType = types.Or(newtype, orType) + } + + return orType, nil + } + + if subSchema.AllOf != nil { + subSchemaArray := subSchema.AllOf + allOfResult, err := mergeSchemas(subSchemaArray...) + if err != nil { + return nil, err + } + + if subSchema.Types.IsTyped() { + if (subSchema.Types.Contains("object") && allOfResult.Types.Contains("object")) || (subSchema.Types.Contains("array") && allOfResult.Types.Contains("array")) { + objectOrArrayResult, err := mergeSchemas(allOfResult, subSchema) + if err != nil { + return nil, err + } + return parser.parseSchema(objectOrArrayResult) + } else if subSchema.Types.String() != allOfResult.Types.String() { + return nil, errors.New("unable to merge these schemas") + } + } + return parser.parseSchema(allOfResult) + } + + if subSchema.Types.IsTyped() { + if subSchema.Types.Contains("boolean") { + return types.B, nil + + } else if subSchema.Types.Contains("string") { + return types.S, nil + + } else if subSchema.Types.Contains("integer") || subSchema.Types.Contains("number") { + return types.N, nil + + } else if subSchema.Types.Contains("object") { + if len(subSchema.PropertiesChildren) > 0 { + def := &cachedDef{ + properties: 
make([]*types.StaticProperty, 0, len(subSchema.PropertiesChildren)), + } + for _, pSchema := range subSchema.PropertiesChildren { + def.properties = append(def.properties, types.NewStaticProperty(pSchema.Property, nil)) + } + if propertyKey != "" { + parser.definitionCache[propertyKey] = def + } + for _, pSchema := range subSchema.PropertiesChildren { + newtype, err := parser.parseSchema(pSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) + } + for i, prop := range def.properties { + if prop.Key == pSchema.Property { + def.properties[i].Value = newtype + break + } + } + } + return types.NewObject(def.properties, nil), nil + } + return types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), nil + + } else if subSchema.Types.Contains("array") { + if len(subSchema.ItemsChildren) > 0 { + if subSchema.ItemsChildrenIsSingleSchema { + iSchema := subSchema.ItemsChildren[0] + newtype, err := parser.parseSchema(iSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v", iSchema) + } + return types.NewArray(nil, newtype), nil + } + newTypes := make([]types.Type, 0, len(subSchema.ItemsChildren)) + for i := 0; i != len(subSchema.ItemsChildren); i++ { + iSchema := subSchema.ItemsChildren[i] + newtype, err := parser.parseSchema(iSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v", iSchema) + } + newTypes = append(newTypes, newtype) + } + return types.NewArray(newTypes, nil), nil + } + return types.NewArray(nil, types.A), nil + } + } + + // Assume types if not specified in schema + if len(subSchema.PropertiesChildren) > 0 { + if err := subSchema.Types.Add("object"); err == nil { + return parser.parseSchema(subSchema) + } + } else if len(subSchema.ItemsChildren) > 0 { + if err := subSchema.Types.Add("array"); err == nil { + return parser.parseSchema(subSchema) + } + } + + return types.A, nil +} + +func (c *Compiler) setAnnotationSet() { + // Sorting modules by name for stable 
error reporting + sorted := make([]*Module, 0, len(c.Modules)) + for _, mName := range c.sorted { + sorted = append(sorted, c.Modules[mName]) + } + + as, errs := BuildAnnotationSet(sorted) + for _, err := range errs { + c.err(err) + } + c.annotationSet = as +} + +// checkTypes runs the type checker on all rules. The type checker builds a +// TypeEnv that is stored on the compiler. +func (c *Compiler) checkTypes() { + // Recursion is caught in earlier step, so this cannot fail. + sorted, _ := c.Graph.Sort() + checker := newTypeChecker(). + WithAllowNet(c.capabilities.AllowNet). + WithSchemaSet(c.schemaSet). + WithInputType(c.inputType). + WithBuiltins(c.builtins). + WithRequiredCapabilities(c.Required). + WithVarRewriter(rewriteVarsInRef(c.RewrittenVars)). + WithAllowUndefinedFunctionCalls(c.allowUndefinedFuncCalls) + var as *AnnotationSet + if c.useTypeCheckAnnotations { + as = c.annotationSet + } + env, errs := checker.CheckTypes(c.TypeEnv, sorted, as) + for _, err := range errs { + c.err(err) + } + c.TypeEnv = env +} + +func (c *Compiler) checkUnsafeBuiltins() { + if len(c.unsafeBuiltinsMap) == 0 { + return + } + + for _, name := range c.sorted { + errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name]) + for _, err := range errs { + c.err(err) + } + } +} + +func (c *Compiler) checkDeprecatedBuiltins() { + checkNeeded := false + for _, b := range c.Required.Builtins { + if _, found := c.deprecatedBuiltinsMap[b.Name]; found { + checkNeeded = true + break + } + } + if !checkNeeded { + return + } + + for _, name := range c.sorted { + if c.strict || c.Modules[name].regoV1Compatible() { + errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, c.Modules[name]) + for _, err := range errs { + c.err(err) + } + } + } +} + +func (c *Compiler) compile() { + plan := c.getOrBuildPlan() + + if c.metrics != nil { + for _, s := range plan.stages { + c.metrics.Timer(s.metricName).Start() + s.f() + c.metrics.Timer(s.metricName).Stop() + if c.Failed() { + return + } + } 
+ } else { + for _, s := range plan.stages { + s.f() + if c.Failed() { + return + } + } + } +} + +func (c *Compiler) init() { + + if c.initialized { + return + } + + if defaultModuleLoader != nil { + if c.moduleLoader == nil { + c.moduleLoader = defaultModuleLoader + } else { + first := c.moduleLoader + c.moduleLoader = func(res map[string]*Module) (map[string]*Module, error) { + res0, err := first(res) + if err != nil { + return nil, err + } + res1, err := defaultModuleLoader(res) + if err != nil { + return nil, err + } + // merge res1 into res0, based on module "file" names, to avoid clashes + for k, v := range res1 { + if _, ok := res0[k]; !ok { + res0[k] = v + } + } + return res0, nil + } + } + } + + if c.capabilities == nil { + c.capabilities = CapabilitiesForThisVersion() + } + + c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins)) + + for _, bi := range c.capabilities.Builtins { + c.builtins[bi.Name] = bi + if bi.IsDeprecated() { + c.deprecatedBuiltinsMap[bi.Name] = struct{}{} + } + } + + maps.Copy(c.builtins, c.customBuiltins) + + // Load the global input schema if one was provided. + if c.schemaSet != nil { + if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { + tpe, err := loadSchema(schema, c.capabilities.AllowNet) + if err != nil { + if !c.err(newErrorString(TypeErr, nil, err.Error())) { + return + } + } else { + c.inputType = tpe + } + } + } + + c.TypeEnv = newTypeChecker(). + WithSchemaSet(c.schemaSet). + WithInputType(c.inputType). 
+ Env(c.builtins) + + // Configure default stage skips based on existing configuration + if c.evalMode == EvalModeIR { + c.WithSkipStages(StageBuildRuleIndices, StageBuildComprehensionIndices) + } + if c.allowUndefinedFuncCalls { + c.WithSkipStages(StageCheckUndefinedFuncs, StageCheckSafetyRuleBodies) + } + + c.initialized = true +} + +func (c *Compiler) err(errs ...*Error) bool { // returns if we should continue + if len(errs) == 0 { + return true + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.maxErrs <= 0 { + c.Errors = append(c.Errors, errs...) + return true + } + + remaining := c.maxErrs - int(c.errCount) + if remaining <= 0 { + // The error limit has already been reached or exceeded. + // No more errors should be added. + return false + } + + numToTake := min(remaining, len(errs)) + + // The limit is reached if, after adding numToTake errors, the total count + // is equal to c.maxErrs. + isLimitReachedInThisCall := (int(c.errCount)+numToTake == c.maxErrs) + + c.errCount += uint32(numToTake) + c.Errors = append(c.Errors, errs[:numToTake]...) + if isLimitReachedInThisCall { + c.Errors = append(c.Errors, errLimitReached) + } + + return !isLimitReachedInThisCall // Return false if the limit was reached, true otherwise. 
+} + +func (c *Compiler) getExports() *util.HasherMap[Ref, []Ref] { + rules := util.NewHasherMap[Ref, []Ref](RefEqual) + + for _, name := range c.sorted { + for _, rule := range c.Modules[name].Rules { + hashMapAdd(rules, c.Modules[name].Package.Path, rule.Head.Ref().GroundPrefix()) + } + } + + return rules +} + +func refSliceEqual(a, b []Ref) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !a[i].Equal(b[i]) { + return false + } + } + return true +} + +func hashMapAdd(rules *util.HasherMap[Ref, []Ref], pkg, rule Ref) { + prev, ok := rules.Get(pkg) + if !ok { + rules.Put(pkg, []Ref{rule}) + return + } + for _, p := range prev { + if p.Equal(rule) { + return + } + } + rules.Put(pkg, append(prev, rule)) +} + +func (c *Compiler) GetAnnotationSet() *AnnotationSet { + return c.annotationSet +} + +func (c *Compiler) checkImports() { + modules := make([]*Module, 0, len(c.Modules)) + + supportsRegoV1Import := c.capabilities.ContainsFeature(FeatureRegoV1Import) || + c.capabilities.ContainsFeature(FeatureRegoV1) + + for _, name := range c.sorted { + for _, imp := range c.Modules[name].Imports { + if !supportsRegoV1Import && RegoV1CompatibleRef.Equal(imp.Path.Value) { + if !c.err(NewError(CompileErr, imp.Loc(), "rego.v1 import is not supported")) { + continue + } + } + } + + if c.strict || c.moduleIsRegoV1Compatible(c.Modules[name]) { + modules = append(modules, c.Modules[name]) + } + } + + c.err(checkDuplicateImports(modules)...) +} + +func (c *Compiler) checkKeywordOverrides() { + for _, name := range c.sorted { + if c.strict || c.moduleIsRegoV1Compatible(c.Modules[name]) { + if !c.err(checkRootDocumentOverrides(c.Modules[name])...) 
{ + continue + } + } + } +} + +func (c *Compiler) moduleIsRegoV1(mod *Module) bool { + if mod.regoVersion == RegoUndefined { + switch c.defaultRegoVersion { + case RegoUndefined: + c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module")) + return false + case RegoV1: + return true + } + return false + } + return mod.regoVersion == RegoV1 +} + +func (c *Compiler) moduleIsRegoV1Compatible(mod *Module) bool { + if mod.regoVersion == RegoUndefined { + switch c.defaultRegoVersion { + case RegoUndefined: + c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module")) + return false + case RegoV1, RegoV0CompatV1: + return true + } + return false + } + return mod.regoV1Compatible() +} + +// resolveAllRefs resolves references in expressions to their fully qualified values. +// +// For instance, given the following module: +// +// package a.b +// import data.foo.bar +// p[x] { bar[_] = x } +// +// The reference "bar[_]" would be resolved to "data.foo.bar[_]". +// +// Ref rules are resolved, too: +// +// package a.b +// q { c.d.e == 1 } +// c.d[e] := 1 if e := "e" +// +// The reference "c.d.e" would be resolved to "data.a.b.c.d.e". 
+func (c *Compiler) resolveAllRefs() { + rules := c.getExports() + + for _, name := range c.sorted { + mod := c.Modules[name] + var ruleExports []Ref + if x, ok := rules.Get(mod.Package.Path); ok { + ruleExports = x + } + + globals := getGlobals(mod.Package, ruleExports, mod.Imports) + + WalkRules(mod, func(rule *Rule) bool { + err := resolveRefsInRule(globals, rule) + if err != nil { + return c.err(newErrorString(CompileErr, rule.Location, err.Error())) + } + return false + }) + + if c.strict { // check for unused imports + for _, imp := range mod.Imports { + path := imp.Path.Value.(Ref) + if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { + continue // ignore future and rego imports + } + + for v, u := range globals { + if v.Equal(imp.Name()) && !u.used { + if !c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String())) { + return + } + } + } + } + } + } + + if c.moduleLoader != nil { + + parsed, err := c.moduleLoader(c.Modules) + if err != nil { + c.err(newErrorString(CompileErr, nil, err.Error())) + return + } + + if len(parsed) == 0 { + return + } + + for id, module := range parsed { + c.Modules[id] = module.Copy() + c.sorted = append(c.sorted, id) + if c.parsedModules != nil { + c.parsedModules[id] = module + } + } + + sort.Strings(c.sorted) + c.resolveAllRefs() + } +} + +func (c *Compiler) removeImports() { + c.imports = make(map[string][]*Import, len(c.Modules)) + for name := range c.Modules { + c.imports[name] = c.Modules[name].Imports + c.Modules[name].Imports = nil + } +} + +func (c *Compiler) initLocalVarGen() { + c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules) +} + +func (c *Compiler) rewriteComprehensionTerms() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + _, _ = rewriteComprehensionTerms(f, c.Modules[name]) // ignore error + } +} + +func (c *Compiler) rewriteExprTerms() { + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(rule *Rule) bool { + 
rewriteExprTermsInHead(c.localvargen, rule) + rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body) + return false + }) + } +} + +func (c *Compiler) rewriteRuleHeadRefs() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(rule *Rule) bool { + + ref := rule.Head.Ref() + // NOTE(sr): We're backfilling Refs here -- all parser code paths would have them, but + // it's possible to construct Module{} instances from Golang code, so we need + // to accommodate for that, too. + if len(rule.Head.Reference) == 0 { + rule.Head.Reference = ref + } + + cannotSpeakStringPrefixRefs := true + cannotSpeakGeneralRefs := true + for _, f := range c.capabilities.Features { + switch f { + case FeatureRefHeadStringPrefixes: + cannotSpeakStringPrefixRefs = false + case FeatureRefHeads: + cannotSpeakGeneralRefs = false + case FeatureRegoV1: + cannotSpeakStringPrefixRefs = false + cannotSpeakGeneralRefs = false + } + } + + if cannotSpeakStringPrefixRefs && cannotSpeakGeneralRefs && rule.Head.Name == "" { + c.err(NewError(CompileErr, rule.Loc(), "rule heads with refs are not supported: %v", rule.Head.Reference)) + return true + } + + for i := 1; i < len(ref); i++ { + if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last + if _, ok := ref[i].Value.(String); !ok { + if !c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference)) { + return true + } + continue + } + } + + // Rewrite so that any non-scalar elements in the rule's ref are vars: + // p.q.r[y.z] { ... } => p.q.r[__local0__] { __local0__ = y.z } + // p.q[a.b][c.d] { ... } => p.q[__local0__] { __local0__ = a.b; __local1__ = c.d } + // because that's what the RuleTree knows how to deal with. 
+ if _, ok := ref[i].Value.(Var); !ok && !IsScalar(ref[i].Value) { + expr := f.Generate(ref[i]) + if i == len(ref)-1 && rule.Head.Key.Equal(ref[i]) { + rule.Head.Key = expr.Operand(0) + } + rule.Head.Reference[i] = expr.Operand(0) + rule.Body = appendToBody(rule.Body, expr) + } + } + + return true + }) + } +} + +func (c *Compiler) checkVoidCalls() { + for _, name := range c.sorted { + c.err(checkVoidCalls(c.TypeEnv, c.Modules[name])...) + } +} + +func (c *Compiler) builtinLoc(ref Ref) *Builtin { + n := ref.String() + if b, ok := c.builtins[n]; ok { + return b + } + if b, ok := c.customBuiltins[n]; ok { + return b + } + return nil +} + +// isRefToKnownDefinedRule answers whether a rule (counting all incremental definitions) reference +// is known to evaluate to a value (not undefined). A rule reference is considered safe if it references +// a rule with no arguments (i.e. not a function) and: +// - The rule has a `default` value assigned +// - The rule is a multi-value rule — it generates a set that may be empty but not undefined +// - The rule is a "constant", meaning it has a single definition, a ground value and no body +func (c *Compiler) isRefToKnownDefinedRule(ref Ref) bool { + var matched *TreeNode + if len(ref) < 2 || !ref.HasPrefix(DefaultRootRef) { + return false + } + if matched = c.RuleTree.Find(ref); matched == nil || len(matched.Values) == 0 { + return false + } + first := matched.Values[0] + if len(first.Head.Args) > 0 { + return false + } + if first.Default || first.Head.RuleKind() == MultiValue { + return true + } + if len(matched.Values) == 1 { + return isConstantRule(first) + } + return slices.ContainsFunc(matched.Values[1:], func(r *Rule) bool { + return r.Default + }) +} + +// templateStringRewriter +type templateStringRewriter struct { + rule *Rule + gen *localVarGenerator + vis *VarVisitor + rewritten map[Var]Var + arity func(Ref) int + safeRuleRef func(Ref) bool + builtins builtinLocator + capsSupport bool +} + +func rewriterFromCompiler(c 
*Compiler) *templateStringRewriter { + return &templateStringRewriter{ + vis: NewVarVisitor(), + gen: c.localvargen, + builtins: c.builtinLoc, + arity: c.GetArity, + safeRuleRef: c.isRefToKnownDefinedRule, + rewritten: c.RewrittenVars, + capsSupport: c.capabilities.ContainsFeature(FeatureTemplateStrings) && + c.capabilities.ContainsBuiltin(InternalTemplateString.Name), + } +} + +func rewriterFromQueryCompiler(qc *queryCompiler, gen *localVarGenerator) *templateStringRewriter { + rw := rewriterFromCompiler(qc.compiler) + rw.gen = gen + return rw +} + +func (tsr *templateStringRewriter) Clear() *templateStringRewriter { + tsr.rule = nil + tsr.vis = tsr.vis.Clear() + return tsr +} + +// rewriteTemplateStrings rewrites template-string calls as they appear in bodies; e.g. rules, comprehensions, etc. +func (c *Compiler) rewriteTemplateStrings() { + tsr := rewriterFromCompiler(c) + modified := false + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(r *Rule) bool { + tsr = tsr.Clear() + safe := r.Head.Args.Vars() + + if len(r.Head.Args) > 0 { + tsr.vis = tsr.vis.WithParams(VarVisitorParams{SkipTemplateStrings: true}) + tsr.vis.WalkArgs(r.Head.Args) + } + + safe.Update(ReservedVars) + + modrec, safe, errs := rewriteTemplateStrings(tsr, safe, r.Body) + if modrec { + modified = true + } + c.err(errs...) + + if modrec, _, errs = rewriteTemplateStrings(tsr, safe, r.Head); modrec { + modified = true + } + c.err(errs...) 
+ + return false + }) + } + if modified { + c.Required.addBuiltinSorted(InternalTemplateString) + } +} + +func rewriteTemplateStrings(tsr *templateStringRewriter, globals VarSet, x any) (bool, VarSet, Errors) { + var errs Errors + var modified bool + + // All output vars in the current body are safe, recursively + var safe VarSet + if b, ok := x.(Body); ok { + safe = outputVarsForBody(b, tsr.arity, globals, tsr.vis) + safe.Update(globals) + } else { + safe = globals.Copy() + } + + vis := &GenericVisitor{func(x any) bool { + var modrec bool + var errsrec Errors + switch x := x.(type) { + case *Term: + if _, ok := x.Value.(*TemplateString); ok { + modrec, errsrec = rewriteTemplateStringTerm(tsr, safe, x) + } + case *SetComprehension: + var s VarSet + modrec, s, errsrec = rewriteTemplateStrings(tsr, safe, x.Body) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + modrec, errsrec = rewriteTemplateStringTerm(tsr, s, x.Term) + case *ArrayComprehension: + var s VarSet + modrec, s, errsrec = rewriteTemplateStrings(tsr, safe, x.Body) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + modrec, errsrec = rewriteTemplateStringTerm(tsr, s, x.Term) + case *ObjectComprehension: + var s VarSet + modrec, s, errsrec = rewriteTemplateStrings(tsr, safe, x.Body) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + modrec, errsrec = rewriteTemplateStringTerm(tsr, s, x.Key) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + modrec, errsrec = rewriteTemplateStringTerm(tsr, s, x.Value) + case *Every: + modrec, errsrec = rewriteTemplateStringTerm(tsr, safe, x.Domain) + if modrec { + modified = true + } + errs = append(errs, errsrec...) + + s := safe.Copy() + s.Update(x.KeyValueVars()) + modrec, _, errsrec = rewriteTemplateStrings(tsr, s, x.Body) + } + if modrec { + modified = true + } + errs = append(errs, errsrec...) 
+ return false + }} + vis.Walk(x) + + return modified, safe, errs +} + +func rewriteTemplateStringTerm(tsr *templateStringRewriter, globals VarSet, t *Term) (bool, Errors) { + if ts, ok := t.Value.(*TemplateString); ok { + call, errs := rewriteTemplateString(tsr, globals, t.Loc(), ts) + if len(errs) != 0 { + return false, errs + } + t.Value = call + return true, nil + } + return false, nil +} + +type builtinLocator func(Ref) *Builtin + +func rewriteTemplateString(tsr *templateStringRewriter, safe VarSet, loc *Location, ts *TemplateString) (Call, Errors) { + if !tsr.capsSupport { + return nil, Errors{NewError(CompileErr, loc, "template-strings are not supported")} + } + + var errs Errors + terms := make([]*Term, 0, len(ts.Parts)) + + if len(ts.Parts) == 0 { + terms = append(terms, NewTerm(InternedEmptyStringValue).SetLocation(loc)) + } else { + vis := ClearOrNewVarVisitor(nil).WithParams(SafetyCheckVisitorParams) + for _, p := range ts.Parts { + switch p := p.(type) { + case *Expr: + var t *Term + if p.IsCall() { + // Assert that the call isn't for a known relation built-in + if bi := tsr.builtins(p.Operator()); bi != nil && bi.Relation { + errs = append(errs, NewError( + CompileErr, + t.Loc(), + "illegal call to relation built-in '%s' that may cause multiple outputs", bi.Name, + )) + continue + } + t = CallTerm(p.Terms.([]*Term)...) 
+ } else { + var ok bool + t, ok = p.Terms.(*Term) + if !ok { + errs = append(errs, NewError( + CompileErr, + p.Location, + "unexpected template-string expression type: %T", p.Terms)) + continue + } + } + + if ref, ok := t.Value.(Ref); ok && tsr.safeRuleRef(ref) { + terms = append(terms, SetTerm(t)) + continue + } + + if _, ok := t.Value.(Var); ok { + terms = append(terms, SetTerm(t)) + continue + } + + vis = ClearOrNewVarVisitor(vis).WithParams(SafetyCheckVisitorParams) + vis.Walk(t) + vars := vis.Vars() + if vars.DiffCount(safe) > 0 { + unsafe := vars.Diff(safe) + for _, v := range unsafe.Sorted() { + if w, ok := tsr.rewritten[v]; ok { + v = w + } + errs = append(errs, NewError(CompileErr, t.Loc(), "var %v is undeclared", v)) + } + } + + loc := t.Loc() + x := NewTerm(tsr.gen.Generate()).SetLocation(loc) + capture := Equality.Expr(x, t).SetLocation(loc) + capture.With = p.With + terms = append(terms, SetComprehensionTerm(x, NewBody(capture)).SetLocation(loc)) + case *Term: + terms = append(terms, p) + default: + errs = append(errs, NewError( + CompileErr, + loc, + "expected only term or expression parts in template-string, got %T", p, + )) + return nil, errs + } + } + } + + call := InternalTemplateString.Call(ArrayTerm(terms...)).Value.(Call) + return call, errs +} + +func (c *Compiler) rewritePrintCalls() { + var modified bool + if !c.enablePrintStatements { + for _, name := range c.sorted { + if erasePrintCalls(c.Modules[name]) { + modified = true + } + } + } else { + vis := varVisitorPool.Get() + + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(r *Rule) bool { + vis = vis.Clear() + vis.vars.Update(ReservedVars) + if len(r.Head.Args) > 0 { + vis.WalkArgs(r.Head.Args) + } + + bodyVis := func(b Body) bool { + modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, vis.vars, b) + if modrec { + modified = true + } + if !c.err(errs...) 
{ + return true + } + return false + } + WalkBodies(r.Head, bodyVis) + WalkBodies(r.Body, bodyVis) + return false + }) + } + + varVisitorPool.Put(vis) + } + if modified { + c.Required.addBuiltinSorted(Print) + } +} + +// checkVoidCalls returns errors for any expressions that treat void function +// calls as values. The only void functions in Rego are specific built-ins like +// print(). +func checkVoidCalls(env *TypeEnv, x any) Errors { + var errs Errors + WalkTerms(x, func(x *Term) bool { + if call, ok := x.Value.(Call); ok { + if tpe, ok := env.Get(call[0]).(*types.Function); ok && tpe.Result() == nil { + errs = append(errs, NewError(TypeErr, x.Loc(), "%v used as value", call)) + } + } + return false + }) + return errs +} + +// rewritePrintCalls will rewrite the body so that print operands are captured +// in local variables and their evaluation occurs within a comprehension. +// Wrapping the terms inside of a comprehension ensures that undefined values do +// not short-circuit evaluation. +// +// For example, given the following print statement: +// +// print("the value of x is:", input.x) +// +// The expression would be rewritten to: +// +// print({__local0__ | __local0__ = "the value of x is:"}, {__local1__ | __local1__ = input.x}) +func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals VarSet, body Body) (bool, Errors) { + + var errs Errors + var modified bool + + // Visit comprehension bodies recursively to ensure print statements inside + // those bodies only close over variables that are safe. 
+ for i := range body { + if ContainsClosures(body[i]) { + safe := outputVarsForBody(body[:i], getArity, globals, nil) + safe.Update(globals) + WalkClosures(body[i], func(x any) bool { + var modrec bool + var errsrec Errors + switch x := x.(type) { + case *SetComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *ArrayComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *ObjectComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *Every: + safe.Update(x.KeyValueVars()) + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + } + if modrec { + modified = true + } + errs = append(errs, errsrec...) + return true + }) + if len(errs) > 0 { + return false, errs + } + } + } + + for i := range body { + + if !isPrintCall(body[i]) { + continue + } + + modified = true + + var errs Errors + safe := outputVarsForBody(body[:i], getArity, globals, nil) + safe.Update(globals) + + // Fixes Issue #7647 by adding generated variables to the safe set + WalkVars(body[:i], func(v Var) bool { + if v.IsGenerated() { + safe.Add(v) + } + return false + }) + + args := body[i].Operands() + + var vis *VarVisitor + if len(args) > 0 { + vis = varVisitorPool.Get() + defer varVisitorPool.Put(vis) + } + + for j := range args { + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) + vis.Walk(args[j]) + vars := vis.Vars() + if vars.DiffCount(safe) > 0 { + unsafe := vars.Diff(safe) + for _, v := range unsafe.Sorted() { + errs = append(errs, NewError(CompileErr, args[j].Loc(), "var %v is undeclared", v)) + } + } + } + + if len(errs) > 0 { + return false, errs + } + + terms := make([]*Term, 0, len(args)) + + for j := range args { + x := NewTerm(gen.Generate()).SetLocation(args[j].Loc()) + capture := Equality.Expr(x, args[j]).SetLocation(args[j].Loc()) + terms = append(terms, SetComprehensionTerm(x, NewBody(capture)).SetLocation(args[j].Loc())) + } + + body.Set(NewExpr([]*Term{ + 
NewTerm(InternalPrint.Ref()).SetLocation(body[i].Loc()), + ArrayTerm(terms...).SetLocation(body[i].Loc()), + }).SetLocation(body[i].Loc()), i) + } + + return modified, nil +} + +func erasePrintCalls(node any) bool { + var modified bool + NewGenericVisitor(func(x any) bool { + var modrec bool + switch x := x.(type) { + case *Rule: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *ArrayComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *SetComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *ObjectComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *Every: + modrec, x.Body = erasePrintCallsInBody(x.Body) + } + if modrec { + modified = true + } + return false + }).Walk(node) + return modified +} + +func erasePrintCallsInBody(x Body) (bool, Body) { + + if !containsPrintCall(x) { + return false, x + } + + var cpy Body + + for i := range x { + + // Recursively visit any comprehensions contained in this expression. + erasePrintCalls(x[i]) + + if !isPrintCall(x[i]) { + cpy.Append(x[i]) + } + } + + if len(cpy) == 0 { + term := BooleanTerm(true).SetLocation(x.Loc()) + expr := NewExpr(term).SetLocation(x.Loc()) + cpy.Append(expr) + } + + return true, cpy +} + +func containsPrintCall(x any) bool { + var found bool + WalkExprs(x, func(expr *Expr) bool { + if !found { + if isPrintCall(expr) { + found = true + } + } + return found + }) + return found +} + +var printRef = Print.Ref() + +func isPrintCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(printRef) +} + +// rewriteRefsInHead will rewrite rules so that the head does not contain any +// terms that require evaluation (e.g., refs or comprehensions). If the key or +// value contains one or more of these terms, the key or value will be moved +// into the body and assigned to a new variable. The new variable will replace +// the key or value in the head. 
+// +// For instance, given the following rule: +// +// p[{"foo": data.foo[i]}] { i < 100 } +// +// The rule would be re-written as: +// +// p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} } +func (c *Compiler) rewriteRefsInHead() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(rule *Rule) bool { + if requiresEval(rule.Head.Key) { + expr := f.Generate(rule.Head.Key) + rule.Head.Key = expr.Operand(0) + rule.Body = appendToBody(rule.Body, expr) + } + if requiresEval(rule.Head.Value) { + expr := f.Generate(rule.Head.Value) + rule.Head.Value = expr.Operand(0) + rule.Body = appendToBody(rule.Body, expr) + } + for i := 0; i < len(rule.Head.Args); i++ { + if requiresEval(rule.Head.Args[i]) { + expr := f.Generate(rule.Head.Args[i]) + rule.Head.Args[i] = expr.Operand(0) + rule.Body = appendToBody(rule.Body, expr) + } + } + return false + }) + } +} + +func (c *Compiler) rewriteEquals() { + modified := false + for _, name := range c.sorted { + modified = rewriteEquals(c.Modules[name]) || modified + } + if modified { + c.Required.addBuiltinSorted(Equal) + } +} + +func (c *Compiler) rewriteDynamicTerms() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(rule *Rule) bool { + rule.Body = rewriteDynamics(f, rule.Body) + return false + }) + } +} + +// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise +// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var. 
+// For example, given the following module: +// +// package test +// +// p.q contains v if { +// some v in numbers.range(1, 3) +// } +// +// p.r := "foo" +// +// test_rule { +// p == { +// "q": {4, 5, 6} +// } +// } +// +// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact. +// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting. +func (c *Compiler) rewriteTestRuleEqualities() { + if !c.rewriteTestRulesForTracing { + return + } + + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(rule *Rule) bool { + if strings.HasPrefix(string(rule.Head.Name), "test_") { + rule.Body = rewriteTestEqualities(f, rule.Body) + } + return false + }) + } +} + +func (c *Compiler) parseMetadataBlocks() { + // Only parse annotations if rego.metadata built-ins are called + regoMetadataCalled := false + for _, name := range c.sorted { + WalkExprs(c.Modules[name], func(expr *Expr) bool { + if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) { + regoMetadataCalled = true + } + return regoMetadataCalled + }) + + if regoMetadataCalled { + break + } + } + + if regoMetadataCalled { + // NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module + for _, name := range c.sorted { + mod := c.Modules[name] + if len(mod.Annotations) == 0 { + var errs Errors + mod.Annotations, errs = parseAnnotations(mod.Comments) + if !c.err(errs...) || !c.err(attachAnnotationsNodes(mod)...) 
{ + return + } + + attachRuleAnnotations(mod) + } + } + } +} + +func (c *Compiler) rewriteRegoMetadataCalls() { + eqFactory := newEqualityFactory(c.localvargen) + + _, chainFuncAllowed := c.builtins[RegoMetadataChain.Name] + _, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name] + + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(rule *Rule) bool { + var firstChainCall, firstRuleCall *Expr + + WalkExprs(rule, func(expr *Expr) bool { + if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) { + firstChainCall = expr + } else if ruleFuncAllowed && firstRuleCall == nil && isRegoMetadataRuleCall(expr) { + firstRuleCall = expr + } + return firstChainCall != nil && firstRuleCall != nil + }) + + chainCalled := firstChainCall != nil + ruleCalled := firstRuleCall != nil + + if chainCalled || ruleCalled { + body := make(Body, 0, len(rule.Body)+2) + + var metadataChainVar Var + if chainCalled { + // Create and inject metadata chain for rule + + chain, err := createMetadataChain(c.annotationSet.Chain(rule)) + if err != nil { + return !c.err(err) + } + + chain.Location = firstChainCall.Location + eq := eqFactory.Generate(chain) + metadataChainVar = eq.Operands()[0].Value.(Var) + body = appendToBody(body, eq) + } + + var metadataRuleVar Var + if ruleCalled { + // Create and inject metadata for rule + + var metadataRuleTerm *Term + + a := getPrimaryRuleAnnotations(c.annotationSet, rule) + if a != nil { + annotObj, err := a.toObject() + if err != nil { + return !c.err(err) + } + metadataRuleTerm = NewTerm(*annotObj) + } else { + // If rule has no annotations, assign an empty object + metadataRuleTerm = ObjectTerm() + } + + metadataRuleTerm.Location = firstRuleCall.Location + eq := eqFactory.Generate(metadataRuleTerm) + metadataRuleVar = eq.Operands()[0].Value.(Var) + body = appendToBody(body, eq) + } + + body = appendToBody(body, rule.Body...) 
+ rule.Body = body + + vis := func(b Body) bool { + return !c.err(rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars)...) + } + WalkBodies(rule.Head, vis) + WalkBodies(rule.Body, vis) + } + + return false + }) + } +} + +func getPrimaryRuleAnnotations(as *AnnotationSet, rule *Rule) *Annotations { + annots := as.GetRuleScope(rule) + + if len(annots) == 0 { + return nil + } + + // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward + slices.SortStableFunc(annots, func(a, b *Annotations) int { + return -a.Location.Compare(b.Location) + }) + + return annots[0] +} + +func rewriteRegoMetadataCalls(metadataChainVar *Var, metadataRuleVar *Var, body Body, rewrittenVars *map[Var]Var) Errors { + var errs Errors + + WalkClosures(body, func(x any) bool { + switch x := x.(type) { + case *ArrayComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *SetComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *ObjectComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *Every: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + } + return true + }) + + for i := range body { + expr := body[i] + var metadataVar Var + + if metadataChainVar != nil && isRegoMetadataChainCall(expr) { + metadataVar = *metadataChainVar + } else if metadataRuleVar != nil && isRegoMetadataRuleCall(expr) { + metadataVar = *metadataRuleVar + } else { + continue + } + + // NOTE(johanfylling): An alternative strategy would be to walk the body and replace all operands[0] + // usages with *metadataChainVar + operands := expr.Operands() + var newExpr *Expr + if len(operands) > 0 { // There is an output var to rewrite + rewrittenVar := operands[0] + newExpr = Equality.Expr(rewrittenVar, NewTerm(metadataVar)) + } else 
{ // No output var, just rewrite expr to metadataVar + newExpr = NewExpr(NewTerm(metadataVar)) + } + + newExpr.Generated = true + newExpr.Location = expr.Location + body.Set(newExpr, i) + } + + return errs +} + +var regoMetadataChainRef = RegoMetadataChain.Ref() +var regoMetadataRuleRef = RegoMetadataRule.Ref() + +func isRegoMetadataChainCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(regoMetadataChainRef) +} + +func isRegoMetadataRuleCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(regoMetadataRuleRef) +} + +func createMetadataChain(chain []*AnnotationsRef) (*Term, *Error) { + + metaArray := NewArray() + for _, link := range chain { + // Dropping leading 'data' element of path + p := link.Path[1:].toArray() + obj := NewObject(Item(InternedTerm("path"), NewTerm(p))) + if link.Annotations != nil { + annotObj, err := link.Annotations.toObject() + if err != nil { + return nil, err + } + obj.Insert(InternedTerm("annotations"), NewTerm(*annotObj)) + } + metaArray = metaArray.Append(NewTerm(obj)) + } + + return NewTerm(metaArray), nil +} + +func (c *Compiler) rewriteLocalVars() { + var assignment bool + + args := NewVarVisitor() + argsStack := newLocalDeclaredVars() + + for _, name := range c.sorted { + mod := c.Modules[name] + gen := c.localvargen + + WalkRules(mod, func(rule *Rule) bool { + args.Clear() + argsStack.Clear() + + if c.strict && len(rule.Head.Args) > 0 { + args.WalkArgs(rule.Head.Args) + } + unusedArgs := args.Vars() + + c.rewriteLocalArgVars(gen, argsStack, rule) + + // Rewrite local vars in each else-branch of the rule. + // Note: this is done instead of a walk so that we can capture any unused function arguments + // across else-branches. + for rule := rule; rule != nil; rule = rule.Else { + stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen) + if !c.err(errs...) 
{ + return true + } + if stack.assignment { + assignment = true + } + + for arg := range unusedArgs { + if stack.Count(arg) > 1 { + delete(unusedArgs, arg) + } + } + } + + if c.strict { + // Report an error for each unused function argument + for arg := range unusedArgs { + if !arg.IsWildcard() { + if !c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg)) { + return true + } + } + } + } + + return true + }) + } + + if assignment { + c.Required.addBuiltinSorted(Assign) + } +} + +func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsStack *localDeclaredVars, gen *localVarGenerator) (*localDeclaredVars, Errors) { + onlyScalars := !headMayHaveVars(rule.Head) + + var used VarSet + + if !onlyScalars { + // Rewrite assignments contained in head of rule. Assignments can + // occur in rule head if they're inside a comprehension. Note, + // assigned vars in comprehensions in the head will be rewritten + // first to preserve scoping rules. For example: + // + // p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 } + // + // This behaviour is consistent scoping inside the body. For example: + // + // p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] } + nestedXform := &rewriteNestedHeadVarLocalTransform{ + gen: gen, + RewrittenVars: c.RewrittenVars, + strict: c.strict, + } + + NewGenericVisitor(nestedXform.Visit).Walk(rule.Head) + c.err(nestedXform.errs...) // NB(sr): This is a bit bogus -- Why not return them? + + // Rewrite assignments in body. 
+ vis := NewVarVisitor() + + for _, t := range rule.Head.Ref()[1:] { + if !IsScalar(t.Value) { + vis.Walk(t) + } + } + + if rule.Head.Key != nil && !IsScalar(rule.Head.Key.Value) { + vis.Walk(rule.Head.Key) + } + + if rule.Head.Value != nil && !IsScalar(rule.Head.Value.Value) { + valueVars := rule.Head.Value.Vars() + vis.vars.Update(valueVars) + for arg := range unusedArgs { + if valueVars.Contains(arg) { + delete(unusedArgs, arg) + } + } + } + + used = vis.Vars() + } + + stack := argsStack.Copy() + + body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body, c.strict) + + // For rewritten vars use the collection of all variables that + // were in the stack at some point in time. + maps.Copy(c.RewrittenVars, stack.rewritten) + + rule.Body = body + + if onlyScalars { + return stack, errs + } + + // Rewrite vars in head that refer to locally declared vars in the body. + localXform := rewriteHeadVarLocalTransform{declared: declared} + + for i := range rule.Head.Args { + rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i]) + } + + for i := 1; i < len(rule.Head.Ref()); i++ { + rule.Head.Reference[i], _ = transformTerm(localXform, rule.Head.Ref()[i]) + } + if rule.Head.Key != nil { + rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key) + } + + if rule.Head.Value != nil { + rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value) + } + return stack, errs +} + +func headMayHaveVars(head *Head) bool { + if head == nil { + return false + } + for i := range head.Args { + if !IsScalar(head.Args[i].Value) { + return true + } + } + if head.Key != nil && !IsScalar(head.Key.Value) { + return true + } + if head.Value != nil && !IsScalar(head.Value.Value) { + return true + } + ref := head.Ref()[1:] + for i := range ref { + if !IsScalar(ref[i].Value) { + return true + } + } + return false +} + +type rewriteNestedHeadVarLocalTransform struct { + gen *localVarGenerator + errs Errors + RewrittenVars map[Var]Var + strict bool +} + +func (xform 
*rewriteNestedHeadVarLocalTransform) Visit(x any) bool { + if term, ok := x.(*Term); ok { + stop := false + stack := newLocalDeclaredVars() + + switch x := term.Value.(type) { + case *object: + vis := NewGenericVisitor(xform.Visit) + cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + vis.Walk(kcpy) + vcpy := v.Copy() + vis.Walk(vcpy) + return kcpy, vcpy, nil + }) + term.Value = cpy + stop = true + case *set: + vis := NewGenericVisitor(xform.Visit) + cpy, _ := x.Map(func(v *Term) (*Term, error) { + vcpy := v.Copy() + vis.Walk(vcpy) + return vcpy, nil + }) + term.Value = cpy + stop = true + case *ArrayComprehension: + xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + case *SetComprehension: + xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + case *ObjectComprehension: + xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + case *TemplateString: + xform.errs = rewriteDeclaredVarsInTemplateString(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + } + + maps.Copy(xform.RewrittenVars, stack.rewritten) + + return stop + } + + return false +} + +type rewriteHeadVarLocalTransform struct { + declared map[Var]Var +} + +func (xform rewriteHeadVarLocalTransform) Transform(x any) (any, error) { + if v, ok := x.(Var); ok { + if gv, ok := xform.declared[v]; ok { + return gv, nil + } + } + return x, nil +} + +func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) { + vis := &ruleArgLocalRewriter{ + stack: stack, + gen: gen, + } + + for i := range rule.Head.Args { + Walk(vis, rule.Head.Args[i]) + } + + c.err(vis.errs...) 
+} + +type ruleArgLocalRewriter struct { + stack *localDeclaredVars + gen *localVarGenerator + errs []*Error +} + +func (vis *ruleArgLocalRewriter) Visit(x any) Visitor { + + t, ok := x.(*Term) + if !ok { + return vis + } + + switch v := t.Value.(type) { + case Var: + gv, ok := vis.stack.Declared(v) + if ok { + vis.stack.Seen(v) + } else { + gv = vis.gen.Generate() + vis.stack.Insert(v, gv, argVar) + } + t.Value = gv + return nil + case *object: + if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) { + vcpy := v.Copy() + Walk(vis, vcpy) + return k, vcpy, nil + }); err != nil { + vis.errs = append(vis.errs, newErrorString(CompileErr, t.Location, err.Error())) + } else { + t.Value = cpy + } + return nil + case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set, *TemplateString: + // Scalars are no-ops. Comprehensions and template-strings are handled above. Sets must not + // contain variables. + return nil + case Call: + vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls")) + return nil + default: + // Recurse on refs and arrays. Any embedded + // variables can be rewritten. 
+ return vis + } +} + +func (c *Compiler) rewriteWithModifiers() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + t := NewGenericTransformer(func(x any) (any, error) { + body, ok := x.(Body) + if !ok { + return x, nil + } + body, err := rewriteWithModifiersInBody(c, c.unsafeBuiltinsMap, f, body) + if err != nil { + c.err(err) + } + + return body, nil + }) + _, _ = Transform(t, mod) // ignore error + } +} + +func (c *Compiler) setModuleTree() { + c.ModuleTree = NewModuleTree(c.Modules) +} + +func (c *Compiler) setRuleTree() { + c.RuleTree = NewRuleTree(c.ModuleTree) +} + +func (c *Compiler) setGraph() { + list := func(r Ref) []*Rule { + return c.GetRulesDynamicWithOpts(r, RulesOptions{IncludeHiddenModules: true}) + } + c.Graph = NewGraph(c.Modules, list) +} + +type queryCompiler struct { + compiler *Compiler + qctx *QueryContext + typeEnv *TypeEnv + rewritten map[Var]Var + after map[string][]QueryCompilerStageDefinition + unsafeBuiltins map[string]struct{} + comprehensionIndices map[*Term]*ComprehensionIndex + enablePrintStatements bool +} + +func newQueryCompiler(compiler *Compiler) QueryCompiler { + qc := &queryCompiler{ + compiler: compiler, + qctx: nil, + after: map[string][]QueryCompilerStageDefinition{}, + comprehensionIndices: map[*Term]*ComprehensionIndex{}, + } + return qc +} + +func (qc *queryCompiler) WithStrict(strict bool) QueryCompiler { + qc.compiler.WithStrict(strict) + return qc +} + +func (qc *queryCompiler) WithEnablePrintStatements(yes bool) QueryCompiler { + qc.enablePrintStatements = yes + return qc +} + +func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler { + qc.qctx = qctx + return qc +} + +func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler { + qc.after[after] = append(qc.after[after], stage) + return qc +} + +func (qc *queryCompiler) WithStageAfterID(after StageID, stage QueryCompilerStageDefinition) QueryCompiler { 
+ return qc.WithStageAfter(string(after), stage) +} + +func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler { + qc.unsafeBuiltins = unsafe + return qc +} + +func (qc *queryCompiler) RewrittenVars() map[Var]Var { + return qc.rewritten +} + +func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex { + if result, ok := qc.comprehensionIndices[term]; ok { + return result + } else if result, ok := qc.compiler.comprehensionIndices[term]; ok { + return result + } + return nil +} + +func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) { + if qc.compiler.metrics != nil { + qc.compiler.metrics.Timer(metricName).Start() + defer qc.compiler.metrics.Timer(metricName).Stop() + } + return s(qctx, query) +} + +func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) { + if qc.compiler.metrics != nil { + qc.compiler.metrics.Timer(metricName).Start() + defer qc.compiler.metrics.Timer(metricName).Stop() + } + return s(qc, query) +} + +type queryStage = struct { + name StageID + metricName string + f func(*QueryContext, Body) (Body, error) +} + +func (qc *queryCompiler) Compile(query Body) (Body, error) { + if len(query) == 0 { + return nil, Errors{NewError(CompileErr, nil, "empty query cannot be compiled")} + } + + query = query.Copy() + + stages := []queryStage{ + {StageCheckKeywordOverrides, "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides}, + {StageResolveRefs, "query_compile_stage_resolve_refs", qc.resolveRefs}, + {StageRewriteLocalVars, "query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars}, + {StageRewriteTemplateStrings, "compile_stage_rewrite_template_strings", qc.rewriteTemplateStrings}, + {StageCheckVoidCalls, "query_compile_stage_check_void_calls", qc.checkVoidCalls}, + {StageRewritePrintCalls, "query_compile_stage_rewrite_print_calls", 
qc.rewritePrintCalls}, + {StageRewriteExprTerms, "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms}, + {StageRewriteComprehensionTerms, "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms}, + {StageRewriteWithValues, "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers}, + {StageCheckUndefinedFuncs, "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs}, + {StageCheckSafety, "query_compile_stage_check_safety", qc.checkSafety}, + {StageRewriteDynamicTerms, "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms}, + {StageCheckTypes, "query_compile_stage_check_types", qc.checkTypes}, + {StageCheckUnsafeBuiltins, "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins}, + {StageCheckDeprecatedBuiltins, "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins}, + } + if qc.compiler.evalMode == EvalModeTopdown { + stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices}) + } + + qctx := qc.qctx.Copy() + + for _, s := range stages { + var err error + query, err = qc.runStage(s.metricName, qctx, query, s.f) + if err != nil { + return nil, qc.applyErrorLimit(err) + } + for _, s := range qc.after[string(s.name)] { + query, err = qc.runStageAfter(s.MetricName, query, s.Stage) + if err != nil { + return nil, qc.applyErrorLimit(err) + } + } + } + + return query, nil +} + +func (qc *queryCompiler) TypeEnv() *TypeEnv { + return qc.typeEnv +} + +func (qc *queryCompiler) applyErrorLimit(err error) error { + var errs Errors + if errors.As(err, &errs) { + if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs { + err = append(errs[:qc.compiler.maxErrs], errLimitReached) + } + } + return err +} + +func (qc *queryCompiler) checkKeywordOverrides(_ *QueryContext, body Body) (Body, error) { + if qc.compiler.strict { + if errs := checkRootDocumentOverrides(body); len(errs) > 0 { + return 
nil, errs + } + } + return body, nil +} + +func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error) { + + var globals map[Var]*usedRef + + if qctx != nil { + pkg := qctx.Package + // Query compiler ought to generate a package if one was not provided and one or more imports were provided. + // The generated package name could even be an empty string to avoid conflicts (it doesn't have to be valid syntactically) + if pkg == nil && len(qctx.Imports) > 0 { + pkg = &Package{Path: RefTerm(VarTerm("")).Value.(Ref)} + } + if pkg != nil { + var ruleExports []Ref + rules := qc.compiler.getExports() + if exist, ok := rules.Get(pkg.Path); ok { + ruleExports = exist + } + + globals = getGlobals(qctx.Package, ruleExports, qctx.Imports) + qctx.Imports = nil + } + } + + ignore := &declaredVarStack{declaredVars(body)} + + return resolveRefsInBody(globals, ignore, body), nil +} + +func (*queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + f := newEqualityFactory(gen) + node, err := rewriteComprehensionTerms(f, body) + if err != nil { + return nil, err + } + return node.(Body), nil +} + +func (*queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + f := newEqualityFactory(gen) + return rewriteDynamics(f, body), nil +} + +func (*queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + return rewriteExprTermsInBody(gen, body), nil +} + +func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + stack := newLocalDeclaredVars() + body, _, err := rewriteLocalVars(gen, stack, nil, body, qc.compiler.strict) + if len(err) != 0 { + return nil, err + } + + // The vars returned during the rewrite will include all seen vars, + // even if they're not declared with an assignment operation. 
We don't + // want to include these inside the rewritten set though. + qc.rewritten = maps.Clone(stack.rewritten) + + return body, nil +} + +func (qc *queryCompiler) rewriteTemplateStrings(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + tsr := rewriterFromQueryCompiler(qc, gen) + if _, _, errs := rewriteTemplateStrings(tsr, ReservedVars, body); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) { + if !qc.enablePrintStatements { + _, cpy := erasePrintCallsInBody(body) + return cpy, nil + } + gen := newLocalVarGenerator("q", body) + if _, errs := rewritePrintCalls(gen, qc.compiler.GetArity, ReservedVars, body); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkVoidCalls(_ *QueryContext, body Body) (Body, error) { + if errs := checkVoidCalls(qc.compiler.TypeEnv, body); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) { + if errs := checkUndefinedFuncs(qc.compiler.TypeEnv, body, qc.compiler.GetArity, qc.rewritten); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) { + safe := ReservedVars.Copy() + reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body) + if errs := safetyErrorSlice(unsafe, qc.RewrittenVars()); len(errs) > 0 { + return nil, errs + } + return reordered, nil +} + +func (qc *queryCompiler) checkTypes(_ *QueryContext, body Body) (Body, error) { + var errs Errors + checker := newTypeChecker(). + WithSchemaSet(qc.compiler.schemaSet). + WithInputType(qc.compiler.inputType). 
+ WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars)) + qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body) + if len(errs) > 0 { + return nil, errs + } + + return body, nil +} + +func (qc *queryCompiler) checkUnsafeBuiltins(_ *QueryContext, body Body) (Body, error) { + errs := checkUnsafeBuiltins(qc.unsafeBuiltinsMap(), body) + if len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) unsafeBuiltinsMap() map[string]struct{} { + if qc.unsafeBuiltins != nil { + return qc.unsafeBuiltins + } + return qc.compiler.unsafeBuiltinsMap +} + +func (qc *queryCompiler) checkDeprecatedBuiltins(_ *QueryContext, body Body) (Body, error) { + if qc.compiler.strict { + errs := checkDeprecatedBuiltins(qc.compiler.deprecatedBuiltinsMap, body) + if len(errs) > 0 { + return nil, errs + } + } + return body, nil +} + +func (qc *queryCompiler) rewriteWithModifiers(_ *QueryContext, body Body) (Body, error) { + f := newEqualityFactory(newLocalVarGenerator("q", body)) + body, err := rewriteWithModifiersInBody(qc.compiler, qc.unsafeBuiltinsMap(), f, body) + if err != nil { + return nil, Errors{err} + } + return body, nil +} + +func (qc *queryCompiler) buildComprehensionIndices(_ *QueryContext, body Body) (Body, error) { + // NOTE(tsandall): The query compiler does not have a metrics object so we + // cannot record index metrics currently. + _ = buildComprehensionIndices(qc.compiler.debug, qc.compiler.GetArity, ReservedVars, qc.RewrittenVars(), body, qc.comprehensionIndices) + return body, nil +} + +// ComprehensionIndex specifies how the comprehension term can be indexed. The keys +// tell the evaluator what variables to use for indexing. In the future, the index +// could be expanded with more information that would allow the evaluator to index +// a larger fragment of comprehensions (e.g., by closing over variables in the outer +// query.) 
+type ComprehensionIndex struct { + Term *Term + Keys []*Term +} + +func (ci *ComprehensionIndex) String() string { + if ci == nil { + return "" + } + return fmt.Sprintf("", NewArray(ci.Keys...)) +} + +func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node Body, result map[*Term]*ComprehensionIndex) uint64 { + var n uint64 + cpy := candidates.Copy() + vis := varVisitorPool.Get() + + defer varVisitorPool.Put(vis) + + WalkBodies(node, func(b Body) bool { + for _, expr := range b { + index := getComprehensionIndex(dbg, arity, cpy, rwVars, expr) + if index != nil { + result[index.Term] = index + n++ + } + // Any variables appearing in the expressions leading up to the comprehension + // are fair-game to be used as index keys. + vis = vis.Clear().WithParams(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true}) + vis.Walk(expr) + cpy.Update(vis.Vars()) + } + return false + }) + return n +} + +func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex { + // Ignore everything except = expressions. Extract + // the comprehension term from the expression. + if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 { + // No debug message, these are assumed to be known hinderances + // to comprehension indexing. + return nil + } + + var term *Term + + lhs, rhs := expr.Operand(0), expr.Operand(1) + + if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) { + term = rhs + } else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) { + term = lhs + } + + if term == nil { + // no debug for this, it's the ordinary "nothing to do here" case + return nil + } + + // Ignore comprehensions that contain expressions that close over variables + // in the outer body if those variables are not also output variables in the + // comprehension body. 
In other words, ignore comprehensions that we cannot + // safely evaluate without bindings from the outer body. For example: + // + // x = [1] + // [true | data.y[z] = x] # safe to evaluate w/o outer body + // [true | data.y[z] = x[0]] # NOT safe to evaluate because 'x' would be unsafe. + // + // By identifying output variables in the body we also know what to index on by + // intersecting with candidate variables from the outer query. + // + // For example: + // + // x = data.foo[_] + // _ = [y | data.bar[y] = x] # index on 'x' + // + // This query goes from O(data.foo*data.bar) to O(data.foo+data.bar). + var body Body + + switch x := term.Value.(type) { + case *ArrayComprehension: + body = x.Body + case *SetComprehension: + body = x.Body + case *ObjectComprehension: + body = x.Body + } + + outputs := outputVarsForBody(body, arity, ReservedVars, nil) + unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars) + + if len(unsafe) > 0 { + dbg.Printf("%s: comprehension index: unsafe vars: %v", expr.Location, unsafe) + return nil + } + + // Similarly, ignore comprehensions that contain references with output variables + // that intersect with the candidates. Indexing these comprehensions could worsen + // performance. + regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates) + regressionVis.Walk(body) + if regressionVis.worse { + dbg.Printf("%s: comprehension index: output vars intersect candidates", expr.Location) + return nil + } + + // Check if any nested comprehensions close over candidates. If any intersection is found + // the comprehension cannot be cached because it would require closing over the candidates + // which the evaluator does not support today. 
+ nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates) + nestedVis.Walk(body) + if nestedVis.found { + dbg.Printf("%s: comprehension index: nested comprehensions close over candidates", expr.Location) + return nil + } + + // Make a sorted set of variable names that will serve as the index key set. + // Sort to ensure deterministic indexing. In future this could be relaxed + // if we can decide that one ordering is better than another. If the set is + // empty, there is no indexing to do. + indexVars := candidates.Intersect(outputs) + if len(indexVars) == 0 { + dbg.Printf("%s: comprehension index: no index vars", expr.Location) + return nil + } + + result := make([]*Term, 0, len(indexVars)) + for v := range indexVars { + result = append(result, NewTerm(v)) + } + slices.SortFunc(result, TermValueCompare) + + debugRes := make([]*Term, len(result)) + for i, r := range result { + if o, ok := rwVars[r.Value.(Var)]; ok { + debugRes[i] = NewTerm(o) + } else { + debugRes[i] = r + } + } + dbg.Printf("%s: comprehension index: built with keys: %v", expr.Location, debugRes) + return &ComprehensionIndex{Term: term, Keys: result} +} + +type comprehensionIndexRegressionCheckVisitor struct { + candidates VarSet + seen VarSet + worse bool +} + +// TODO(tsandall): Improve this so that users can either supply this list explicitly +// or the information is maintained on the built-in function declaration. What we really +// need to know is whether the built-in function allows callers to push down output +// values or not. It's unlikely that anything outside of OPA does this today so this +// solution is fine for now. 
+var comprehensionIndexBlacklist = map[string]int{ + WalkBuiltin.Name: len(WalkBuiltin.Decl.FuncArgs().Args), +} + +func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor { + return &comprehensionIndexRegressionCheckVisitor{ + candidates: candidates, + seen: NewVarSet(), + } +} + +func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x any) { + NewGenericVisitor(vis.visit).Walk(x) +} + +func (vis *comprehensionIndexRegressionCheckVisitor) visit(x any) bool { + if !vis.worse { + switch x := x.(type) { + case *Expr: + operands := x.Operands() + if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) { + vis.assertEmptyIntersection(operands[pos].Vars()) + } + case Ref: + vis.assertEmptyIntersection(x.OutputVars()) + case Var: + vis.seen.Add(x) + // Always skip comprehensions. We do not have to visit their bodies here. + case *ArrayComprehension, *SetComprehension, *ObjectComprehension: + return true + } + } + return vis.worse +} + +func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) { + for v := range vs { + if vis.candidates.Contains(v) && !vis.seen.Contains(v) { + vis.worse = true + return + } + } +} + +type comprehensionIndexNestedCandidateVisitor struct { + candidates VarSet + found bool +} + +func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor { + return &comprehensionIndexNestedCandidateVisitor{ + candidates: candidates, + } +} + +func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x any) { + NewGenericVisitor(vis.visit).Walk(x) +} + +func (vis *comprehensionIndexNestedCandidateVisitor) visit(x any) bool { + if vis.found { + return true + } + + if v, ok := x.(Value); ok && IsComprehension(v) { + varVis := varVisitorPool.Get().WithParams(VarVisitorParams{SkipRefHead: true}) + varVis.Walk(v) + vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0 + 
varVisitorPool.Put(varVis) + + return true + } + + return false +} + +// ModuleTreeNode represents a node in the module tree. The module +// tree is keyed by the package path. +type ModuleTreeNode struct { + Key Value + Modules []*Module + Children map[Value]*ModuleTreeNode + Hide bool +} + +func (n *ModuleTreeNode) String() string { + var rules []string + for _, m := range n.Modules { + for _, r := range m.Rules { + rules = append(rules, r.Head.String()) + } + } + return fmt.Sprintf("<ModuleTreeNode key:%v children:%v rules:%v hide:%v>", n.Key, n.Children, rules, n.Hide) +} + +// NewModuleTree returns a new ModuleTreeNode that represents the root +// of the module tree populated with the given modules. +func NewModuleTree(mods map[string]*Module) *ModuleTreeNode { + root := &ModuleTreeNode{ + Children: map[Value]*ModuleTreeNode{}, + } + for _, name := range util.KeysSorted(mods) { + m := mods[name] + node := root + for i, x := range m.Package.Path { + c, ok := node.Children[x.Value] + if !ok { + var hide bool + if i == 1 && SystemDocumentKey.Equal(x.Value) { + hide = true + } + c = &ModuleTreeNode{ + Key: x.Value, + Children: map[Value]*ModuleTreeNode{}, + Hide: hide, + } + node.Children[x.Value] = c + } + node = c + } + node.Modules = append(node.Modules, m) + } + return root +} + +// Size returns the number of modules in the tree. +func (n *ModuleTreeNode) Size() int { + s := len(n.Modules) + for _, c := range n.Children { + s += c.Size() + } + return s +} + +// Child returns n's child with key k. +func (n *ModuleTreeNode) child(k Value) *ModuleTreeNode { + switch k.(type) { + case String, Var: + return n.Children[k] + } + return nil +} + +// Find dereferences ref along the tree. ref[0] is converted to a String +// for convenience.
+func (n *ModuleTreeNode) find(ref Ref) (*ModuleTreeNode, Ref) { + if v, ok := ref[0].Value.(Var); ok { + ref = Ref{StringTerm(string(v))}.Concat(ref[1:]) + } + node := n + for i, r := range ref { + next := node.child(r.Value) + if next == nil { + tail := make(Ref, len(ref)-i) + tail[0] = VarTerm(string(ref[i].Value.(String))) + copy(tail[1:], ref[i+1:]) + return node, tail + } + node = next + } + return node, nil +} + +// DepthFirst performs a depth-first traversal of the module tree rooted at n. +// If f returns true, traversal will not continue to the children of n. +func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) { + if f(n) { + return + } + for _, node := range n.Children { + node.DepthFirst(f) + } +} + +// TreeNode represents a node in the rule tree. The rule tree is keyed by +// rule path. +type TreeNode struct { + Key Value + Values []*Rule + Children map[Value]*TreeNode + Sorted []Value + Hide bool + Index RuleIndex +} + +func (n *TreeNode) String() string { + return fmt.Sprintf("<TreeNode key:%v values:%v sorted:%v hide:%v>", n.Key, n.Values, n.Sorted, n.Hide) +} + +// NewRuleTree returns a new TreeNode that represents the root +// of the rule tree populated with the given rules.
+func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { + root := TreeNode{ + Key: mtree.Key, + } + + mtree.DepthFirst(func(m *ModuleTreeNode) bool { + for _, mod := range m.Modules { + if len(mod.Rules) == 0 { + root.add(mod.Package.Path, nil) + } + for _, rule := range mod.Rules { + root.add(rule.Ref().GroundPrefix(), rule) + } + } + return false + }) + + // ensure that data.system's TreeNode is hidden + node, tail := root.find(DefaultRootRef.Append(NewTerm(SystemDocumentKey))) + if len(tail) == 0 { // found + node.Hide = true + } + + root.DepthFirst(func(x *TreeNode) bool { + slices.SortFunc(x.Sorted, Value.Compare) + return false + }) + + return &root +} + +func (n *TreeNode) add(path Ref, rule *Rule) { + node, tail := n.find(path) + if len(tail) > 0 { + sub := treeNodeFromRef(tail, rule) + if node.Children == nil { + node.Children = make(map[Value]*TreeNode, 1) + } + node.Children[sub.Key] = sub + node.Sorted = append(node.Sorted, sub.Key) + } else if rule != nil { + node.Values = append(node.Values, rule) + } +} + +// Size returns the number of rules in the tree. +func (n *TreeNode) Size() (s int) { + for _, c := range n.Children { + s += c.Size() + } + return s + len(n.Values) +} + +// Child returns n's child with key k. +func (n *TreeNode) Child(k Value) *TreeNode { + switch k.(type) { + case Ref, Call: + return nil + default: + return n.Children[k] + } +} + +// Find dereferences ref along the tree +func (n *TreeNode) Find(ref Ref) *TreeNode { + node := n + for _, r := range ref { + node = node.Child(r.Value) + if node == nil { + return nil + } + } + return node +} + +// Iteratively dereferences ref along the node's subtree. +// - If matching fails immediately, the tail will contain the full ref. +// - Partial matching will result in a tail of non-zero length. +// - A complete match will result in a 0 length tail. 
+func (n *TreeNode) find(ref Ref) (*TreeNode, Ref) { + node := n + for i := range ref { + next := node.Child(ref[i].Value) + if next == nil { + tail := make(Ref, len(ref)-i) + copy(tail, ref[i:]) + return node, tail + } + node = next + } + return node, nil +} + +// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If +// f returns true, traversal will not continue to the children of n. +func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) { + if f(n) { + return + } + for _, node := range n.Children { + node.DepthFirst(f) + } +} + +func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode { + depth := len(ref) - 1 + key := ref[depth].Value + node := &TreeNode{ + Key: key, + Children: nil, + } + if rule != nil { + node.Values = []*Rule{rule} + } + + for i := len(ref) - 2; i >= 0; i-- { + key := ref[i].Value + node = &TreeNode{ + Key: key, + Children: map[Value]*TreeNode{ref[i+1].Value: node}, + Sorted: []Value{ref[i+1].Value}, + } + } + return node +} + +// flattenChildren flattens all children's rule refs into a sorted array. +func (n *TreeNode) flattenChildren() []Ref { + ret := newRefSet() + for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away + sub.DepthFirst(func(x *TreeNode) bool { + for _, r := range x.Values { + rule := r + ret.AddPrefix(rule.Ref()) + } + return false + }) + } + + return util.SortedFunc(ret.s, RefCompare) +} + +// Graph represents the graph of dependencies between rules. +type Graph struct { + adj map[util.T]map[util.T]struct{} + radj map[util.T]map[util.T]struct{} + nodes map[util.T]struct{} + sorted []util.T +} + +// NewGraph returns a new Graph based on modules. The list function must return +// the rules referred to directly by the ref. 
+func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph { + + graph := &Graph{ + adj: map[util.T]map[util.T]struct{}{}, + radj: map[util.T]map[util.T]struct{}{}, + nodes: map[util.T]struct{}{}, + sorted: nil, + } + + // Create visitor to walk a rule AST and add edges to the rule graph for + // each dependency. + vis := func(a *Rule) *GenericVisitor { + stop := false + return NewGenericVisitor(func(x any) bool { + switch x := x.(type) { + case Ref: + for _, b := range list(x) { + for node := b; node != nil; node = node.Else { + graph.addDependency(a, node) + } + } + case *Rule: + if stop { + // Do not recurse into else clauses (which will be handled + // by the outer visitor.) + return true + } + stop = true + } + return false + }) + } + + // Walk over all rules, add them to graph, and build adjacency lists. + for _, module := range modules { + WalkRules(module, func(a *Rule) bool { + graph.addNode(a) + vis(a).Walk(a) + return false + }) + } + + return graph +} + +// Dependencies returns the set of rules that x depends on. +func (g *Graph) Dependencies(x util.T) map[util.T]struct{} { + return g.adj[x] +} + +// Dependents returns the set of rules that depend on x. +func (g *Graph) Dependents(x util.T) map[util.T]struct{} { + return g.radj[x] +} + +// Sort returns a slice of rules sorted by dependencies. If a cycle is found, +// ok is set to false. 
+func (g *Graph) Sort() (sorted []util.T, ok bool) { + if g.sorted != nil { + return g.sorted, true + } + + sorter := &graphSort{ + sorted: make([]util.T, 0, len(g.nodes)), + deps: g.Dependencies, + marked: map[util.T]struct{}{}, + temp: map[util.T]struct{}{}, + } + + for node := range g.nodes { + if !sorter.Visit(node) { + return nil, false + } + } + + g.sorted = sorter.sorted + return g.sorted, true +} + +func (g *Graph) addDependency(u util.T, v util.T) { + + if _, ok := g.nodes[u]; !ok { + g.addNode(u) + } + + if _, ok := g.nodes[v]; !ok { + g.addNode(v) + } + + edges, ok := g.adj[u] + if !ok { + edges = map[util.T]struct{}{} + g.adj[u] = edges + } + + edges[v] = struct{}{} + + edges, ok = g.radj[v] + if !ok { + edges = map[util.T]struct{}{} + g.radj[v] = edges + } + + edges[u] = struct{}{} +} + +func (g *Graph) addNode(n util.T) { + g.nodes[n] = struct{}{} +} + +type graphSort struct { + sorted []util.T + deps func(util.T) map[util.T]struct{} + marked map[util.T]struct{} + temp map[util.T]struct{} +} + +func (sort *graphSort) Marked(node util.T) bool { + _, marked := sort.marked[node] + return marked +} + +func (sort *graphSort) Visit(node util.T) (ok bool) { + if _, ok := sort.temp[node]; ok { + return false + } + if sort.Marked(node) { + return true + } + sort.temp[node] = struct{}{} + for other := range sort.deps(node) { + if !sort.Visit(other) { + return false + } + } + sort.marked[node] = struct{}{} + delete(sort.temp, node) + sort.sorted = append(sort.sorted, node) + return true +} + +// GraphTraversal is a Traversal that understands the dependency graph +type GraphTraversal struct { + graph *Graph + visited map[util.T]struct{} +} + +// NewGraphTraversal returns a Traversal for the dependency graph +func NewGraphTraversal(graph *Graph) *GraphTraversal { + return &GraphTraversal{ + graph: graph, + visited: map[util.T]struct{}{}, + } +} + +// Edges lists all dependency connections for a given node +func (g *GraphTraversal) Edges(x util.T) []util.T { + 
return util.Keys(g.graph.Dependencies(x)) +} + +// Visited returns whether a node has been visited, setting a node to visited if not +func (g *GraphTraversal) Visited(u util.T) bool { + _, ok := g.visited[u] + g.visited[u] = struct{}{} + return ok +} + +type unsafePair struct { + Expr *Expr + Vars VarSet +} + +type unsafeVarLoc struct { + Var Var + Loc *Location +} + +type unsafeVars map[*Expr]VarSet + +func (vs unsafeVars) Add(e *Expr, v Var) { + if u, ok := vs[e]; ok { + u[v] = struct{ *Location }{} + } else { + vs[e] = VarSet{v: struct{ *Location }{}} + } +} + +func (vs unsafeVars) Set(e *Expr, s VarSet) { + vs[e] = s +} + +func (vs unsafeVars) Update(o unsafeVars) { + for k, v := range o { + if _, ok := vs[k]; !ok { + vs[k] = VarSet{} + } + vs[k].Update(v) + } +} + +func (vs unsafeVars) Vars() (result []unsafeVarLoc) { + locs := map[Var]*Location{} + + // If var appears in multiple sets then pick first by location. + for expr, vars := range vs { + for v := range vars { + if locs[v].Compare(expr.Location) > 0 { + locs[v] = expr.Location + } + } + } + + for v, loc := range locs { + result = append(result, unsafeVarLoc{Var: v, Loc: loc}) + } + + return util.SortedFunc(result, func(a, b unsafeVarLoc) int { + return a.Loc.Compare(b.Loc) + }) +} + +func (vs unsafeVars) Slice() (result []unsafePair) { + for expr, vs := range vs { + result = append(result, unsafePair{ + Expr: expr, + Vars: vs, + }) + } + return +} + +// reorderBodyForSafety returns a copy of the body ordered such that +// left to right evaluation of the body will not encounter unbound variables +// in input positions or negated expressions. +// +// Expressions are added to the re-ordered body as soon as they are considered +// safe. If multiple expressions become safe in the same pass, they are added +// in their original order. This results in minimal re-ordering of the body. 
+// +// If the body cannot be reordered to ensure safety, the second return value +// contains a mapping of expressions to unsafe variables in those expressions. +func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) { + vis := varVisitorPool.Get().WithParams(SafetyCheckVisitorParams) + vis.WalkBody(body) + + defer varVisitorPool.Put(vis) + + bodyVars := vis.Vars().Copy() + safe := bodyVars.Intersect(globals) + unsafe := make(unsafeVars, len(bodyVars)-len(safe)) + + for _, e := range body { + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) + vis.Walk(e) + for v := range vis.Vars() { + if _, ok := safe[v]; !ok { + unsafe.Add(e, v) + } + } + } + + reordered := make(Body, 0, len(body)) + output := NewVarSet() + unsVis := varVisitorPool.Get() + + for { + n := len(reordered) + + for _, e := range body { + if reordered.Contains(e) { + continue + } + + ovs := outputVarsForExpr(e, arity, safe, output, vis) + + // check closures: is this expression closing over variables that + // haven't been made safe by what's already included in `reordered`? + unsafeVarsInClosures(e, unsVis) + cv := unsVis.Vars().Intersect(bodyVars).Diff(globals) + unsVis.Clear() + + ob := outputVarsForBody(reordered, arity, safe, vis) + + if cv.DiffCount(ob) > 0 { + uv := cv.Diff(ob) + if uv.Equal(ovs) { // special case "closure-self" + continue + } + unsafe.Set(e, uv) + } + + for v := range unsafe[e] { + if ovs.Contains(v) || safe.Contains(v) { + delete(unsafe[e], v) + } + } + + if len(unsafe[e]) == 0 { + delete(unsafe, e) + reordered.Append(e) + safe.Update(ovs) // this expression's outputs are safe + } + } + + if len(reordered) == n { // fixed point, could not add any expr of body + break + } + } + + varVisitorPool.Put(unsVis) + + // Recursively visit closures and perform the safety checks on them. + // Update the globals at each expression to include the variables that could + // be closed over. 
+ g := globals.Copy() + xform := newBodySafetyTransformer(builtins, arity) + xform.gv = NewGenericVisitor(xform.Visit) + + for i, e := range reordered { + if i > 0 { + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) + vis.Walk(reordered[i-1]) + g.Update(vis.Vars()) + } + xform.current = e + xform.globals = g + xform.unsafe = unsafe + xform.gv.Walk(e) + } + + return reordered, unsafe +} + +type bodySafetyTransformer struct { + builtins map[string]*Builtin + arity func(Ref) int + current *Expr + globals VarSet + unsafe unsafeVars + gv *GenericVisitor +} + +func newBodySafetyTransformer(builtins map[string]*Builtin, arity func(Ref) int) *bodySafetyTransformer { + return &bodySafetyTransformer{ + builtins: builtins, + arity: arity, + } +} + +func (xform *bodySafetyTransformer) Visit(x any) bool { + if xform.gv == nil { + xform.gv = NewGenericVisitor(xform.Visit) + } + switch term := x.(type) { + case *Term: + switch x := term.Value.(type) { + case *object: + cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + xform.gv.Walk(kcpy) + vcpy := v.Copy() + xform.gv.Walk(vcpy) + return kcpy, vcpy, nil + }) + term.Value = cpy + return true + case *set: + cpy, _ := x.Map(func(v *Term) (*Term, error) { + vcpy := v.Copy() + xform.gv.Walk(vcpy) + return vcpy, nil + }) + term.Value = cpy + return true + case *ArrayComprehension: + xform.reorderArrayComprehensionSafety(x) + return true + case *ObjectComprehension: + xform.reorderObjectComprehensionSafety(x) + return true + case *SetComprehension: + xform.reorderSetComprehensionSafety(x) + return true + } + case *Expr: + if ev, ok := term.Terms.(*Every); ok { + xform.globals.Update(ev.KeyValueVars()) + ev.Body = xform.reorderComprehensionSafety(NewVarSet(), ev.Body) + return true + } + } + return false +} + +func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body { + bv := body.Vars(SafetyCheckVisitorParams) + bv.Update(xform.globals) + + if tv.DiffCount(bv) > 0 { + uv 
:= tv.Diff(bv) + for v := range uv { + xform.unsafe.Add(xform.current, v) + } + } + + r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body) + if len(u) == 0 { + return r + } + + xform.unsafe.Update(u) + return body +} + +func (xform *bodySafetyTransformer) reorderArrayComprehensionSafety(ac *ArrayComprehension) { + ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body) +} + +func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) { + tv := oc.Key.Vars() + tv.Update(oc.Value.Vars()) + oc.Body = xform.reorderComprehensionSafety(tv, oc.Body) +} + +func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) { + sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body) +} + +// unsafeVarsInClosures collects vars that are contained in closures within +// this expression. +func unsafeVarsInClosures(e *Expr, vis *VarVisitor) { + WalkClosures(e, func(x any) bool { + if ev, ok := x.(*Every); ok { + vis.WalkBody(ev.Body) + return true + } + vis.Walk(x) + return true + }) +} + +// OutputVarsFromBody returns all variables which are the "output" for +// the given body. For safety checks this means that they would be +// made safe by the body. +func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { + return outputVarsForBody(body, c.GetArity, safe, nil) +} + +func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet, vis *VarVisitor) VarSet { + o := safe.Copy() + output := VarSet{} + + vis = ClearOrNewVarVisitor(vis) + + for _, e := range body { + o.Update(outputVarsForExpr(e, arity, o, output, vis)) + } + return o.Diff(safe) +} + +// OutputVarsFromExpr returns all variables which are the "output" for +// the given expression. For safety checks this means that they would be +// made safe by the expr. 
+func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { + return outputVarsForExpr(expr, c.GetArity, safe, VarSet{}, nil) +} + +func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet, output VarSet, vis *VarVisitor) VarSet { + // Negated expressions must be safe. + if expr.Negated { + return VarSet{} + } + + if len(expr.With) > 0 { + vis = ClearOrNewVarVisitor(vis).WithParams(SafetyCheckVisitorParams) + } + + // With modifier inputs must be safe. + for _, with := range expr.With { + vis = vis.Clear().WithParams(SafetyCheckVisitorParams) + vis.Walk(with) + if vis.Vars().DiffCount(safe) > 0 { + return VarSet{} + } + } + + switch terms := expr.Terms.(type) { + case *TemplateString: + // Template-expressions have no output vars + return VarSet{} + case *Term: + return outputVarsForTerms(expr, safe, nil) + case []*Term: + if expr.IsEquality() { + return outputVarsForExprEq(expr, safe, output) + } + + operator, ok := terms[0].Value.(Ref) + if !ok { + return VarSet{} + } + + ar := arity(operator) + if ar < 0 { + return VarSet{} + } + + return outputVarsForExprCall(expr, ar, safe, terms, vis, output) + case *Every: + return outputVarsForTerms(terms.Domain, safe, output) + default: + panic("illegal expression") + } +} + +func outputVarsForExprEq(expr *Expr, safe VarSet, output VarSet) VarSet { + if !validEqAssignArgCount(expr) { + return safe + } + + output = outputVarsForTerms(expr, safe, output) + output.Update(safe) + output.Update(Unify(output, expr.Operand(0), expr.Operand(1))) + + diff := output.Diff(safe) + + clear(output) + + return diff +} + +func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term, vis *VarVisitor, output VarSet) VarSet { + clear(output) + + output = outputVarsForTerms(expr, safe, output) + + numInputTerms := arity + 1 + if numInputTerms >= len(terms) { + return output + } + + params := VarVisitorParams{ + SkipClosures: true, + SkipSets: true, + SkipObjectKeys: true, + SkipRefHead: true, + } + + vis 
= ClearOrNewVarVisitor(vis).WithParams(params) + vis.WalkArgs(Args(terms[:numInputTerms])) + + unsafe := vis.Vars().Diff(output).DiffCount(safe) + if unsafe > 0 { + return VarSet{} + } + + vis = vis.Clear().WithParams(params) + vis.WalkArgs(Args(terms[numInputTerms:])) + output.Update(vis.vars) + return output +} + +func outputVarsForTerms(expr any, safe, output VarSet) VarSet { + if output == nil { + output = VarSet{} + } + WalkTerms(expr, func(x *Term) bool { + switch r := x.Value.(type) { + case *SetComprehension, *ArrayComprehension, *ObjectComprehension, *TemplateString: + return true + case Ref: + if !isRefSafe(r, safe) { + return true + } + if !r.IsGround() { + // Avoiding r.OutputVars() here as it won't allow reusing the visitor. + vis := varVisitorPool.Get().WithParams(VarVisitorParams{SkipRefHead: true}) + vis.WalkRef(r) + output.Update(vis.Vars()) + varVisitorPool.Put(vis) + } + } + return false + }) + return output +} + +type equalityFactory struct { + gen *localVarGenerator +} + +func newEqualityFactory(gen *localVarGenerator) *equalityFactory { + return &equalityFactory{gen} +} + +func (f *equalityFactory) Generate(other *Term) *Expr { + term := NewTerm(f.gen.Generate()).SetLocation(other.Location) + expr := Equality.Expr(term, other) + expr.Generated = true + expr.Location = other.Location + return expr +} + +// TODO: Move to internal package? 
+const LocalVarPrefix = "__local" + +type localVarGenerator struct { + exclude VarSet + suffix string + next int +} + +func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator { + vis := NewVarVisitor() + for _, key := range sorted { + vis.Walk(modules[key]) + } + return &localVarGenerator{exclude: vis.vars, suffix: LocalVarPrefix} +} + +func newLocalVarGenerator(suffix string, node any) *localVarGenerator { + vis := NewVarVisitor() + vis.Walk(node) + return &localVarGenerator{exclude: vis.vars, suffix: LocalVarPrefix + suffix} +} + +func (l *localVarGenerator) Generate() Var { + buf := make([]byte, 0, len(l.suffix)+util.NumDigitsInt(l.next)+2) + for { + buf = append(util.AppendInt(append(buf, l.suffix...), l.next), "__"...) + result := Var(util.ByteSliceToString(buf)) + l.next++ + if !l.exclude.Contains(result) { + return result + } + + buf = buf[:0] + } +} + +func getGlobals(pkg *Package, rules []Ref, imports []*Import) map[Var]*usedRef { + globals := make(map[Var]*usedRef, len(rules)+len(imports)) + + for _, ref := range rules { + v := ref[0].Value.(Var) + globals[v] = &usedRef{ref: pkg.Path.Append(StringTerm(string(v)))} + } + + for _, imp := range imports { + path := imp.Path.Value.(Ref) + if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { + continue + } + globals[imp.Name()] = &usedRef{ref: path} + } + + return globals +} + +func requiresEval(x *Term) bool { + if x == nil { + return false + } + return ContainsRefs(x) || ContainsComprehensions(x) +} + +func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref { + r := make(Ref, 0, len(ref)) + for i, x := range ref { + switch v := x.Value.(type) { + case Var: + if g, ok := globals[v]; ok && !ignore.Contains(v) { + cpy := g.ref.Copy() + for i := range cpy { + cpy[i].SetLocation(x.Location) + } + if i == 0 { + r = cpy + } else { + r = append(r, NewTerm(cpy).SetLocation(x.Location)) + } + g.used = true + } else { + r = 
append(r, x) + } + case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + r = append(r, resolveRefsInTerm(globals, ignore, x)) + default: + r = append(r, x) + } + } + + return r +} + +type usedRef struct { + ref Ref + used bool +} + +func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error { + ignore := &declaredVarStack{} + + vars := NewVarSet() + var vis *GenericVisitor + var err error + + // Walk args to collect vars and transform body so that callers can shadow + // root documents. + vis = NewGenericVisitor(func(x any) bool { + if err != nil { + return true + } + switch x := x.(type) { + case Var: + vars.Add(x) + + // Object keys cannot be pattern matched so only walk values. + case *object: + x.Foreach(func(_, v *Term) { + vis.Walk(v) + }) + + // Skip terms that could contain vars that cannot be pattern matched. + case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + return true + + case *Term: + if _, ok := x.Value.(Ref); ok { + if RootDocumentRefs.Contains(x) { + // We could support args named input, data, etc. however + // this would require rewriting terms in the head and body. + // Preventing root document shadowing is simpler, and + // arguably, will prevent confusing names from being used. + // NOTE: this check is also performed as part of strict-mode in + // checkRootDocumentOverrides. 
+ err = fmt.Errorf("args must not shadow %v (use a different variable name)", x) + return true + } + } + } + return false + }) + + vis.Walk(rule.Head.Args) + + if err != nil { + return err + } + + ignore.Push(vars) + ignore.Push(declaredVars(rule.Body)) + + ref := rule.Head.Ref() + for i := 1; i < len(ref); i++ { + ref[i] = resolveRefsInTerm(globals, ignore, ref[i]) + } + if rule.Head.Key != nil { + rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key) + } + + if rule.Head.Value != nil { + rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value) + } + + rule.Body = resolveRefsInBody(globals, ignore, rule.Body) + return nil +} + +func resolveRefsInBody(globals map[Var]*usedRef, ignore *declaredVarStack, body Body) Body { + r := make([]*Expr, 0, len(body)) + for _, expr := range body { + r = append(r, resolveRefsInExpr(globals, ignore, expr)) + } + return r +} + +func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr *Expr) *Expr { + cpy := *expr + switch ts := expr.Terms.(type) { + case *Term: + cpy.Terms = resolveRefsInTerm(globals, ignore, ts) + case []*Term: + buf := make([]*Term, len(ts)) + for i := range ts { + buf[i] = resolveRefsInTerm(globals, ignore, ts[i]) + } + cpy.Terms = buf + case *SomeDecl: + if val, ok := ts.Symbols[0].Value.(Call); ok { + cpy.Terms = &SomeDecl{ + Symbols: []*Term{CallTerm(resolveRefsInTermSlice(globals, ignore, val)...)}, + Location: ts.Location, + } + } + case *Every: + locals := NewVarSet() + if ts.Key != nil { + locals.Update(ts.Key.Vars()) + } + locals.Update(ts.Value.Vars()) + ignore.Push(locals) + cpy.Terms = &Every{ + Key: ts.Key.Copy(), // TODO(sr): do more? + Value: ts.Value.Copy(), // TODO(sr): do more? 
+ Domain: resolveRefsInTerm(globals, ignore, ts.Domain), + Body: resolveRefsInBody(globals, ignore, ts.Body), + } + ignore.Pop() + } + for _, w := range cpy.With { + w.Target = resolveRefsInTerm(globals, ignore, w.Target) + w.Value = resolveRefsInTerm(globals, ignore, w.Value) + } + return &cpy +} + +func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term *Term) *Term { + switch v := term.Value.(type) { + case Var: + if g, ok := globals[v]; ok && !ignore.Contains(v) { + cpy := g.ref.Copy() + for i := range cpy { + cpy[i].SetLocation(term.Location) + } + g.used = true + return NewTerm(cpy).SetLocation(term.Location) + } + return term + case Ref: + fqn := resolveRef(globals, ignore, v) + cpy := *term + cpy.Value = fqn + return &cpy + case *object: + cpy := *term + cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) { + k = resolveRefsInTerm(globals, ignore, k) + v = resolveRefsInTerm(globals, ignore, v) + return k, v, nil + }) + return &cpy + case *Array: + cpy := *term + cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...) 
+ return &cpy + case Call: + cpy := *term + cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v)) + return &cpy + case Set: + s, _ := v.Map(func(e *Term) (*Term, error) { + return resolveRefsInTerm(globals, ignore, e), nil + }) + cpy := *term + cpy.Value = s + return &cpy + case *ArrayComprehension: + ac := &ArrayComprehension{} + ignore.Push(declaredVars(v.Body)) + ac.Term = resolveRefsInTerm(globals, ignore, v.Term) + ac.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = ac + ignore.Pop() + return &cpy + case *ObjectComprehension: + oc := &ObjectComprehension{} + ignore.Push(declaredVars(v.Body)) + oc.Key = resolveRefsInTerm(globals, ignore, v.Key) + oc.Value = resolveRefsInTerm(globals, ignore, v.Value) + oc.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = oc + ignore.Pop() + return &cpy + case *SetComprehension: + sc := &SetComprehension{} + ignore.Push(declaredVars(v.Body)) + sc.Term = resolveRefsInTerm(globals, ignore, v.Term) + sc.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = sc + ignore.Pop() + return &cpy + case *TemplateString: + ts := &TemplateString{} + if len(v.Parts) > 0 { + ts.Parts = make([]Node, 0, len(v.Parts)) + } + for _, p := range v.Parts { + if expr, ok := p.(*Expr); ok { + ts.Parts = append(ts.Parts, resolveRefsInExpr(globals, ignore, expr)) + } else { + ts.Parts = append(ts.Parts, p) + } + } + cpy := *term + cpy.Value = ts + return &cpy + default: + return term + } +} + +func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term { + cpy := make([]*Term, terms.Len()) + for i := range terms.Len() { + cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i)) + } + return cpy +} + +func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term { + cpy := make([]*Term, len(terms)) + for i := range terms { + cpy[i] = resolveRefsInTerm(globals, ignore, terms[i]) + } + 
return cpy +} + +type declaredVarStack []VarSet + +func (s declaredVarStack) Contains(v Var) bool { + for i := len(s) - 1; i >= 0; i-- { + if _, ok := s[i][v]; ok { + return ok + } + } + return false +} + +func (s declaredVarStack) Add(v Var) { + s[len(s)-1].Add(v) +} + +func (s *declaredVarStack) Push(vs VarSet) { + *s = append(*s, vs) +} + +func (s *declaredVarStack) Pop() { + curr := *s + *s = curr[:len(curr)-1] +} + +func declaredVars(x any) VarSet { + vars := NewVarSet() + vis := NewGenericVisitor(func(x any) bool { + switch x := x.(type) { + case *Expr: + if x.IsAssignment() && validEqAssignArgCount(x) { + WalkVars(x.Operand(0), func(v Var) bool { + vars.Add(v) + return false + }) + } else if decl, ok := x.Terms.(*SomeDecl); ok { + for i := range decl.Symbols { + switch val := decl.Symbols[i].Value.(type) { + case Var: + vars.Add(val) + case Call: + args := val[1:] + if len(args) == 3 { // some x, y in xs + WalkVars(args[1], func(v Var) bool { + vars.Add(v) + return false + }) + } + // some x in xs + WalkVars(args[0], func(v Var) bool { + vars.Add(v) + return false + }) + } + } + } + case *ArrayComprehension, *SetComprehension, *ObjectComprehension: + return true + } + return false + }) + vis.Walk(x) + return vars +} + +// rewriteComprehensionTerms will rewrite comprehensions so that the term part +// is bound to a variable in the body. This allows any type of term to be used +// in the term part (even if the term requires evaluation.) 
+// +// For instance, given the following comprehension: +// +// [x[0] | x = y[_]; y = [1,2,3]] +// +// The comprehension would be rewritten as: +// +// [__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]] +func rewriteComprehensionTerms(f *equalityFactory, node any) (any, error) { + return TransformComprehensions(node, func(x any) (Value, error) { + switch x := x.(type) { + case *ArrayComprehension: + if requiresEval(x.Term) { + expr := f.Generate(x.Term) + x.Term = expr.Operand(0) + x.Body = appendToBody(x.Body, expr) + } + return x, nil + case *SetComprehension: + if requiresEval(x.Term) { + expr := f.Generate(x.Term) + x.Term = expr.Operand(0) + x.Body = appendToBody(x.Body, expr) + } + return x, nil + case *ObjectComprehension: + if requiresEval(x.Key) { + expr := f.Generate(x.Key) + x.Key = expr.Operand(0) + x.Body = appendToBody(x.Body, expr) + } + if requiresEval(x.Value) { + expr := f.Generate(x.Value) + x.Value = expr.Operand(0) + x.Body = appendToBody(x.Body, expr) + } + return x, nil + } + panic("illegal type") + }) +} + +// rewriteEquals will rewrite exprs under x as unification calls instead of == +// calls. For example: +// +// data.foo == data.bar is rewritten as data.foo = data.bar +// +// This stage should only run the safety check (since == is a built-in with no +// outputs, so the inputs must not be marked as safe.) +// +// This stage is not executed by the query compiler by default because when +// callers specify == instead of = they expect to receive a true/false/undefined +// result back whereas with = the result is only ever true/undefined. For +// partial evaluation cases we do want to rewrite == to = to simplify the +// result. 
+func rewriteEquals(x any) (modified bool) { + unifyOp := Equality.Ref() + t := NewGenericTransformer(func(x any) (any, error) { + if x, ok := x.(*Expr); ok && x.IsCall() { + operator := x.Operator() + if operator.Equal(doubleEq) && len(x.Operands()) == 2 { + modified = true + x.SetOperator(NewTerm(unifyOp)) + } + } + return x, nil + }) + _, _ = Transform(t, x) // ignore error + return modified +} + +func rewriteTestEqualities(f *equalityFactory, body Body) Body { + result := make(Body, 0, len(body)) + for _, expr := range body { + // We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before + // reaching the negation check. + if !expr.Negated && !expr.Generated { + switch { + case expr.IsEquality(): + terms := expr.Terms.([]*Term) + result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result) + result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result) + case expr.IsEvery(): + // We rewrite equalities inside of every-bodies as a fail here will be the cause of the test-rule fail. + // Failures inside other expressions with closures, such as comprehensions, won't cause the test-rule to fail, so we skip those. + every := expr.Terms.(*Every) + every.Body = rewriteTestEqualities(f, every.Body) + } + } + result = appendToBody(result, expr) + } + return result +} + +func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch term.Value.(type) { + case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension: + generated := f.Generate(term) + generated.With = original.With + result.Append(generated) + connectGeneratedExprs(original, generated) + return result, result[len(result)-1].Operand(0) + } + return result, term +} + +// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and +// comprehensions) are bound to vars earlier in the query. This translation +// results in eager evaluation. 
+// +// For instance, given the following query: +// +// foo(data.bar) = 1 +// +// The rewritten version will be: +// +// __local0__ = data.bar; foo(__local0__) = 1 +func rewriteDynamics(f *equalityFactory, body Body) Body { + result := make(Body, 0, len(body)) + for _, expr := range body { + switch { + case expr.IsEquality(): + result = rewriteDynamicsEqExpr(f, expr, result) + case expr.IsCall(): + result = rewriteDynamicsCallExpr(f, expr, result) + case expr.IsEvery(): + result = rewriteDynamicsEveryExpr(f, expr, result) + default: + result = rewriteDynamicsTermExpr(f, expr, result) + } + } + return result +} + +func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body { + if !validEqAssignArgCount(expr) { + return appendToBody(result, expr) + } + terms := expr.Terms.([]*Term) + result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result) + result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result) + result.Append(expr) + return result +} + +func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body { + terms := expr.Terms.([]*Term) + for i := 1; i < len(terms); i++ { + result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result) + } + result.Append(expr) + return result +} + +func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body { + ev := expr.Terms.(*Every) + result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result) + ev.Body = rewriteDynamics(f, ev.Body) + result.Append(expr) + return result +} + +func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body { + term := expr.Terms.(*Term) + result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result) + result.Append(expr) + return result +} + +func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch v := term.Value.(type) { + case Ref: + for i := 1; i < len(v); i++ { + result, v[i] = rewriteDynamicsOne(original, f, v[i], result) + } + case 
*ArrayComprehension: + v.Body = rewriteDynamics(f, v.Body) + case *SetComprehension: + v.Body = rewriteDynamics(f, v.Body) + case *ObjectComprehension: + v.Body = rewriteDynamics(f, v.Body) + default: + result, term = rewriteDynamicsOne(original, f, term, result) + } + return result, term +} + +func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch v := term.Value.(type) { + case Ref: + for i := 1; i < len(v); i++ { + result, v[i] = rewriteDynamicsOne(original, f, v[i], result) + } + generated := f.Generate(term) + generated.With = original.With + result.Append(generated) + connectGeneratedExprs(original, generated) + return result, result[len(result)-1].Operand(0) + case *Array: + for i := range v.Len() { + var t *Term + result, t = rewriteDynamicsOne(original, f, v.Elem(i), result) + v.set(i, t) + } + return result, term + case *object: + cpy := NewObject() + v.Foreach(func(key, value *Term) { + result, key = rewriteDynamicsOne(original, f, key, result) + result, value = rewriteDynamicsOne(original, f, value, result) + cpy.Insert(key, value) + }) + return result, NewTerm(cpy).SetLocation(term.Location) + case Set: + cpy := NewSet() + for _, term := range v.Slice() { + var rw *Term + result, rw = rewriteDynamicsOne(original, f, term, result) + cpy.Add(rw) + } + return result, NewTerm(cpy).SetLocation(term.Location) + case *ArrayComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + case *SetComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + case *ObjectComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + 
result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + } + return result, term +} + +func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) { + body = rewriteDynamics(f, body) + generated := f.Generate(term) + generated.With = original.With + return body, generated +} + +func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) { + for i := range rule.Head.Args { + support, output := expandExprTerm(gen, rule.Head.Args[i]) + rule.Body = appendToBody(rule.Body, support...) + rule.Head.Args[i] = output + } + if rule.Head.Key != nil { + support, output := expandExprTerm(gen, rule.Head.Key) + rule.Body = appendToBody(rule.Body, support...) + rule.Head.Key = output + } + if rule.Head.Value != nil { + support, output := expandExprTerm(gen, rule.Head.Value) + rule.Body = appendToBody(rule.Body, support...) + rule.Head.Value = output + } +} + +// isEmptyBody true for a rule like `pi := 3.14 if { true}` +func isEmptyBody(body Body) bool { + if len(body) == 1 { + if term, ok := body[0].Terms.(*Term); ok { + return Boolean(true).Equal(term.Value) + } + } + + return false +} + +func isConstantRule(rule *Rule) bool { + if isEmptyBody(rule.Body) { + switch v := rule.Head.Value.Value.(type) { + case String, Var, Number, Boolean, Null: + return true + case *Array, *object, Set: + return v.IsGround() + } + } + return false +} + +// appendToBody inlines Body.Append and adds additional logic for +// replacing a single 'true' expression (i.e an empty body) with +// the first expression to be appended, while appending the rest +// of the expressions as normal. Additionally accepts multiple +// expressions to append, which potentially reduces allocations +// in larger appends. 
+func appendToBody(body Body, exprs ...*Expr) Body { + if len(exprs) == 0 { + return body + } + + blen := len(body) + if blen == 1 && isEmptyBody(body) { + // body will no longer be empty, so instead of appending, + // replace the 'true' expression with the new expression. + exprs[0].Index = 0 + body[0], exprs = exprs[0], exprs[1:] + } + for i, expr := range exprs { + expr.Index = blen + i + } + + return append(body, exprs...) +} + +func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body { + cpy := make(Body, 0, len(body)) + for i := range body { + for _, expr := range expandExpr(gen, body[i]) { + cpy.Append(expr) + } + } + return cpy +} + +func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { + for i := range expr.With { + extras, value := expandExprTerm(gen, expr.With[i].Value) + expr.With[i].Value = value + result = append(result, extras...) + } + switch terms := expr.Terms.(type) { + case *Term: + extras, term := expandExprTerm(gen, terms) + if len(expr.With) > 0 { + for i := range extras { + extras[i].With = expr.With + } + } + expr.Terms = term + result = append(append(result, extras...), expr) + case []*Term: + for i := 1; i < len(terms); i++ { + var extras []*Expr + extras, terms[i] = expandExprTerm(gen, terms[i]) + connectGeneratedExprs(expr, extras...) + if len(expr.With) > 0 { + for i := range extras { + extras[i].With = expr.With + } + } + result = append(result, extras...) + } + result = append(result, expr) + case *Every: + var extras []*Expr + + term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location) + eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location) + eq.Generated = true + eq.With = expr.With + extras = expandExpr(gen, eq) + terms.Domain = term + + terms.Body = rewriteExprTermsInBody(gen, terms.Body) + result = append(result, extras...) 
+ result = append(result, expr) + } + return +} + +func connectGeneratedExprs(parent *Expr, children ...*Expr) { + for _, child := range children { + child.generatedFrom = parent + parent.generates = append(parent.generates, child) + } +} + +func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) { + output = term + + switch v := term.Value.(type) { + case Call: + for i := 1; i < len(v); i++ { + var extras []*Expr + extras, v[i] = expandExprTerm(gen, v[i]) + support = append(support, extras...) + } + output = NewTerm(gen.Generate()).SetLocation(term.Location) + expr := v.MakeExpr(output).SetLocation(term.Location) + expr.Generated = true + support = append(support, expr) + case Ref: + support = expandExprRef(gen, v) + case *Array: + support = expandExprTermArray(gen, v) + case *object: + cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { + extras1, expandedKey := expandExprTerm(gen, k) + extras2, expandedValue := expandExprTerm(gen, v) + support = append(support, extras1...) + support = append(support, extras2...) + return expandedKey, expandedValue, nil + }) + output = NewTerm(cpy).SetLocation(term.Location) + case Set: + cpy, _ := v.Map(func(x *Term) (*Term, error) { + extras, expanded := expandExprTerm(gen, x) + support = append(support, extras...) + return expanded, nil + }) + output = NewTerm(cpy).SetLocation(term.Location) + case *ArrayComprehension: + support, term := expandExprTerm(gen, v.Term) + v.Term = term + v.Body = rewriteExprTermsInBody(gen, appendToBody(v.Body, support...)) + case *SetComprehension: + support, term := expandExprTerm(gen, v.Term) + v.Term = term + v.Body = rewriteExprTermsInBody(gen, appendToBody(v.Body, support...)) + case *ObjectComprehension: + support, key := expandExprTerm(gen, v.Key) + v.Body = appendToBody(v.Body, support...) 
+ v.Key = key + support, value := expandExprTerm(gen, v.Value) + v.Value = value + v.Body = rewriteExprTermsInBody(gen, appendToBody(v.Body, support...)) + } + return +} + +func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) { + // Start by calling a normal expandExprTerm on all terms. + support = expandExprTermSlice(gen, v) + + // Rewrite references in order to support indirect references. We rewrite + // e.g. + // + // [1, 2, 3][i] + // + // to + // + // __local_var = [1, 2, 3] + // __local_var[i] + // + // to support these. This only impacts the reference subject, i.e. the + // first item in the slice. + var subject = v[0] + switch subject.Value.(type) { + case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + f := newEqualityFactory(gen) + assignToLocal := f.Generate(subject) + support = append(support, assignToLocal) + v[0] = assignToLocal.Operand(0) + } + return +} + +func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) { + for i := range arr.Len() { + extras, v := expandExprTerm(gen, arr.Elem(i)) + arr.set(i, v) + support = append(support, extras...) + } + return +} + +func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) { + for i := range v { + var extras []*Expr + extras, v[i] = expandExprTerm(gen, v[i]) + support = append(support, extras...) + } + return +} + +type localDeclaredVars struct { + vars []*declaredVarSet + + // rewritten contains a mapping of *all* user-defined variables + // that have been rewritten whereas vars contains the state + // from the current query (not any nested queries, and all vars + // seen). 
+ rewritten map[Var]Var + + // indicates if an assignment (:= operator) has been seen *ever* + assignment bool +} + +type varOccurrence uint8 + +const ( + newVar varOccurrence = iota + argVar + seenVar + assignedVar + declaredVar +) + +type declaredVarSet struct { + vs map[Var]Var + occurrence map[Var]varOccurrence + count map[Var]int +} + +func newDeclaredVarSet() *declaredVarSet { + return &declaredVarSet{ + vs: map[Var]Var{}, + occurrence: map[Var]varOccurrence{}, + count: map[Var]int{}, + } +} + +func (s *declaredVarSet) clear() *declaredVarSet { + clear(s.vs) + clear(s.occurrence) + clear(s.count) + + return s +} + +func newLocalDeclaredVars() *localDeclaredVars { + return &localDeclaredVars{ + vars: []*declaredVarSet{newDeclaredVarSet()}, + rewritten: map[Var]Var{}, + } +} + +func (s *localDeclaredVars) Clear() { + var vs *declaredVarSet + if len(s.vars) > 0 { + vs = s.vars[0] + } + + clear(s.vars) + clear(s.rewritten) + + s.vars = s.vars[:0] + + if vs != nil { + s.vars = append(s.vars, vs.clear()) + } + if s.vars[0] == nil { + s.vars[0] = newDeclaredVarSet() + } + s.assignment = false +} + +func (s *localDeclaredVars) Copy() *localDeclaredVars { + stack := &localDeclaredVars{ + vars: make([]*declaredVarSet, 0, len(s.vars)), + } + + for i := range s.vars { + stack.vars = append(stack.vars, newDeclaredVarSet()) + maps.Copy(stack.vars[0].vs, s.vars[i].vs) + maps.Copy(stack.vars[0].occurrence, s.vars[i].occurrence) + maps.Copy(stack.vars[0].count, s.vars[i].count) + } + + stack.rewritten = maps.Clone(s.rewritten) + + return stack +} + +func (s *localDeclaredVars) Push() { + s.vars = append(s.vars, newDeclaredVarSet()) +} + +func (s *localDeclaredVars) Pop() *declaredVarSet { + sl := s.vars + curr := sl[len(sl)-1] + s.vars = sl[:len(sl)-1] + return curr +} + +func (s localDeclaredVars) Peek() *declaredVarSet { + return s.vars[len(s.vars)-1] +} + +func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) { + elem := s.vars[len(s.vars)-1] + elem.vs[x] = 
y + elem.occurrence[x] = occurrence + + elem.count[x] = 1 + + // If the variable has been rewritten (where x != y, with y being + // the generated value), store it in the map of rewritten vars. + // Assume that the generated values are unique for the compilation. + if !x.Equal(y) { + s.rewritten[y] = x + } +} + +func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) { + for i := len(s.vars) - 1; i >= 0; i-- { + if y, ok = s.vars[i].vs[x]; ok { + return + } + } + return +} + +// Occurrence returns a flag that indicates whether x has occurred in the +// current scope. +func (s localDeclaredVars) Occurrence(x Var) varOccurrence { + return s.vars[len(s.vars)-1].occurrence[x] +} + +// GlobalOccurrence returns a flag that indicates whether x has occurred in the +// global scope. +func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) { + for i := len(s.vars) - 1; i >= 0; i-- { + if occ, ok := s.vars[i].occurrence[x]; ok { + return occ, true + } + } + return newVar, false +} + +// Seen marks x as seen by incrementing its counter +func (s localDeclaredVars) Seen(x Var) { + for i := len(s.vars) - 1; i >= 0; i-- { + dvs := s.vars[i] + if c, ok := dvs.count[x]; ok { + dvs.count[x] = c + 1 + return + } + } + + s.vars[len(s.vars)-1].count[x] = 1 +} + +// Count returns how many times x has been seen +func (s localDeclaredVars) Count(x Var) int { + for i := len(s.vars) - 1; i >= 0; i-- { + if c, ok := s.vars[i].count[x]; ok { + return c + } + } + + return 0 +} + +// rewriteLocalVars rewrites bodies to remove assignment/declaration +// expressions. For example: +// +// a := 1; p[a] +// +// Is rewritten to: +// +// __local0__ = 1; p[__local0__] +// +// During rewriting, assignees are validated to prevent use before declaration. 
+func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, strict bool) (Body, map[Var]Var, Errors) { + var errs Errors + body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs, strict) + return body, stack.Peek().vs, errs +} + +func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors, strict bool) (Body, Errors) { + var cpy Body + + for i := range body { + var expr *Expr + switch { + case body[i].IsAssignment(): + stack.assignment = true + expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs, strict) + case body[i].IsSome(): + expr, errs = rewriteSomeDeclStatement(g, stack, body[i], errs, strict) + case body[i].IsEvery(): + expr, errs = rewriteEveryStatement(g, stack, body[i], errs, strict) + default: + expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs, strict) + } + if expr != nil { + cpy.Append(expr) + } + } + + // If the body only contained a var statement it will be empty at this + // point. Append true to the body to ensure that it's non-empty (zero length + // bodies are not supported.) + if len(cpy) == 0 { + cpy.Append(NewExpr(BooleanTerm(true))) + } + + errs = checkUnusedAssignedVars(body, stack, used, errs, strict) + return cpy, checkUnusedDeclaredVars(body, stack, used, cpy, errs) +} + +func checkUnusedAssignedVars(body Body, stack *localDeclaredVars, used VarSet, errs Errors, strict bool) Errors { + if !strict || len(errs) > 0 { + return errs + } + + dvs := stack.Peek() + + hasAssignedVars := false + for _, occ := range dvs.occurrence { + if occ == assignedVar { + hasAssignedVars = true + } + } + if !hasAssignedVars { + return errs + } + + unused := NewVarSet() + + for v, occ := range dvs.occurrence { + // A var that was assigned in this scope must have been seen (used) more than once (the time of assignment) in + // the same, or nested, scope to be counted as used. 
+ if !v.IsWildcard() && stack.Count(v) <= 1 && occ == assignedVar { + unused.Add(dvs.vs[v]) + } + } + + rewrittenUsed := NewVarSet() + for v := range used { + if gv, ok := stack.Declared(v); ok { + rewrittenUsed.Add(gv) + } else { + rewrittenUsed.Add(v) + } + } + + unused = unused.Diff(rewrittenUsed) + if len(unused) == 0 { + return errs + } + + reversed := make(map[Var]Var, len(dvs.vs)) + for k, v := range dvs.vs { + reversed[v] = k + } + + for _, gv := range unused.Sorted() { + found := false + for i := range body { + if body[i].Vars(VarVisitorParams{}).Contains(gv) { + errs = append(errs, NewError(CompileErr, body[i].Loc(), "assigned var %v unused", reversed[gv])) + found = true + break + } + } + if !found { + errs = append(errs, NewError(CompileErr, body[0].Loc(), "assigned var %v unused", reversed[gv])) + } + } + + return errs +} + +func checkUnusedDeclaredVars(body Body, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors { + + // NOTE(tsandall): Do not generate more errors if there are existing + // declaration errors. + if len(errs) > 0 { + return errs + } + + dvs := stack.Peek() + + hasDeclaredVars := false + for _, occ := range dvs.occurrence { + if occ == declaredVar { + hasDeclaredVars = true + } + } + if !hasDeclaredVars { + return errs + } + + declared := NewVarSet() + + for v, occ := range dvs.occurrence { + if occ == declaredVar { + declared.Add(dvs.vs[v]) + } + } + + bodyvars := cpy.Vars(VarVisitorParams{}) + + for v := range used { + if gv, ok := stack.Declared(v); ok { + bodyvars.Add(gv) + } else { + bodyvars.Add(v) + } + } + + dbv := declared.Diff(bodyvars) + if dbv.DiffCount(used) == 0 { + return errs + } + + reversed := make(map[Var]Var, len(dvs.vs)) + for k, v := range dvs.vs { + reversed[v] = k + } + + for _, gv := range dbv.Diff(used).Sorted() { + rv := reversed[gv] + if !rv.IsGenerated() { + // Scan through body exprs, looking for a match between the + // bad var's original name, and each expr's declared vars. 
+ foundUnusedVarByName := false + for i := range body { + varsDeclaredInExpr := declaredVars(body[i]) + if varsDeclaredInExpr.Contains(rv) { + // TODO(philipc): Clean up the offset logic here when the parser + // reports more accurate locations. + errs = append(errs, NewError(CompileErr, body[i].Loc(), "declared var %v unused", rv)) + foundUnusedVarByName = true + break + } + } + // Default error location returned. + if !foundUnusedVarByName { + errs = append(errs, NewError(CompileErr, body[0].Loc(), "declared var %v unused", rv)) + } + } + } + + return errs +} + +func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + e := expr.Copy() + every := e.Terms.(*Every) + + errs = rewriteDeclaredVarsInTermRecursive(g, stack, every.Domain, errs, strict) + + stack.Push() + defer stack.Pop() + + // if the key exists, rewrite + if every.Key != nil { + if v := every.Key.Value.(Var); !v.IsWildcard() { + gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) + if err != nil { + return nil, append(errs, newErrorString(CompileErr, every.Loc(), err.Error())) + } + every.Key.Value = gv + } + } else { // if the key doesn't exist, add dummy local + every.Key = NewTerm(g.Generate()) + } + + // value is always present + if v := every.Value.Value.(Var); !v.IsWildcard() { + gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) + if err != nil { + return nil, append(errs, newErrorString(CompileErr, every.Loc(), err.Error())) + } + every.Value.Value = gv + } + + used := NewVarSet() + every.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, every.Body, errs, strict) + + return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) +} + +func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + e := expr.Copy() + decl := e.Terms.(*SomeDecl) + for i := range decl.Symbols { + switch v := decl.Symbols[i].Value.(type) { + case Var: + if _, 
err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil { + return nil, append(errs, newErrorString(CompileErr, decl.Loc(), err.Error())) + } + case Call: + var key, val, container *Term + switch len(v) { + case 4: // member3 + key = v[1] + val = v[2] + container = v[3] + case 3: // member + key = NewTerm(g.Generate()) + val = v[1] + container = v[2] + } + + var rhs *Term + switch c := container.Value.(type) { + case Ref: + rhs = RefTerm(append(c, key)...) + default: + rhs = RefTerm(container, key) + } + e.Terms = []*Term{ + RefTerm(VarTerm(Equality.Name)), val, rhs, + } + + output := NewVarSet() + + for _, v0 := range outputVarsForExprEq(e, container.Vars(), output).Sorted() { + if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil { + return nil, append(errs, newErrorString(CompileErr, decl.Loc(), err.Error())) + } + } + return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) + } + } + return nil, errs +} + +func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + vis := NewGenericVisitor(func(x any) bool { + var stop bool + switch x := x.(type) { + case *Term: + stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs, strict) + case *With: + stop, errs = true, rewriteDeclaredVarsInWithRecursive(g, stack, x, errs, strict) + } + return stop + }) + vis.Walk(expr) + return expr, errs +} + +func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + + if expr.Negated { + errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression")) + return expr, errs + } + + numErrsBefore := len(errs) + + if !validEqAssignArgCount(expr) { + return expr, errs + } + + // Rewrite terms on right hand side capture seen vars and recursively + // process comprehensions before left hand side is processed. Also + // rewrite with modifier. 
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs, strict) + + for _, w := range expr.With { + errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) + } + + // Rewrite vars on left hand side with unique names. Catch redeclaration + // and invalid term types here. + var vis func(t *Term) bool + + vis = func(t *Term) bool { + switch v := t.Value.(type) { + case Var: + if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil { + errs = append(errs, newErrorString(CompileErr, t.Location, err.Error())) + } else { + t.Value = gv + } + return true + case *Array: + return false + case *object: + v.Foreach(func(_, v *Term) { + WalkTerms(v, vis) + }) + return true + case Ref: + if RootDocumentRefs.Contains(t) { + if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil { + errs = append(errs, newErrorString(CompileErr, t.Location, err.Error())) + } else { + t.Value = gv + } + return true + } + } + errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", ValueName(t.Value))) + return true + } + + WalkTerms(expr.Operand(0), vis) + + if len(errs) == numErrsBefore { + loc := expr.Operator()[0].Location + expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc)) + } + + return expr, errs +} + +func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) (bool, Errors) { + switch v := term.Value.(type) { + case Var: + if gv, ok := stack.Declared(v); ok { + term.Value = gv + stack.Seen(v) + } else if stack.Occurrence(v) == newVar { + stack.Insert(v, v, seenVar) + } + case Ref: + if RootDocumentRefs.Contains(term) { + x := v[0].Value.(Var) + if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar { + gv, _ := stack.Declared(x) + term.Value = gv + } + + return true, errs + } + return false, errs + case Call: + ref := v[0] + WalkVars(ref, func(v Var) bool { + if gv, ok := stack.Declared(v); 
ok && !gv.Equal(v) { + // We will rewrite the ref of a function call, which is never ok since we don't have first-class functions. + errs = append(errs, NewError(CompileErr, term.Location, "called function %s shadowed", ref)) + return true + } + return false + }) + return false, errs + case *object: + cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs, strict) + return kcpy, v, nil + }) + term.Value = cpy + case Set: + cpy, _ := v.Map(func(elem *Term) (*Term, error) { + elemcpy := elem.Copy() + errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs, strict) + return elemcpy, nil + }) + term.Value = cpy + case *ArrayComprehension: + errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs, strict) + case *SetComprehension: + errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs, strict) + case *ObjectComprehension: + errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs, strict) + default: + return false, errs + } + return true, errs +} + +func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) Errors { + WalkTerms(term, func(t *Term) bool { + var stop bool + stop, errs = rewriteDeclaredVarsInTerm(g, stack, t, errs, strict) + return stop + }) + return errs +} + +func rewriteDeclaredVarsInWithRecursive(g *localVarGenerator, stack *localDeclaredVars, w *With, errs Errors, strict bool) Errors { + // NOTE(sr): `with input as` and `with input.a.b.c as` are deliberately skipped here: `input` could + // have been shadowed by a local variable/argument but should NOT be replaced in the `with` target. + // + // We cannot drop `input` from the stack since it's conceivable to do `with input[input] as` where + // the second input is meant to be the local var. 
It's a terrible idea, but when you're shadowing + // `input` those might be your thing. + errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Target, errs, strict) + if sdwInput, ok := stack.Declared(InputRootDocument.Value.(Var)); ok { // Was "input" shadowed... + switch value := w.Target.Value.(type) { + case Var: + if sdwInput.Equal(value) { // ...and replaced? If so, fix it + w.Target.Value = InputRootRef + } + case Ref: + if sdwInput.Equal(value[0].Value.(Var)) { + w.Target.Value.(Ref)[0].Value = InputRootDocument.Value + } + } + } + // No special handling of the `with` value + return rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) +} + +func rewriteDeclaredVarsInTemplateString(g *localVarGenerator, stack *localDeclaredVars, ts *TemplateString, errs Errors, strict bool) Errors { + for i, p := range ts.Parts { + if expr, ok := p.(*Expr); ok { + stack.Push() + ts.Parts[i], errs = rewriteDeclaredVarsInExpr(g, stack, expr, errs, strict) + stack.Pop() + } + } + + return errs +} + +func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors { + used := v.Term.Vars() + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict bool) Errors { + used := v.Term.Vars() + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors, strict bool) Errors { + used := NewVarSet() + used.Update(v.Key.Vars()) + 
used.Update(v.Value.Vars()) + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, occ varOccurrence) (gv Var, err error) { + switch stack.Occurrence(v) { + case seenVar: + return gv, fmt.Errorf("var %v referenced above", v) + case assignedVar: + return gv, fmt.Errorf("var %v assigned above", v) + case declaredVar: + return gv, fmt.Errorf("var %v declared above", v) + case argVar: + return gv, fmt.Errorf("arg %v redeclared", v) + } + gv = g.Generate() + stack.Insert(v, gv, occ) + return +} + +// rewriteWithModifiersInBody will rewrite the body so that with modifiers do +// not contain terms that require evaluation as values. If this function +// encounters an invalid with modifier target then it will raise an error. 
+func rewriteWithModifiersInBody(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, body Body) (Body, *Error) { + var result Body + for i := range body { + exprs, err := rewriteWithModifier(c, unsafeBuiltinsMap, f, body[i]) + if err != nil { + return nil, err + } + if len(exprs) > 0 { + for _, expr := range exprs { + result.Append(expr) + } + } else { + result.Append(body[i]) + } + } + return result, nil +} + +func rewriteWithModifier(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, expr *Expr) ([]*Expr, *Error) { + + var result []*Expr + for i := range expr.With { + eval, err := validateWith(c, unsafeBuiltinsMap, expr, i) + if err != nil { + return nil, err + } + + if eval { + eq := f.Generate(expr.With[i].Value) + result = append(result, eq) + expr.With[i].Value = eq.Operand(0) + } + } + + return append(result, expr), nil +} + +func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr, i int) (bool, *Error) { + target, value := expr.With[i].Target, expr.With[i].Value + + // Ensure that values that are built-ins are rewritten to Ref (not Var) + if v, ok := value.Value.(Var); ok { + if _, ok := c.builtins[v.String()]; ok { + value.Value = Ref([]*Term{NewTerm(v)}) + } + } + isBuiltinRefOrVar, err := isBuiltinRefOrVar(c.builtins, unsafeBuiltinsMap, target) + if err != nil { + return false, err + } + + isAllowedUnknownFuncCall := false + if c.allowUndefinedFuncCalls { + switch target.Value.(type) { + case Ref, Var: + isAllowedUnknownFuncCall = true + } + } + + switch { + case isDataRef(target): + ref := target.Value.(Ref) + targetNode := c.RuleTree + for i := range len(ref) - 1 { + child := targetNode.Child(ref[i].Value) + if child == nil { + break + } else if len(child.Values) > 0 { + return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)") + } + targetNode = child + } + + if targetNode != nil { + // NOTE(sr): at this point in the compiler stages, 
we don't have a fully-populated + // TypeEnv yet -- so we have to make do with this check to see if the replacement + // target is a function. It's probably wrong for arity-0 functions, but those are + // and edge case anyways. + if child := targetNode.Child(ref[len(ref)-1].Value); child != nil { + for _, v := range child.Values { + if len(v.Head.Args) > 0 { + if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { + return false, err // err may be nil + } + } + } + } + } + + // If the with-value is a ref to a function, but not a call, we can't rewrite it + if r, ok := value.Value.(Ref); ok { + // TODO: check that target ref doesn't exist? + if valueNode := c.RuleTree.Find(r); valueNode != nil { + for _, v := range valueNode.Values { + if len(v.Head.Args) > 0 { + return false, nil + } + } + } + } + case isInputRef(target): // ok, valid + case isBuiltinRefOrVar: + // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc) + // are rewritten to their proper Ref convention + if v, ok := target.Value.(Var); ok { + target.Value = Ref([]*Term{NewTerm(v)}) + } + + targetRef := target.Value.(Ref) + bi := c.builtins[targetRef.String()] // safe because isBuiltinRefOrVar checked this + if err := validateWithBuiltinTarget(bi, targetRef, target.Loc()); err != nil { + return false, err + } + + if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { + return false, err // err may be nil + } + case isAllowedUnknownFuncCall: + // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in. 
+ return false, nil + default: + return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument) + } + return requiresEval(value), nil +} + +func validateWithBuiltinTarget(bi *Builtin, target Ref, loc *location.Location) *Error { + switch bi.Name { + case Equality.Name, + RegoMetadataChain.Name, + RegoMetadataRule.Name: + return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of %q invalid", bi.Name) + } + + switch { + case target.HasPrefix(Ref([]*Term{VarTerm("internal")})): + return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of internal function %q invalid", target) + + case bi.Relation: + return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a relation") + + case bi.Decl.Result() == nil: + return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a void function") + } + return nil +} + +func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]struct{}, ruleTree *TreeNode, value *Term) (bool, *Error) { + if v, ok := value.Value.(Ref); ok { + if ruleTree.Find(v) != nil { // ref exists in rule tree + return true, nil + } + } + return isBuiltinRefOrVar(bs, unsafeMap, value) +} + +func isInputRef(term *Term) bool { + ref, ok := term.Value.(Ref) + return ok && ref.HasPrefix(InputRootRef) +} + +func isDataRef(term *Term) bool { + ref, ok := term.Value.(Ref) + return ok && ref.HasPrefix(DefaultRootRef) +} + +func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) { + switch v := term.Value.(type) { + case Ref, Var: + vs := v.String() + if _, ok := unsafeBuiltinsMap[vs]; ok { + return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v) + } + _, ok := bs[vs] + return ok, nil + } + return 
false, nil +} + +func isVirtual(node *TreeNode, ref Ref) bool { + for i := range ref { + child := node.Child(ref[i].Value) + if child == nil { + return false + } else if len(child.Values) > 0 { + return true + } + node = child + } + return true +} + +func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) { + if len(unsafe) == 0 { + return + } + + for _, pair := range unsafe.Vars() { + v := pair.Var + if w, ok := rewritten[v]; ok { + v = w + } + if !v.IsGenerated() { + if _, ok := allFutureKeywords[string(v)]; ok { + result = append(result, NewError(UnsafeVarErr, pair.Loc, + "var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v)) + continue + } + result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", v)) + } + } + + if len(result) > 0 { + return + } + + // If the expression contains unsafe generated variables, report which + // expressions are unsafe instead of the variables that are unsafe (since + // the latter are not meaningful to the user.) + pairs := util.SortedFunc(unsafe.Slice(), func(a, b unsafePair) int { + return a.Expr.Location.Compare(b.Expr.Location) + }) + + // Report at most one error per generated variable. 
+ seen := NewVarSet() + + for _, expr := range pairs { + before := len(seen) + for v := range expr.Vars { + if v.IsGenerated() { + seen.Add(v) + } + } + if len(seen) > before { + result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe")) + } + } + + return +} + +func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node any) Errors { + var errs Errors + WalkExprs(node, func(x *Expr) bool { + if x.IsCall() { + operator := x.Operator().String() + if _, ok := unsafeBuiltinsMap[operator]; ok { + errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator)) + } + } + return false + }) + return errs +} + +func rewriteVarsInRef(vars ...map[Var]Var) varRewriter { + return func(node Ref) Ref { + i, _ := TransformVars(node, func(v Var) (Value, error) { + for _, m := range vars { + if u, ok := m[v]; ok { + return u, nil + } + } + return v, nil + }) + return i.(Ref) + } +} + +// NOTE(sr): This is duplicated with compile/compile.go; but moving it into another location +// would cause a circular dependency -- the refSet definition needs ast.Ref. If we make it +// public in the ast package, the compile package could take it from there, but it would also +// increase our public interface. Let's reconsider if we need it in a third place. +type refSet struct { + s []Ref +} + +func newRefSet(x ...Ref) *refSet { + result := &refSet{} + for i := range x { + result.AddPrefix(x[i]) + } + return result +} + +// ContainsPrefix returns true if r is prefixed by any of the existing refs in the set. +func (rs *refSet) ContainsPrefix(r Ref) bool { + return slices.ContainsFunc(rs.s, r.HasPrefix) +} + +// AddPrefix inserts r into the set if r is not prefixed by any existing +// refs in the set. If any existing refs are prefixed by r, those existing +// refs are removed. 
+func (rs *refSet) AddPrefix(r Ref) { + if rs.ContainsPrefix(r) { + return + } + cpy := []Ref{r} + for i := range rs.s { + if !rs.s[i].HasPrefix(r) { + cpy = append(cpy, rs.s[i]) + } + } + rs.s = cpy +} + +// Sorted returns a sorted slice of terms for refs in the set. +func (rs *refSet) Sorted() []*Term { + terms := make([]*Term, len(rs.s)) + for i := range rs.s { + terms[i] = NewTerm(rs.s[i]) + } + return util.SortedFunc(terms, TermValueCompare) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go new file mode 100644 index 0000000000..4ea122f3cb --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go @@ -0,0 +1,63 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +// CompileModules takes a set of Rego modules represented as strings and +// compiles them for evaluation. The keys of the map are used as filenames. +func CompileModules(modules map[string]string) (*Compiler, error) { + return CompileModulesWithOpt(modules, CompileOpts{}) +} + +// CompileOpts defines a set of options for the compiler. +type CompileOpts struct { + EnablePrintStatements bool + ParserOptions ParserOptions +} + +// CompileModulesWithOpt takes a set of Rego modules represented as strings and +// compiles them for evaluation. The keys of the map are used as filenames. +func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) { + + parsed := make(map[string]*Module, len(modules)) + + for f, module := range modules { + var pm *Module + var err error + if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil { + return nil, err + } + parsed[f] = pm + } + + compiler := NewCompiler(). + WithDefaultRegoVersion(opts.ParserOptions.RegoVersion). 
+ WithEnablePrintStatements(opts.EnablePrintStatements). + WithCapabilities(opts.ParserOptions.Capabilities) + compiler.Compile(parsed) + + if compiler.Failed() { + return nil, compiler.Errors + } + + return compiler, nil +} + +// MustCompileModules compiles a set of Rego modules represented as strings. If +// the compilation process fails, this function panics. +func MustCompileModules(modules map[string]string) *Compiler { + return MustCompileModulesWithOpts(modules, CompileOpts{}) +} + +// MustCompileModulesWithOpts compiles a set of Rego modules represented as strings. If +// the compilation process fails, this function panics. +func MustCompileModulesWithOpts(modules map[string]string, opts CompileOpts) *Compiler { + + compiler, err := CompileModulesWithOpt(modules, opts) + if err != nil { + panic(err) + } + + return compiler +} diff --git a/vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go b/vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go rename to vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go b/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go new file mode 100644 index 0000000000..b119f80eac --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go @@ -0,0 +1,79 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "slices" + "strings" +) + +// CheckPathConflicts returns a set of errors indicating paths that +// are in conflict with the result of the provided callable. 
+func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors { + var errs Errors + + root := c.RuleTree.Child(DefaultRootDocument.Value) + if root == nil { + return nil + } + + if len(c.pathConflictCheckRoots) == 0 || slices.Contains(c.pathConflictCheckRoots, "") { + for _, child := range root.Children { + errs = append(errs, checkDocumentConflicts(child, exists, nil)...) + } + return errs + } + + for _, rootPath := range c.pathConflictCheckRoots { + // traverse AST from `path` to go to the new root + paths := strings.Split(rootPath, "/") + node := root + for _, key := range paths { + node = node.Child(String(key)) + if node == nil { + break + } + } + + if node == nil { + // could not find the node from the AST (e.g. `path` is from a data file) + // then no conflict is possible + continue + } + + for _, child := range node.Children { + errs = append(errs, checkDocumentConflicts(child, exists, paths)...) + } + } + + return errs +} + +func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors { + + switch key := node.Key.(type) { + case String: + path = append(path, string(key)) + default: // other key types cannot conflict with data + return nil + } + + if len(node.Values) > 0 { + s := strings.Join(path, "/") + if ok, err := exists(path); err != nil { + return Errors{NewError(CompileErr, node.Values[0].Loc(), "conflict check for data path %v: %v", s, err.Error())} + } else if ok { + return Errors{NewError(CompileErr, node.Values[0].Loc(), "conflicting rule for data path %v found", s)} + } + } + + var errs Errors + + for _, child := range node.Children { + errs = append(errs, checkDocumentConflicts(child, exists, path)...) 
+ } + + return errs +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/default_module_loader.go b/vendor/github.com/open-policy-agent/opa/v1/ast/default_module_loader.go new file mode 100644 index 0000000000..528c253e16 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/default_module_loader.go @@ -0,0 +1,14 @@ +// Copyright 2025 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +var defaultModuleLoader ModuleLoader + +// DefaultModuleLoader lets you inject an `ast.ModuleLoader` that will +// always be used. If another one is provided with the ast package, +// they will both be consulted to enrich the set of modules dynamically. +func DefaultModuleLoader(ml ModuleLoader) { + defaultModuleLoader = ml +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go b/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go new file mode 100644 index 0000000000..62b04e301e --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go @@ -0,0 +1,36 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine. +// +// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc. +// +// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. 
The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows: +// +// Module +// | +// +--- Package (Reference) +// | +// +--- Imports +// | | +// | +--- Import (Term) +// | +// +--- Rules +// | +// +--- Rule +// | +// +--- Head +// | | +// | +--- Name (Variable) +// | | +// | +--- Key (Term) +// | | +// | +--- Value (Term) +// | +// +--- Body +// | +// +--- Expression (Term | Terms | Variable Declaration) +// +// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports. +package ast diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/env.go b/vendor/github.com/open-policy-agent/opa/v1/ast/env.go new file mode 100644 index 0000000000..3a3e793778 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/env.go @@ -0,0 +1,514 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "strings" + + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// TypeEnv contains type info for static analysis such as type checking. +type TypeEnv struct { + tree *typeTreeNode + next *TypeEnv + newChecker func() *typeChecker +} + +// newTypeEnv returns an empty TypeEnv. The constructor is not exported because +// type environments should only be created by the type checker. +func newTypeEnv(f func() *typeChecker) *TypeEnv { + return &TypeEnv{ + tree: newTypeTree(), + newChecker: f, + } +} + +// Get returns the type of x. +// +// Deprecated: Use GetByValue or GetByRef instead, as they are more efficient. 
+func (env *TypeEnv) Get(x any) types.Type { + if term, ok := x.(*Term); ok { + x = term.Value + } + + if v, ok := x.(Value); ok { + return env.GetByValue(v) + } + + panic("unreachable") +} + +// GetByValue returns the type of v. +func (env *TypeEnv) GetByValue(v Value) types.Type { + switch x := v.(type) { + + // Scalars. + case Null: + return types.Nl + case Boolean: + return types.B + case Number: + return types.N + case String, *TemplateString: + return types.S + + // Composites. + case *Array: + static := make([]types.Type, x.Len()) + for i := range static { + static[i] = env.GetByValue(x.Elem(i).Value) + } + + var dynamic types.Type + if len(static) == 0 { + dynamic = types.A + } + + return types.NewArray(static, dynamic) + + case *lazyObj: + return env.GetByValue(x.force()) + case *object: + static := []*types.StaticProperty{} + var dynamic *types.DynamicProperty + + x.Foreach(func(k, v *Term) { + if IsConstant(k.Value) { + if kjson, err := JSON(k.Value); err == nil { + static = append(static, types.NewStaticProperty(kjson, env.GetByValue(v.Value))) + return + } + } + // Can't handle it as a static property, fallback to dynamic + dynamic = types.NewDynamicProperty(env.GetByValue(k.Value), env.GetByValue(v.Value)) + }) + + if len(static) == 0 && dynamic == nil { + dynamic = types.NewDynamicProperty(types.A, types.A) + } + + return types.NewObject(static, dynamic) + + case *set: + var tpe types.Type + x.Foreach(func(elem *Term) { + tpe = types.Or(tpe, env.GetByValue(elem.Value)) + }) + if tpe == nil { + tpe = types.A + } + return types.NewSet(tpe) + + // Comprehensions. 
+ case *ArrayComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewArray(nil, cpy.GetByValue(x.Term.Value)) + } + return nil + case *ObjectComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewObject(nil, types.NewDynamicProperty(cpy.GetByValue(x.Key.Value), cpy.GetByValue(x.Value.Value))) + } + return nil + case *SetComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewSet(cpy.GetByValue(x.Term.Value)) + } + return nil + + // Refs. + case Ref: + return env.GetByRef(x) + + // Vars. + case Var: + if node := env.tree.Child(v); node != nil { + return node.Value() + } + if env.next != nil { + return env.next.GetByValue(v) + } + return nil + + // Calls. + case Call: + return nil + } + + return env.Get(v) +} + +// GetByRef returns the type of the value referred to by ref. +func (env *TypeEnv) GetByRef(ref Ref) types.Type { + node := env.tree.Child(ref[0].Value) + if node == nil { + return env.getRefFallback(ref) + } + + return env.getRefRec(node, ref, ref[1:]) +} + +func (env *TypeEnv) getRefFallback(ref Ref) types.Type { + if env.next != nil { + return env.next.GetByRef(ref) + } + + if RootDocumentNames.Contains(ref[0]) { + // types.A is an empty types.Any + // this is used to represent a potential non-local reference + return types.A + } + + return nil +} + +func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type { + if len(tail) == 0 { + return env.getRefRecExtent(node) + } + + if node.Leaf() { + if node.children.Len() > 0 { + if child := node.Child(tail[0].Value); child != nil { + return env.getRefRec(child, ref, tail[1:]) + } + } + return selectRef(node.Value(), tail) + } + + if !IsConstant(tail[0].Value) { + return selectRef(env.getRefRecExtent(node), tail) + } + + child := node.Child(tail[0].Value) + if child == nil { + return env.getRefFallback(ref) + } + + return 
env.getRefRec(child, ref, tail[1:]) +} + +func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type { + + if node.Leaf() { + return node.Value() + } + + children := []*types.StaticProperty{} + + node.Children().Iter(func(key Value, child *typeTreeNode) bool { + tpe := env.getRefRecExtent(child) + + // NOTE(sr): Converting to Golang-native types here is an extension of what we did + // before -- only supporting strings. But since we cannot differentiate sets and arrays + // that way, we could reconsider. + switch key.(type) { + case String, Number, Boolean: // skip anything else + propKey, err := JSON(key) + if err != nil { + panic(fmt.Errorf("unreachable, ValueToInterface: %w", err)) + } + children = append(children, types.NewStaticProperty(propKey, tpe)) + } + return false + }) + + // TODO(tsandall): for now, these objects can have any dynamic properties + // because we don't have schema for base docs. Once schemas are supported + // we can improve this. + return types.NewObject(children, types.NewDynamicProperty(types.S, types.A)) +} + +func (env *TypeEnv) wrap() *TypeEnv { + cpy := *env + cpy.next = env + cpy.tree = newTypeTree() + return &cpy +} + +// typeTreeNode is used to store type information in a tree. 
+type typeTreeNode struct { + key Value + value types.Type + children *util.HasherMap[Value, *typeTreeNode] +} + +func newTypeTree() *typeTreeNode { + return &typeTreeNode{ + key: nil, + value: nil, + children: util.NewHasherMap[Value, *typeTreeNode](ValueEqual), + } +} + +func (n *typeTreeNode) Child(key Value) *typeTreeNode { + value, ok := n.children.Get(key) + if !ok { + return nil + } + return value +} + +func (n *typeTreeNode) Children() *util.HasherMap[Value, *typeTreeNode] { + return n.children +} + +func (n *typeTreeNode) Get(path Ref) types.Type { + curr := n + for _, term := range path { + child, ok := curr.children.Get(term.Value) + if !ok { + return nil + } + curr = child + } + return curr.Value() +} + +func (n *typeTreeNode) Leaf() bool { + return n.value != nil +} + +func (n *typeTreeNode) PutOne(key Value, tpe types.Type) { + c, ok := n.children.Get(key) + + var child *typeTreeNode + if !ok { + child = newTypeTree() + child.key = key + n.children.Put(key, child) + } else { + child = c + } + + child.value = tpe +} + +func (n *typeTreeNode) Put(path Ref, tpe types.Type) { + curr := n + for _, term := range path { + child, ok := curr.children.Get(term.Value) + if !ok { + child = newTypeTree() + child.key = term.Value + curr.children.Put(child.key, child) + } + + curr = child + } + curr.value = tpe +} + +// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path. +// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object. +// path must be ground. +func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) { + curr := n + for i, term := range path { + child, ok := curr.children.Get(term.Value) + if !ok { + child = newTypeTree() + child.key = term.Value + curr.children.Put(child.key, child) + } else if child.value != nil && i+1 < len(path) { + // If child has an object value, merge the new value into it. 
+ if o, ok := child.value.(*types.Object); ok { + var err error + child.value, err = insertIntoObject(o, path[i+1:], tpe, env) + if err != nil { + panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) + } + } + } + + curr = child + } + + curr.value = mergeTypes(curr.value, tpe) + + if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 { + // merge all leafs into the inserted object + for p, t := range curr.Leafs() { + var err error + curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env) + if err != nil { + panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) + } + } + } +} + +// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with an types.Or. +// If both are objects, the key types of their dynamic properties are joined with types.Or:s, and their value types +// are recursively merged (using mergeTypes). +// If 'a' and 'b' are both objects, and at least one of them have static properties, they are joined +// with an types.Or, instead of being merged. +// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa); AND both objects have no +// static properties, they are merged. +// If 'a' and 'b' are different types, they are joined with an types.Or. 
+func mergeTypes(a, b types.Type) types.Type { + if a == nil { + return b + } + + if b == nil { + return a + } + + switch a := a.(type) { + case *types.Object: + if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 { + if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 { + return types.Or(a, bObj) + } + + aDynProps := a.DynamicProperties() + bDynProps := bObj.DynamicProperties() + dynProps := types.NewDynamicProperty( + types.Or(aDynProps.Key, bDynProps.Key), + mergeTypes(aDynProps.Value, bDynProps.Value), + ) + return types.NewObject(nil, dynProps) + } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 { + // If a is an object type with no static components ... + for _, t := range bAny { + if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 { + // ... and b is a types.Any containing an object with no static components, we merge them. + aDynProps := a.DynamicProperties() + tDynProps := tObj.DynamicProperties() + tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key) + tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value) + return bAny + } + } + } + case *types.Set: + if bSet, ok := b.(*types.Set); ok { + return types.NewSet(types.Or(a.Of(), bSet.Of())) + } + case types.Any: + if _, ok := b.(types.Any); !ok { + return mergeTypes(b, a) + } + } + + return types.Or(a, b) +} + +func (n *typeTreeNode) String() string { + b := &strings.Builder{} + + key := "-" + if k := n.key; k != nil { + key = k.String() + } + + b.WriteString(key) + if v := n.value; v != nil { + b.WriteString(": ") + b.WriteString(v.String()) + } + + n.children.Iter(func(_ Value, child *typeTreeNode) bool { + b.WriteString("\n\t+ ") + b.WriteString(strings.ReplaceAll(child.String(), "\n", "\n\t")) + + return false + }) + + return b.String() +} + +func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (*types.Object, error) { + if len(path) == 0 { + return o, 
nil + } + + key := env.GetByValue(path[0].Value) + + if len(path) == 1 { + var dynamicProps *types.DynamicProperty + if dp := o.DynamicProperties(); dp != nil { + dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe)) + } else { + dynamicProps = types.NewDynamicProperty(key, tpe) + } + return types.NewObject(o.StaticProperties(), dynamicProps), nil + } + + child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env) + if err != nil { + return nil, err + } + + var dynamicProps *types.DynamicProperty + if dp := o.DynamicProperties(); dp != nil { + dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child)) + } else { + dynamicProps = types.NewDynamicProperty(key, child) + } + return types.NewObject(o.StaticProperties(), dynamicProps), nil +} + +func (n *typeTreeNode) Leafs() map[*Ref]types.Type { + leafs := map[*Ref]types.Type{} + n.children.Iter(func(_ Value, v *typeTreeNode) bool { + collectLeafs(v, nil, leafs) + return false + }) + return leafs +} + +func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { + nPath := append(path, NewTerm(n.key)) + if n.Leaf() { + npc := nPath // copy of else nPath escapes to heap even if !n.Leaf() + leafs[&npc] = n.Value() + return + } + n.children.Iter(func(_ Value, v *typeTreeNode) bool { + collectLeafs(v, nPath, leafs) + return false + }) +} + +func (n *typeTreeNode) Value() types.Type { + return n.value +} + +// selectConstant returns the attribute of the type referred to by the term. If +// the attribute type cannot be determined, nil is returned. +func selectConstant(tpe types.Type, term *Term) types.Type { + x, err := JSON(term.Value) + if err == nil { + return types.Select(tpe, x) + } + return nil +} + +// selectRef returns the type of the nested attribute referred to by ref. If +// the attribute type cannot be determined, nil is returned. 
If the ref +// contains vars or refs, then the returned type will be a union of the +// possible types. +func selectRef(tpe types.Type, ref Ref) types.Type { + if tpe == nil || len(ref) == 0 { + return tpe + } + + head, tail := ref[0], ref[1:] + + switch head.Value.(type) { + case Var, Ref, *Array, Object, Set: + return selectRef(types.Values(tpe), tail) + default: + return selectRef(selectConstant(tpe, head), tail) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go b/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go new file mode 100644 index 0000000000..bf8bca7472 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go @@ -0,0 +1,133 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "slices" + "strconv" + "strings" +) + +// Errors represents a series of errors encountered during parsing, compiling, +// etc. +type Errors []*Error + +func (e Errors) Error() string { + + if len(e) == 0 { + return "no error(s)" + } + + if len(e) == 1 { + return fmt.Sprintf("1 error occurred: %v", e[0].Error()) + } + + s := make([]string, len(e)) + for i, err := range e { + s[i] = err.Error() + } + + return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n")) +} + +// Sort sorts the error slice by location. If the locations are equal then the +// error message is compared. +func (e Errors) Sort() { + slices.SortFunc(e, func(a, b *Error) int { + if cmp := a.Location.Compare(b.Location); cmp != 0 { + return cmp + } + + return strings.Compare(a.Error(), b.Error()) + }) +} + +const ( + // ParseErr indicates an unclassified parse error occurred. + ParseErr = "rego_parse_error" + + // CompileErr indicates an unclassified compile error occurred. + CompileErr = "rego_compile_error" + + // TypeErr indicates a type error was caught. 
+ TypeErr = "rego_type_error" + + // UnsafeVarErr indicates an unsafe variable was found during compilation. + UnsafeVarErr = "rego_unsafe_var_error" + + // RecursionErr indicates recursion was found during compilation. + RecursionErr = "rego_recursion_error" + + // FormatErr indicates an error occurred during formatting. + FormatErr = "rego_format_error" +) + +// IsError returns true if err is an AST error with code. +func IsError(code string, err error) bool { + if err, ok := err.(*Error); ok { + return err.Code == code + } + return false +} + +// ErrorDetails defines the interface for detailed error messages. +type ErrorDetails interface { + Lines() []string +} + +// Error represents a single error caught during parsing, compiling, etc. +type Error struct { + Code string `json:"code"` + Message string `json:"message"` + Location *Location `json:"location,omitempty"` + Details ErrorDetails `json:"details,omitempty"` +} + +func (e *Error) Error() string { + + var prefix string + + if e.Location != nil { + + if len(e.Location.File) > 0 { + prefix += e.Location.File + ":" + strconv.Itoa(e.Location.Row) + } else { + prefix += strconv.Itoa(e.Location.Row) + ":" + strconv.Itoa(e.Location.Col) + } + } + + sb := strings.Builder{} + if len(prefix) > 0 { + sb.WriteString(prefix) + sb.WriteString(": ") + } + + sb.WriteString(e.Code) + sb.WriteString(": ") + sb.WriteString(e.Message) + + if e.Details != nil { + for _, line := range e.Details.Lines() { + sb.WriteString("\n\t") + sb.WriteString(line) + } + } + + return sb.String() +} + +// NewError returns a new Error object. 
+func NewError(code string, loc *Location, f string, a ...any) *Error { + return newErrorString(code, loc, fmt.Sprintf(f, a...)) +} + +func newErrorString(code string, loc *Location, m string) *Error { + return &Error{ + Code: code, + Location: loc, + Message: m, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/index.go b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go new file mode 100644 index 0000000000..45c0fa7a81 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/index.go @@ -0,0 +1,1177 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "slices" + "sort" + "strings" + "sync" + + "github.com/open-policy-agent/opa/v1/util" +) + +// RuleIndex defines the interface for rule indices. +type RuleIndex interface { + + // Build tries to construct an index for the given rules. If the index was + // constructed, it returns true, otherwise false. + Build(rules []*Rule) bool + + // Lookup searches the index for rules that will match the provided + // resolver. If the resolver returns an error, it is returned via err. + Lookup(resolver ValueResolver) (*IndexResult, error) + + // AllRules traverses the index and returns all rules that will match + // the provided resolver without any optimizations (effectively with + // indexing disabled). If the resolver returns an error, it is returned + // via err. + AllRules(resolver ValueResolver) (*IndexResult, error) +} + +// IndexResult contains the result of an index lookup. +type IndexResult struct { + Rules []*Rule + Else map[*Rule][]*Rule + Default *Rule + Kind RuleKind + EarlyExit bool + OnlyGroundRefs bool +} + +// NewIndexResult returns a new IndexResult object. +func NewIndexResult(kind RuleKind) *IndexResult { + return &IndexResult{ + Kind: kind, + } +} + +// Empty returns true if there are no rules to evaluate. 
+func (ir *IndexResult) Empty() bool { + return len(ir.Rules) == 0 && ir.Default == nil +} + +type baseDocEqIndex struct { + isVirtual func(Ref) bool + root *trieNode + defaultRule *Rule + kind RuleKind + onlyGroundRefs bool +} + +var ( + equalityRef = Equality.Ref() + equalRef = Equal.Ref() + globMatchRef = GlobMatch.Ref() + internalPrintRef = InternalPrint.Ref() + internalTestCaseRef = InternalTestCase.Ref() + internalMemberRef = Member.Ref() + + skipIndexing = NewSet(NewTerm(internalPrintRef), NewTerm(internalTestCaseRef)) +) + +func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex { + return &baseDocEqIndex{ + isVirtual: isVirtual, + root: newTrieNodeImpl(), + onlyGroundRefs: true, + } +} + +func (i *baseDocEqIndex) Build(rules []*Rule) bool { + if len(rules) == 0 { + return false + } + + i.kind = rules[0].Head.RuleKind() + indices := newrefindices(i.isVirtual) + values := make(map[Var]Value) + + // build indices for each rule. + for idx := range rules { + WalkRules(rules[idx], func(rule *Rule) bool { + if rule.Default { + i.defaultRule = rule + return false + } + if i.onlyGroundRefs { + i.onlyGroundRefs = rule.Head.Reference.IsGround() + } + var skip bool + for i := range rule.Body { + if op := rule.Body[i].OperatorTerm(); op != nil && skipIndexing.Contains(op) { + skip = true + break + } + } + if !skip { + clear(values) + for i := range rule.Body { + indices.Update(rule, rule.Body[i], values) + } + } + return false + }) + } + + // build trie out of indices. 
+ for idx := range rules { + var prio int + WalkRules(rules[idx], func(rule *Rule) bool { + if rule.Default { + return false + } + node := i.root + if indices.Indexed(rule) { + for _, ref := range indices.Sorted() { + var values []*refindex + for _, ri := range indices.rules[rule] { + if ri.Ref.Equal(ref) { + values = append(values, ri) + } + } + if len(values) == 0 { + node = node.Insert(ref, nil, nil) + } else if len(values) == 1 { + node = node.Insert(ref, values[0].Value, values[0].Mapper) + } else { + var hasVar bool + for i := range values { + if _, isVar := values[i].Value.(Var); isVar { + hasVar = true + break + } + } + + if hasVar { + child := node.Insert(ref, anyValue, values[0].Mapper) + for i := range values { + if values[i].Mapper != nil { + node.next.addMapper(values[i].Mapper) + } + } + node = child + } else { + // When a rule has multiple scalar values (e.g., internal.member_2 with a set), + // each value should have its own child node, and the rule is appended to each. + // This creates separate paths for each value so different rules with overlapping + // values don't interfere with each other. + for _, val := range values { + child := node.Insert(ref, val.Value, val.Mapper) + child.append([...]int{idx, prio}, rule) + } + prio++ + return false + } + } + } + } + // Insert rule into trie with (insertion order, priority order) + // tuple. Retaining the insertion order allows us to return rules + // in the order they were passed to this function. 
+ node.append([...]int{idx, prio}, rule) + prio++ + return false + }) + } + return true +} + +func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { + tr := ttrPool.Get().(*trieTraversalResult) + + defer func() { + clear(tr.unordered) + tr.ordering = tr.ordering[:0] + tr.multiple = false + tr.exist = nil + + ttrPool.Put(tr) + }() + + err := i.root.Traverse(resolver, tr) + if err != nil { + return nil, err + } + + result := IndexResultPool.Get() + + result.Kind = i.kind + result.Default = i.defaultRule + result.OnlyGroundRefs = i.onlyGroundRefs + + if result.Rules == nil { + result.Rules = make([]*Rule, 0, len(tr.ordering)) + } else { + result.Rules = result.Rules[:0] + } + + clear(result.Else) + + for _, pos := range tr.ordering { + slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int { + return a.prio[1] - b.prio[1] + }) + nodes := tr.unordered[pos] + root := nodes[0].rule + + result.Rules = append(result.Rules, root) + if len(nodes) > 1 { + if result.Else == nil { + result.Else = map[*Rule][]*Rule{} + } + + result.Else[root] = make([]*Rule, len(nodes)-1) + for i := 1; i < len(nodes); i++ { + result.Else[root][i-1] = nodes[i].rule + } + } + } + + if !tr.multiple { + // even when the indexer hasn't seen multiple values, the rule itself could be one + // where early exit shouldn't be applied. 
+ var lastValue Value + for i := range result.Rules { + if result.Rules[i].Head.DocKind() != CompleteDoc { + tr.multiple = true + break + } + if result.Rules[i].Head.Value != nil { + if lastValue != nil && !ValueEqual(lastValue, result.Rules[i].Head.Value.Value) { + tr.multiple = true + break + } + lastValue = result.Rules[i].Head.Value.Value + } + } + } + + result.EarlyExit = !tr.multiple + + return result, nil +} + +func (i *baseDocEqIndex) AllRules(ValueResolver) (*IndexResult, error) { + tr := newTrieTraversalResult() + + // Walk over the rule trie and accumulate _all_ rules + rw := &ruleWalker{result: tr} + i.root.Do(rw) + + result := NewIndexResult(i.kind) + result.Default = i.defaultRule + result.OnlyGroundRefs = i.onlyGroundRefs + result.Rules = make([]*Rule, 0, len(tr.ordering)) + + for _, pos := range tr.ordering { + slices.SortFunc(tr.unordered[pos], func(a, b *ruleNode) int { + return a.prio[1] - b.prio[1] + }) + nodes := tr.unordered[pos] + root := nodes[0].rule + result.Rules = append(result.Rules, root) + if len(nodes) > 1 { + if result.Else == nil { + result.Else = map[*Rule][]*Rule{} + } + + result.Else[root] = make([]*Rule, len(nodes)-1) + for i := 1; i < len(nodes); i++ { + result.Else[root][i-1] = nodes[i].rule + } + } + } + + result.EarlyExit = !tr.multiple + + return result, nil +} + +type ruleWalker struct { + result *trieTraversalResult +} + +func (r *ruleWalker) Do(x any) trieWalker { + tn := x.(*trieNode) + r.result.Add(tn) + return r +} + +type valueMapper struct { + Key string + MapValue func(Value) Value +} + +type refindex struct { + Ref Ref + Value Value + Mapper *valueMapper +} + +type refindices struct { + isVirtual func(Ref) bool + rules map[*Rule][]*refindex + frequency *util.HasherMap[Ref, int] + sorted []Ref +} + +func newrefindices(isVirtual func(Ref) bool) *refindices { + return &refindices{ + isVirtual: isVirtual, + rules: map[*Rule][]*refindex{}, + frequency: util.NewHasherMap[Ref, int](RefEqual), + } +} + +// anyValue is a 
fake variable we used to put "naked ref" expressions +// into the rule index +var anyValue = Var("__any__") + +// Update attempts to update the refindices for the given expression in the +// given rule. If the expression cannot be indexed the update does not affect +// the indices. +func (i *refindices) Update(rule *Rule, expr *Expr, values map[Var]Value) { + + if len(expr.With) > 0 { + // NOTE(tsandall): In the future, we may need to consider expressions + // that have with statements applied to them. + return + } + + if expr.Negated { + // NOTE(sr): We could try to cover simple expressions, like + // not input.funky => input.funky == false or undefined (two refindex?) + return + } + + op := expr.Operator() + if op == nil { + if ts, ok := expr.Terms.(*Term); ok { + // NOTE(sr): If we wanted to cover function args, we'd need to also + // check for type "Var" here. But since it's impossible to call a + // function with a undefined argument, there's no point to recording + // "needs to be anything" for function args + if ref, ok := ts.Value.(Ref); ok { // "naked ref" + i.updateEq(rule, ref, anyValue, nil) + } + } + } + + equalish := op.Equal(equalityRef) || // unification, no 3-operands version exists + // NOTE(tsandall): if equal() is called with more than two arguments the + // output value is being captured in which case the indexer cannot + // exclude the rule if the equal() call would return false (because the + // false value must still be produced.) + (op.Equal(equalRef) && len(expr.Operands()) == 2) + + a, b := expr.Operand(0), expr.Operand(1) + switch { + case equalish: + if !i.updateEqWildcardRef(rule, a.Value, b.Value, values) { + i.updateEq(rule, a.Value, b.Value, values) + } + + case op.Equal(globMatchRef) && len(expr.Operands()) == 3: + // NOTE(sr): Same as with equal() above -- 4 operands means the output + // of `glob.match` is captured and the rule can thus not be excluded. 
+ i.updateGlobMatch(rule, expr) + + case op.Equal(internalMemberRef) && len(expr.Operands()) == 2: + // NOTE(sr): Again, 3 operands means captured output (like above). + i.updateMember(rule, expr, values) + } +} + +func (i *refindices) isValidIndexRef(ref Ref) bool { + // NB(sr): the ordering is intentional, cheapest-first + return RootDocumentNames.Contains(ref[0]) && + !ref.IsNested() && + ref.IsGround() && + !i.isVirtual(ref) +} + +// Sorted returns a sorted list of references that the indices were built from. +// References that appear more frequently in the indexed rules are ordered +// before less frequently appearing references. +func (i *refindices) Sorted() []Ref { + + if i.sorted == nil { + counts := make([]int, 0, i.frequency.Len()) + i.sorted = make([]Ref, 0, i.frequency.Len()) + + i.frequency.Iter(func(k Ref, v int) bool { + counts = append(counts, v) + i.sorted = append(i.sorted, k) + return false + }) + + sort.Slice(i.sorted, func(a, b int) bool { + if counts[a] > counts[b] { + return true + } else if counts[b] > counts[a] { + return false + } + return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0 + }) + } + + return i.sorted +} + +func (i *refindices) Indexed(rule *Rule) bool { + return len(i.rules[rule]) > 0 +} + +func (i *refindices) Value(rule *Rule, ref Ref) Value { + if index := i.index(rule, ref); index != nil { + return index.Value + } + return nil +} + +func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper { + if index := i.index(rule, ref); index != nil { + return index.Mapper + } + return nil +} + +func (i *refindices) updateEq(rule *Rule, a, b Value, constants map[Var]Value) { + args := rule.Head.Args + if !i.eqOperandsToRefAndValue(rule, args, a, b, constants) { + i.eqOperandsToRefAndValue(rule, args, b, a, constants) + } +} + +func (i *refindices) updateEqWildcardRef(rule *Rule, a, b Value, constants map[Var]Value) bool { + return i.tryIndexWildcardRef(rule, a, b, constants) || + i.tryIndexWildcardRef(rule, b, a, 
constants) +} + +func (i *refindices) tryIndexWildcardRef(rule *Rule, a, b Value, constants map[Var]Value) bool { + ref, ok := a.(Ref) + if !ok { + return false + } + + groundPrefix := ref.GroundPrefix() + if len(groundPrefix) != len(ref)-1 || !i.isValidIndexRef(groundPrefix) { + return false + } + + resolvedValue := b + if bvar, ok := b.(Var); ok { + if resolved, ok := constants[bvar]; ok { + resolvedValue = resolved + } + } else if val, ok := indexValue(b); ok { + resolvedValue = val + } else { + return false + } + + if !IsScalar(resolvedValue) { + return false + } + + i.insert(rule, &refindex{Ref: groundPrefix, Value: resolvedValue}) + return true +} + +func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { + args := rule.Head.Args + + delim, ok := globDelimiterToString(expr.Operand(1)) + if !ok { + return + } + + if arr := globPatternToArray(expr.Operand(0), delim); arr != nil { + // The 3rd operand of glob.match is the value to match. We assume the + // 3rd operand was a reference that has been rewritten and bound to a + // variable earlier in the query OR a function argument variable. 
+ match := expr.Operand(2) + if v, ok := match.Value.(Var); ok { + if ref := resolveVarToRef(i.rules[rule], args, v); ref != nil { + i.insert(rule, &refindex{ + Ref: ref, + Value: arr.Value, + Mapper: &valueMapper{ + Key: delim, + MapValue: func(v Value) Value { + if s, ok := v.(String); ok { + return stringSliceToArray(splitStringEscaped(string(s), delim)) + } + return v + }, + }, + }) + } + } + } +} + +func (i *refindices) updateMember(rule *Rule, expr *Expr, constants map[Var]Value) { + args := rule.Head.Args + lhs, rhs := expr.Operand(0), expr.Operand(1) + + lvar, ok := lhs.Value.(Var) + if ok { + lref := resolveVarToRef(i.rules[rule], args, lvar) + if lref != nil { + i.updateMemberRefInValue(rule, lref, rhs, constants) // `ref in value` + return + } + } + + // `var0 in var1` case (var0 may be constant, var1 ref) + i.updateMemberValueInRef(rule, args, lhs.Value, rhs, constants) +} + +func (i *refindices) updateMemberValueInRef(rule *Rule, args []*Term, lval Value, rhs *Term, constants map[Var]Value) { + if lvar, ok := lval.(Var); ok { + val, ok := constants[lvar] + if ok { + lval = val + } + } else if !IsScalar(lval) { + return + } + + rref := i.resolveAndValidateRef(rule, args, rhs) + if rref == nil { + return + } + + i.insert(rule, &refindex{Ref: rref, Value: lval}) +} + +func (i *refindices) updateMemberRefInValue(rule *Rule, ref Ref, rhs *Term, constants map[Var]Value) { + rval := rhs.Value + if rvar, ok := rval.(Var); ok { // rhs is var, try to resolve + if resolved, ok := constants[rvar]; ok { + rval = resolved + } + } + + addRef := func(t *Term) error { + i.insert(rule, &refindex{Ref: ref, Value: t.Value}) + return nil + } + + switch rcol := rval.(type) { + case *Array: + _ = rcol.Iter(addRef) + case Set: + _ = rcol.Iter(addRef) + case Object: + _ = rcol.Iter(func(_, v *Term) error { + return addRef(v) + }) + } +} + +func (i *refindices) resolveAndValidateRef(rule *Rule, args []*Term, term *Term) Ref { + var ref Ref + switch v := term.Value.(type) { + 
case Ref: + ref = v + case Var: + ref = resolveVarToRef(i.rules[rule], args, v) + default: + return nil + } + + if ref == nil || !i.isValidIndexRef(ref) { + return nil + } + + return ref +} + +// resolveVarToRef checks the previously prepared `*refindex` slice for +// occurrences of the var `v`. Since we store `ref = var` expressions for +// "any" lookups (i.e. "return the rule if ref is anything"), we can +// resolve vars to refs in these simple cases: +// +// __local2__ = input.foo +// __local2__ = +// +// This what builtin calls involving refs are rewritten to, so it is used +// for var -> ref lookup when buiding the RI for glob.match or `v in col`. +// +// For convenience, we also resolve function arg vars here. +// +// NB: This also covers explicit var assignments, like `role := input.rule`, +// but it is no help with chains of assignments, like +// +// x := input.role +// y := x +// +// +// as we're not capturing `var = var` expressions in the index. +func resolveVarToRef(ri []*refindex, args []*Term, v Var) Ref { + for _, other := range ri { + if ov, ok := other.Value.(Var); ok && ov.Equal(v) { + return other.Ref + } + } + for j, arg := range args { + if arg.Value.Compare(v) == 0 { + return Ref{FunctionArgRootDocument, InternedTerm(j)} + } + } + + return nil +} + +func (i *refindices) insert(rule *Rule, index *refindex) { + count, _ := i.frequency.Get(index.Ref) + i.frequency.Put(index.Ref, count+1) + + _, indexValueIsVar := index.Value.(Var) + + for pos, other := range i.rules[rule] { + if other.Ref.Equal(index.Ref) { + + if ValueEqual(other.Value, index.Value) { + return + } + _, otherValueIsVar := other.Value.(Var) + if !indexValueIsVar && otherValueIsVar { + i.rules[rule][pos] = index + return + } + } + } + + i.rules[rule] = append(i.rules[rule], index) +} + +func (i *refindices) index(rule *Rule, ref Ref) *refindex { + for _, index := range i.rules[rule] { + if index.Ref.Equal(ref) { + return index + } + } + return nil +} + +type trieWalker interface { 
+ Do(any) trieWalker +} + +type trieTraversalResult struct { + unordered map[int][]*ruleNode + ordering []int + exist *Term + multiple bool +} + +var ttrPool = sync.Pool{ + New: func() any { + return newTrieTraversalResult() + }, +} + +func newTrieTraversalResult() *trieTraversalResult { + return &trieTraversalResult{ + unordered: map[int][]*ruleNode{}, + } +} + +func (tr *trieTraversalResult) Add(t *trieNode) { + for _, node := range t.rules { + root := node.prio[0] + nodes, ok := tr.unordered[root] + if !ok { + tr.ordering = append(tr.ordering, root) + } + // Deduplicate: check if a ruleNode with this priority already exists + if !slices.ContainsFunc(nodes, func(existing *ruleNode) bool { + return existing.prio == node.prio + }) { + tr.unordered[root] = append(nodes, node) + } + } + if t.multiple { + tr.multiple = true + } + if tr.multiple || t.value == nil { + return + } + if t.value.IsGround() && tr.exist == nil || tr.exist.Equal(t.value) { + tr.exist = t.value + return + } + tr.multiple = true +} + +type trieNode struct { + ref Ref + mappers []*valueMapper + next *trieNode + any *trieNode + undefined *trieNode + scalars *util.HasherMap[Value, *trieNode] + array *trieNode + rules []*ruleNode + value *Term + multiple bool +} + +func (node *trieNode) append(prio [2]int, rule *Rule) { + node.rules = append(node.rules, &ruleNode{prio, rule}) + + if node.value != nil && rule.Head.Value != nil && !node.value.Equal(rule.Head.Value) { + node.multiple = true + } + + if node.value == nil && rule.Head.DocKind() == CompleteDoc { + node.value = rule.Head.Value + } +} + +type ruleNode struct { + prio [2]int + rule *Rule +} + +func newTrieNodeImpl() *trieNode { + return &trieNode{ + scalars: util.NewHasherMap[Value, *trieNode](ValueEqual), + } +} + +func (node *trieNode) Do(walker trieWalker) { + if node == nil { + return + } + next := walker.Do(node) + if next == nil { + return + } + + node.any.Do(next) + node.undefined.Do(next) + + node.scalars.Iter(func(_ Value, child 
*trieNode) bool { + child.Do(next) + return false + }) + + node.array.Do(next) + node.next.Do(next) +} + +func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode { + + if node.next == nil { + node.next = newTrieNodeImpl() + node.next.ref = ref + } + + if mapper != nil { + node.next.addMapper(mapper) + } + + return node.next.insertValue(value) +} + +func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error { + + if node == nil { + return nil + } + + tr.Add(node) + + return node.next.traverse(resolver, tr) +} + +func (node *trieNode) addMapper(mapper *valueMapper) { + for i := range node.mappers { + if node.mappers[i].Key == mapper.Key { + return + } + } + node.mappers = append(node.mappers, mapper) +} + +func (node *trieNode) insertValue(value Value) *trieNode { + + switch value := value.(type) { + case nil: + if node.undefined == nil { + node.undefined = newTrieNodeImpl() + } + return node.undefined + case Var: + if node.any == nil { + node.any = newTrieNodeImpl() + } + return node.any + case Null, Boolean, Number, String: + child, ok := node.scalars.Get(value) + if !ok { + child = newTrieNodeImpl() + node.scalars.Put(value, child) + } + return child + case *Array: + if node.array == nil { + node.array = newTrieNodeImpl() + } + return node.array.insertArray(value) + } + + panic("illegal value") +} + +func (node *trieNode) insertArray(arr *Array) *trieNode { + + if arr.Len() == 0 { + return node + } + + switch head := arr.Elem(0).Value.(type) { + case Var: + if node.any == nil { + node.any = newTrieNodeImpl() + } + return node.any.insertArray(arr.Slice(1, -1)) + case Null, Boolean, Number, String: + child, ok := node.scalars.Get(head) + if !ok { + child = newTrieNodeImpl() + node.scalars.Put(head, child) + } + return child.insertArray(arr.Slice(1, -1)) + } + + panic("illegal value") +} + +func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error { + if node == nil { + return nil + } + 
+ v, err := resolver.Resolve(node.ref) + if err != nil { + if IsUnknownValueErr(err) { + return node.traverseUnknown(resolver, tr) + } + return err + } + + err = node.undefined.Traverse(resolver, tr) + if err != nil { + return err + } + + if v == nil { + return nil + } + + err = node.any.Traverse(resolver, tr) + if err != nil { + return err + } + + err = node.traverseValue(resolver, tr, v) + if err != nil { + return err + } + + for i := range node.mappers { + mapped := node.mappers[i].MapValue(v) + if !ValueEqual(mapped, v) { + if err := node.traverseValue(resolver, tr, mapped); err != nil { + return err + } + } + } + + return nil +} + +func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error { + + switch value := value.(type) { + case *Array, Set, Object: + if node.array != nil { + if arr, ok := value.(*Array); ok { + if err := node.array.traverseArray(resolver, tr, arr); err != nil { + return err + } + } + } + + if node.scalars.Len() > 0 { + return node.traverseCollectionMembership(resolver, tr, value) + } + + return nil + + case Null, Boolean, Number, String: + child, ok := node.scalars.Get(value) + if !ok { + return nil + } + return child.Traverse(resolver, tr) + } + + return nil +} + +func (node *trieNode) traverseCollectionMembership(resolver ValueResolver, tr *trieTraversalResult, collection Value) error { + checkMember := func(t *Term) error { + if IsScalar(t.Value) { + child, _ := node.scalars.Get(t.Value) + return child.Traverse(resolver, tr) + } + return nil + } + + switch col := collection.(type) { + case *Array: + return col.Iter(checkMember) + case Set: + return col.Iter(checkMember) + case Object: + return col.Iter(func(_, v *Term) error { + return checkMember(v) + }) + } + + return nil +} + +func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error { + if node == nil { + return nil + } + + if arr.Len() == 0 { + return node.Traverse(resolver, tr) + } + + err 
:= node.any.traverseArray(resolver, tr, arr.Slice(1, -1)) + if err != nil { + return err + } + + head := arr.Elem(0).Value + + if !IsScalar(head) { + return nil + } + + switch head := head.(type) { + case Null, Boolean, Number, String: + child, _ := node.scalars.Get(head) + return child.traverseArray(resolver, tr, arr.Slice(1, -1)) + } + + panic("illegal value") +} + +func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error { + if node == nil { + return nil + } + + if err := node.Traverse(resolver, tr); err != nil { + return err + } + + if err := node.undefined.traverseUnknown(resolver, tr); err != nil { + return err + } + + if err := node.any.traverseUnknown(resolver, tr); err != nil { + return err + } + + if err := node.array.traverseUnknown(resolver, tr); err != nil { + return err + } + + var iterErr error + node.scalars.Iter(func(_ Value, child *trieNode) bool { + return child.traverseUnknown(resolver, tr) != nil + }) + + return iterErr +} + +// If term `a` is one of the function's operands, we store a Ref: `args[0]` +// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll +// bind `args[0]` and `args[1]` to this rule when called for (x=10) and +// (y=12) respectively. 
+func (i *refindices) eqOperandsToRefAndValue(rule *Rule, args []*Term, a, b Value, constants map[Var]Value) bool { + switch v := a.(type) { + case Var: + // a is a var, but we have not been able to resolve it to a ref, save for later + if IsConstant(b) { + constants[v] = b + } + + bval, ok := indexValue(b) + if !ok { + return false + } + if ref := resolveVarToRef(i.rules[rule], args, v); ref != nil { + i.insert(rule, &refindex{Ref: ref, Value: bval}) + return true + } + + case Ref: + if !i.isValidIndexRef(v) { + return false + } + + if bvar, ok := b.(Var); ok { // cheaper lookup first: constants + if resolved, ok := constants[bvar]; ok { + b = resolved + } + } else if bval, ok := indexValue(b); ok { + b = bval + } else { + return false + } + + i.insert(rule, &refindex{Ref: v, Value: b}) + return true + } + return false +} + +func indexValue(b Value) (Value, bool) { + switch b := b.(type) { + case Null, Boolean, Number, String, Var: + return b, true + case *Array: + stop := false + first := true + vis := NewGenericVisitor(func(x any) bool { + if first { + first = false + return false + } + switch x.(type) { + // No nested structures or values that require evaluation (other than var). + case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref: + stop = true + } + return stop + }) + vis.Walk(b) + if !stop { + return b, true + } + } + + return nil, false +} + +func globDelimiterToString(delim *Term) (string, bool) { + arr, ok := delim.Value.(*Array) + if !ok { + return "", false + } + + var result string + + if arr.Len() == 0 { + result = "." 
+ } else { + sb := strings.Builder{} + for i := range arr.Len() { + term := arr.Elem(i) + s, ok := term.Value.(String) + if !ok { + return "", false + } + sb.WriteString(string(s)) + } + result = sb.String() + } + + return result, true +} + +var globwildcard = VarTerm("$globwildcard") + +func globPatternToArray(pattern *Term, delim string) *Term { + + s, ok := pattern.Value.(String) + if !ok { + return nil + } + + parts := splitStringEscaped(string(s), delim) + arr := make([]*Term, len(parts)) + + for i := range parts { + if parts[i] == "*" { + arr[i] = globwildcard + } else { + var escaped bool + for _, c := range parts[i] { + if c == '\\' { + escaped = !escaped + continue + } + if !escaped { + switch c { + case '[', '?', '{', '*': + // TODO(tsandall): super glob and character pattern + // matching not supported yet. + return nil + } + } + escaped = false + } + arr[i] = StringTerm(parts[i]) + } + } + + return ArrayTerm(arr...) +} + +// splits s on characters in delim except if delim characters have been escaped +// with reverse solidus. +func splitStringEscaped(s string, delim string) []string { + + var last, curr int + var escaped bool + var result []string + + for ; curr < len(s); curr++ { + if s[curr] == '\\' || escaped { + escaped = !escaped + continue + } + if strings.ContainsRune(delim, rune(s[curr])) { + result = append(result, s[last:curr]) + last = curr + 1 + } + } + + result = append(result, s[last:]) + + return result +} + +func stringSliceToArray(s []string) *Array { + arr := make([]*Term, len(s)) + for i, v := range s { + arr[i] = StringTerm(v) + } + return NewArray(arr...) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/index_debug.go b/vendor/github.com/open-policy-agent/opa/v1/ast/index_debug.go new file mode 100644 index 0000000000..88d451b175 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/index_debug.go @@ -0,0 +1,219 @@ +// Copyright 2026 The OPA Authors. All rights reserved. 
+// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "sort" + "strings" +) + +func (node *trieNode) mermaid() string { + var sb strings.Builder + sb.WriteString("graph TD\n") + nodeCounter := 0 + nodeIDs := make(map[*trieNode]string) + node.mermaidFormat(&sb, &nodeCounter, nodeIDs, "") + return sb.String() +} + +func (node *trieNode) mermaidFormat(sb *strings.Builder, counter *int, nodeIDs map[*trieNode]string, parentID string) { + currentID, exists := nodeIDs[node] + if !exists { + currentID = fmt.Sprintf("n%d", *counter) + *counter++ + nodeIDs[node] = currentID + label := node.mermaidLabel() + fmt.Fprintf(sb, " %s[\"%s\"]\n", currentID, label) + } + + if parentID != "" { + fmt.Fprintf(sb, " %s --> %s\n", parentID, currentID) + } + + if exists { + return + } + + if node.undefined != nil { + if childID, childExists := nodeIDs[node.undefined]; childExists { + fmt.Fprintf(sb, " %s -->|undefined| %s\n", currentID, childID) + } else { + node.undefined.mermaidFormat(sb, counter, nodeIDs, "") + fmt.Fprintf(sb, " %s -->|undefined| %s\n", currentID, nodeIDs[node.undefined]) + } + } + + if node.any != nil { + if childID, childExists := nodeIDs[node.any]; childExists { + fmt.Fprintf(sb, " %s -->|any| %s\n", currentID, childID) + } else { + node.any.mermaidFormat(sb, counter, nodeIDs, "") + fmt.Fprintf(sb, " %s -->|any| %s\n", currentID, nodeIDs[node.any]) + } + } + + if node.scalars.Len() > 0 { + type scalarPair struct { + key Value + node *trieNode + } + pairs := make([]scalarPair, 0, node.scalars.Len()) + node.scalars.Iter(func(key Value, val *trieNode) bool { + pairs = append(pairs, scalarPair{key, val}) + return false + }) + sort.Slice(pairs, func(a, b int) bool { + return pairs[a].key.Compare(pairs[b].key) < 0 + }) + for _, pair := range pairs { + var scalarLabel string + if s, ok := pair.key.(String); ok { + scalarLabel = string(s) + } else { + scalarLabel = pair.key.String() + } + 
if len(scalarLabel) > 20 { + scalarLabel = scalarLabel[:20] + "..." + } + scalarLabel = mermaidEscape(scalarLabel) + if childID, childExists := nodeIDs[pair.node]; childExists { + fmt.Fprintf(sb, " %s -->|\"%s\"| %s\n", currentID, scalarLabel, childID) + } else { + pair.node.mermaidFormat(sb, counter, nodeIDs, "") + fmt.Fprintf(sb, " %s -->|\"%s\"| %s\n", currentID, scalarLabel, nodeIDs[pair.node]) + } + } + } + + if node.array != nil { + if childID, childExists := nodeIDs[node.array]; childExists { + fmt.Fprintf(sb, " %s -->|array| %s\n", currentID, childID) + } else { + node.array.mermaidFormat(sb, counter, nodeIDs, "") + fmt.Fprintf(sb, " %s -->|array| %s\n", currentID, nodeIDs[node.array]) + } + } + + if node.next != nil { + node.next.mermaidFormat(sb, counter, nodeIDs, currentID) + } +} + +func (node *trieNode) mermaidLabel() string { + var parts []string + + if len(node.ref) > 0 { + parts = append(parts, node.ref.String()) + } + + if len(node.rules) > 0 { + for _, rn := range node.rules { + bodyStr := "" + if rn.rule.Body != nil { + bodyStr = rn.rule.Body.String() + if len(bodyStr) > 50 { + bodyStr = bodyStr[:50] + "..." + } + } + bodyStr = mermaidEscape(bodyStr) + parts = append(parts, bodyStr) + } + } + + if len(node.mappers) > 0 { + parts = append(parts, fmt.Sprintf("%d mapper(s)", len(node.mappers))) + } + if node.multiple { + parts = append(parts, "multiple") + } + + if len(parts) == 0 { + return "·" + } + + return strings.Join(parts, "
") +} + +func mermaidEscape(s string) string { + s = strings.ReplaceAll(s, `"`, `"`) + return s +} + +func (node *trieNode) String() string { + var sb strings.Builder + node.format(&sb, 0) + return sb.String() +} + +func (node *trieNode) format(sb *strings.Builder, depth int) { + indent := strings.Repeat(" ", depth) + + if len(node.ref) > 0 { + sb.WriteString(indent) + sb.WriteString(node.ref.String()) + } else if depth == 0 { + sb.WriteString("root") + } + + if len(node.rules) > 0 { + fmt.Fprintf(sb, " [%d rule(s)]", len(node.rules)) + } + if len(node.mappers) > 0 { + fmt.Fprintf(sb, " [%d mapper(s)]", len(node.mappers)) + } + if node.value != nil { + fmt.Fprintf(sb, " value=%v", node.value) + } + if node.multiple { + sb.WriteString(" [multiple]") + } + sb.WriteString("\n") + + if node.undefined != nil { + sb.WriteString(indent) + sb.WriteString(" undefined:\n") + node.undefined.format(sb, depth+2) + } + + if node.any != nil { + sb.WriteString(indent) + sb.WriteString(" any:\n") + node.any.format(sb, depth+2) + } + + if node.scalars.Len() > 0 { + scalars := make([]Value, 0, node.scalars.Len()) + nodes := make([]*trieNode, 0, node.scalars.Len()) + node.scalars.Iter(func(key Value, val *trieNode) bool { + scalars = append(scalars, key) + nodes = append(nodes, val) + return false + }) + sort.Slice(scalars, func(a, b int) bool { + return scalars[a].Compare(scalars[b]) < 0 + }) + for i := range scalars { + sb.WriteString(indent) + fmt.Fprintf(sb, " %v:\n", scalars[i]) + for j := range nodes { + if ValueEqual(scalars[i], scalars[j]) { + nodes[j].format(sb, depth+2) + break + } + } + } + } + + if node.array != nil { + sb.WriteString(indent) + sb.WriteString(" array:\n") + node.array.format(sb, depth+2) + } + + if node.next != nil { + node.next.format(sb, depth) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go new file mode 100644 index 
0000000000..6b2b03b27a --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go @@ -0,0 +1,623 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package scanner + +import ( + "fmt" + "io" + "unicode" + "unicode/utf8" + + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" + "github.com/open-policy-agent/opa/v1/util" +) + +const bom = 0xFEFF + +// Scanner is used to tokenize an input stream of +// Rego source code. +type Scanner struct { + keywords map[string]tokens.Token + bs []byte + errors []Error + tabs []int + offset int + row int + col int + width int + curr rune + regoV1Compatible bool +} + +// Error represents a scanner error. +type Error struct { + Message string + Pos Position +} + +// Position represents a point in the scanned source code. +type Position struct { + Tabs []int // positions of any tabs preceding Col + Offset int // start offset in bytes + End int // end offset in bytes + Row int // line number computed in bytes + Col int // column number computed in bytes +} + +// New returns an initialized scanner that will scan +// through the source code provided by the io.Reader. +func New(r io.Reader) (*Scanner, error) { + + bs, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + s := &Scanner{ + offset: 0, + row: 1, + col: 0, + bs: bs, + curr: -1, + width: 0, + keywords: tokens.Keywords(), + tabs: []int{}, + } + + s.next() + + if s.curr == bom { + s.next() + } + + return s, nil +} + +// Bytes returns the raw bytes for the full source +// which the scanner has read in. +func (s *Scanner) Bytes() []byte { + return s.bs +} + +// String returns a human readable string of the current scanner state. +func (s *Scanner) String() string { + return fmt.Sprintf("", s.curr, s.offset, len(s.bs)) +} + +// Keyword will return a token for the passed in +// literal value. 
If the value is a Rego keyword +// then the appropriate token is returned. Everything +// else is an Ident. +func (s *Scanner) Keyword(lit string) tokens.Token { + if tok, ok := s.keywords[lit]; ok { + return tok + } + return tokens.Ident +} + +// AddKeyword adds a string -> token mapping to this Scanner instance. +func (s *Scanner) AddKeyword(kw string, tok tokens.Token) { + s.keywords[kw] = tok + + if tok == tokens.Every { + // importing 'every' means also importing 'in' + s.keywords["in"] = tokens.In + } +} + +func (s *Scanner) HasKeyword(keywords map[string]tokens.Token) bool { + for kw := range s.keywords { + if _, ok := keywords[kw]; ok { + return true + } + } + return false +} + +func (s *Scanner) IsKeyword(str string) bool { + _, ok := s.keywords[str] + return ok +} + +func (s *Scanner) SetRegoV1Compatible() { + s.regoV1Compatible = true +} + +func (s *Scanner) RegoV1Compatible() bool { + return s.regoV1Compatible +} + +// WithKeywords returns a new copy of the Scanner struct `s`, with the set +// of known keywords being that of `s` with `kws` added. +func (s *Scanner) WithKeywords(kws map[string]tokens.Token) *Scanner { + cpy := *s + cpy.keywords = make(map[string]tokens.Token, len(s.keywords)+len(kws)) + for kw, tok := range s.keywords { + cpy.AddKeyword(kw, tok) + } + for k, t := range kws { + cpy.AddKeyword(k, t) + } + return &cpy +} + +// WithoutKeywords returns a new copy of the Scanner struct `s`, with the +// set of known keywords being that of `s` with `kws` removed. +// The previously known keywords are returned for a convenient reset. 
+func (s *Scanner) WithoutKeywords(kws map[string]tokens.Token) (*Scanner, map[string]tokens.Token) { + cpy := *s + kw := s.keywords + cpy.keywords = make(map[string]tokens.Token, len(s.keywords)-len(kws)) + for kw, tok := range s.keywords { + if _, ok := kws[kw]; !ok { + cpy.AddKeyword(kw, tok) + } + } + return &cpy, kw +} + +type ScanOptions struct { + continueTemplateString bool + rawTemplateString bool +} + +type ScanOption func(*ScanOptions) + +// ContinueTemplateString will continue scanning a template string +func ContinueTemplateString(raw bool) ScanOption { + return func(opts *ScanOptions) { + opts.continueTemplateString = true + opts.rawTemplateString = raw + } +} + +// Scan will increment the scanners position in the source +// code until the next token is found. The token, starting position +// of the token, string literal, and any errors encountered are +// returned. A token will always be returned, the caller must check +// for any errors before using the other values. +func (s *Scanner) Scan(opts ...ScanOption) (tokens.Token, Position, string, []Error) { + scanOpts := &ScanOptions{} + for _, opt := range opts { + opt(scanOpts) + } + + pos := Position{Offset: s.offset - s.width, Row: s.row, Col: s.col, Tabs: s.tabs} + var tok tokens.Token + var lit string + if scanOpts.continueTemplateString { + if scanOpts.rawTemplateString { + lit, tok = s.scanRawTemplateString() + } else { + lit, tok = s.scanTemplateString() + } + } else if s.isWhitespace() { + // string(rune) is an unnecessary heap allocation in this case as we know all + // the possible whitespace values, and can simply translate to string ourselves + switch s.curr { + case ' ': + lit = " " + case '\t': + lit = "\t" + case '\n': + lit = "\n" + case '\r': + lit = "\r" + default: + // unreachable unless isWhitespace changes + lit = string(s.curr) + } + s.next() + tok = tokens.Whitespace + } else if isLetter(s.curr) { + lit = s.scanIdentifier() + tok = s.Keyword(lit) + } else if isDecimal(s.curr) { 
+ lit = s.scanNumber() + tok = tokens.Number + } else { + ch := s.curr + s.next() + switch ch { + case -1: + tok = tokens.EOF + case '#': + lit = s.scanComment() + tok = tokens.Comment + case '"': + lit = s.scanString() + tok = tokens.String + case '`': + lit = s.scanRawString() + tok = tokens.String + case '[': + tok = tokens.LBrack + case ']': + tok = tokens.RBrack + case '{': + tok = tokens.LBrace + case '}': + tok = tokens.RBrace + case '(': + tok = tokens.LParen + case ')': + tok = tokens.RParen + case ',': + tok = tokens.Comma + case ':': + if s.curr == '=' { + s.next() + tok = tokens.Assign + } else { + tok = tokens.Colon + } + case '+': + tok = tokens.Add + case '-': + tok = tokens.Sub + case '*': + tok = tokens.Mul + case '/': + tok = tokens.Quo + case '%': + tok = tokens.Rem + case '&': + tok = tokens.And + case '|': + tok = tokens.Or + case '=': + if s.curr == '=' { + s.next() + tok = tokens.Equal + } else { + tok = tokens.Unify + } + case '>': + if s.curr == '=' { + s.next() + tok = tokens.Gte + } else { + tok = tokens.Gt + } + case '<': + if s.curr == '=' { + s.next() + tok = tokens.Lte + } else { + tok = tokens.Lt + } + case '!': + if s.curr == '=' { + s.next() + tok = tokens.Neq + } else { + s.error("illegal ! character") + } + case ';': + tok = tokens.Semicolon + case '.': + tok = tokens.Dot + case '$': + switch s.curr { + case '`': + s.next() + lit, tok = s.scanRawTemplateString() + case '"': + s.next() + lit, tok = s.scanTemplateString() + default: + s.error("illegal $ character") + } + } + } + + pos.End = s.offset - s.width + errs := s.errors + s.errors = nil + + return tok, pos, lit, errs +} + +func (s *Scanner) scanIdentifier() string { + start := s.offset - 1 + for isLetter(s.curr) || isDigit(s.curr) { + s.next() + } + + return util.ByteSliceToString(s.bs[start : s.offset-1]) +} + +func (s *Scanner) scanNumber() string { + + start := s.offset - 1 + + if s.curr != '.' { + for isDecimal(s.curr) { + s.next() + } + } + + if s.curr == '.' 
{ + s.next() + var found bool + for isDecimal(s.curr) { + s.next() + found = true + } + if !found { + s.error("expected fraction") + } + } + + if lower(s.curr) == 'e' { + s.next() + if s.curr == '+' || s.curr == '-' { + s.next() + } + var found bool + for isDecimal(s.curr) { + s.next() + found = true + } + if !found { + s.error("expected exponent") + } + } + + // Scan any digits following the decimals to get the + // entire invalid number/identifier. + // Example: 0a2b should be a single invalid number "0a2b" + // rather than a number "0", followed by identifier "a2b". + if isLetter(s.curr) { + s.error("illegal number format") + for isLetter(s.curr) || isDigit(s.curr) { + s.next() + } + } + + return util.ByteSliceToString(s.bs[start : s.offset-1]) +} + +func (s *Scanner) scanString() string { + start := s.literalStart() + for { + ch := s.curr + + if ch == '\n' || ch < 0 { + s.error("non-terminated string") + break + } + + s.next() + + if ch == '"' { + break + } + + if ch == '\\' { + switch s.curr { + case '\\', '"', '/', 'b', 'f', 'n', 'r', 't': + s.next() + case 'u': + s.next() + s.next() + s.next() + s.next() + default: + s.error("illegal escape sequence") + } + } + } + + return util.ByteSliceToString(s.bs[start : s.offset-1]) +} + +func (s *Scanner) scanRawString() string { + start := s.literalStart() + for { + ch := s.curr + s.next() + if ch == '`' { + break + } else if ch < 0 { + s.error("non-terminated string") + break + } + } + + return util.ByteSliceToString(s.bs[start : s.offset-1]) +} + +func (s *Scanner) scanTemplateString() (string, tokens.Token) { + tok := tokens.TemplateStringPart + start := s.literalStart() + var escapes []int + for { + ch := s.curr + + if ch == '\n' || ch < 0 { + s.error("non-terminated string") + break + } + + s.next() + + if ch == '"' { + tok = tokens.TemplateStringEnd + break + } + + if ch == '{' { + break + } + + if ch == '\\' { + switch s.curr { + case '\\', '"', '/', 'b', 'f', 'n', 'r', 't': + s.next() + case '{': + escapes = 
append(escapes, s.offset-1) + s.next() + case 'u': + s.next() + s.next() + s.next() + s.next() + default: + s.error("illegal escape sequence") + } + } + } + + // Lazily remove escapes to not unnecessarily allocate a new byte slice + if len(escapes) > 0 { + return util.ByteSliceToString(removeEscapes(s, escapes, start)), tok + } + + return util.ByteSliceToString(s.bs[start : s.offset-1]), tok +} + +func (s *Scanner) scanRawTemplateString() (string, tokens.Token) { + tok := tokens.RawTemplateStringPart + start := s.literalStart() + var escapes []int + for { + ch := s.curr + + if ch < 0 { + s.error("non-terminated string") + break + } + + s.next() + + if ch == '`' { + tok = tokens.RawTemplateStringEnd + break + } + + if ch == '{' { + break + } + + if ch == '\\' { + switch s.curr { + case '{': + escapes = append(escapes, s.offset-1) + s.next() + } + } + } + + // Lazily remove escapes to not unnecessarily allocate a new byte slice + if len(escapes) > 0 { + return util.ByteSliceToString(removeEscapes(s, escapes, start)), tok + } + + return util.ByteSliceToString(s.bs[start : s.offset-1]), tok +} + +func removeEscapes(s *Scanner, escapes []int, start int) []byte { + from := start + bs := make([]byte, 0, s.offset-start-len(escapes)) + + for _, escape := range escapes { + // Append the bytes before the escape sequence. + if escape > from { + bs = append(bs, s.bs[from:escape-1]...) + } + // Skip the escape character. + from = escape + } + + // Append the remaining bytes after the last escape sequence. + if from < s.offset-1 { + bs = append(bs, s.bs[from:s.offset-1]...) 
+ } + + return bs +} + +func (s *Scanner) scanComment() string { + start := s.literalStart() + for s.curr != '\n' && s.curr != -1 { + s.next() + } + end := s.offset - 1 + // Trim carriage returns that precede the newline + if s.offset > 1 && s.bs[s.offset-2] == '\r' { + end -= 1 + } + + return util.ByteSliceToString(s.bs[start:end]) +} + +func (s *Scanner) next() { + + if s.offset >= len(s.bs) { + s.curr = -1 + s.offset = len(s.bs) + 1 + return + } + + s.curr = rune(s.bs[s.offset]) + s.width = 1 + + if s.curr == 0 { + s.error("illegal null character") + } else if s.curr >= utf8.RuneSelf { + s.curr, s.width = utf8.DecodeRune(s.bs[s.offset:]) + if s.curr == utf8.RuneError && s.width == 1 { + s.error("illegal utf-8 character") + } else if s.curr == bom && s.offset > 0 { + s.error("illegal byte-order mark") + } + } + + s.offset += s.width + + if s.curr == '\n' { + s.row++ + s.col = 0 + s.tabs = s.tabs[:0] + } else { + s.col++ + if s.curr == '\t' { + s.tabs = append(s.tabs, s.col) + } + } +} + +func (s *Scanner) literalStart() int { + // The current offset is at the first character past the literal delimiter (#, ", `, etc.) + // Need to subtract width of first character (plus one for the delimiter). 
+ return s.offset - (s.width + 1) +} + +// From the Go scanner (src/go/scanner/scanner.go) + +func isLetter(ch rune) bool { + return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' +} + +func isDigit(ch rune) bool { + return isDecimal(ch) || ch >= utf8.RuneSelf && unicode.IsDigit(ch) +} + +func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' } + +func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter + +func (s *Scanner) isWhitespace() bool { + return s.curr == ' ' || s.curr == '\t' || s.curr == '\n' || s.curr == '\r' +} + +func (s *Scanner) error(reason string) { + s.errors = append(s.errors, Error{Pos: Position{ + Offset: s.offset, + Row: s.row, + Col: s.col, + }, Message: reason}) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go new file mode 100644 index 0000000000..2721c3618b --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go @@ -0,0 +1,163 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package tokens + +import "maps" + +// Token represents a single Rego source code token +// for use by the Parser. 
+type Token uint8 + +func (t Token) String() string { + if int(t) >= len(strings) { + return "unknown" + } + return strings[t] +} + +// All tokens must be defined here +const ( + Illegal Token = iota + EOF + Whitespace + Ident + Comment + + Package + Import + As + Default + Else + Not + Some + With + Null + True + False + + Number + String + TemplateStringPart + TemplateStringEnd + RawTemplateStringPart + RawTemplateStringEnd + + LBrack + RBrack + LBrace + RBrace + LParen + RParen + Comma + Colon + + Add + Sub + Mul + Quo + Rem + And + Or + Unify + Equal + Assign + In + Neq + Gt + Lt + Gte + Lte + Dot + Semicolon + Dollar + + Every + Contains + If +) + +var strings = [...]string{ + Illegal: "illegal", + EOF: "eof", + Whitespace: "whitespace", + Comment: "comment", + Ident: "identifier", + Package: "package", + Import: "import", + As: "as", + Default: "default", + Else: "else", + Not: "not", + Some: "some", + With: "with", + Null: "null", + True: "true", + False: "false", + Number: "number", + String: "string", + TemplateStringPart: "template-string-part", + TemplateStringEnd: "template-string-end", + RawTemplateStringPart: "raw-template-string-part", + RawTemplateStringEnd: "raw-template-string-end", + LBrack: "[", + RBrack: "]", + LBrace: "{", + RBrace: "}", + LParen: "(", + RParen: ")", + Comma: ",", + Colon: ":", + Add: "plus", + Sub: "minus", + Mul: "mul", + Quo: "div", + Rem: "rem", + And: "and", + Or: "or", + Unify: "eq", + Equal: "equal", + Assign: "assign", + In: "in", + Neq: "neq", + Gt: "gt", + Lt: "lt", + Gte: "gte", + Lte: "lte", + Dot: ".", + Semicolon: ";", + Dollar: "dollar", + Every: "every", + Contains: "contains", + If: "if", +} + +var keywords = map[string]Token{ + "package": Package, + "import": Import, + "as": As, + "default": Default, + "else": Else, + "not": Not, + "some": Some, + "with": With, + "null": Null, + "true": True, + "false": False, +} + +// Keywords returns a copy of the default string -> Token keyword map. 
+func Keywords() map[string]Token { + return maps.Clone(keywords) +} + +// IsKeyword returns if a token is a keyword +func IsKeyword(tok Token) bool { + _, ok := keywords[strings[tok]] + return ok +} + +func KeywordFor(tok Token) string { + return strings[tok] +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go b/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go new file mode 100644 index 0000000000..4f454beb96 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go @@ -0,0 +1,1902 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "iter" + "strconv" +) + +type internable interface { + bool | string | int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 +} + +// NOTE! Great care must be taken **not** to modify the terms returned +// from these functions, as they are shared across all callers. +// This package is currently considered experimental, and may change +// at any time without notice. 
+ +var ( + InternedNullValue Value = Null{} + InternedNullTerm = NewTerm(InternedNullValue) + + InternedBooleanTrueValue Value = Boolean(true) + InternedBooleanFalseValue Value = Boolean(false) + InternedEmptyStringValue Value = String("") + InternedEmptyArrayValue Value = NewArray() + InternedEmptyRefValue Value = Ref{} + InternedEmptyObjectValue Value = NewObject() + InternedEmptySetValue Value = NewSet() + + InternedBooleanTrue = NewTerm(InternedBooleanTrueValue) + InternedBooleanFalse = NewTerm(InternedBooleanFalseValue) + InternedEmptyString = NewTerm(InternedEmptyStringValue) + InternedEmptyObject = NewTerm(InternedEmptyObjectValue) + InternedEmptyArray = NewTerm(InternedEmptyArrayValue) + InternedEmptySet = NewTerm(InternedEmptySetValue) + + // since this is by far the most common negative number + minusOneValue Value = Number("-1") + minusOneTerm = NewTerm(minusOneValue) + + internedStringTerms = map[string]*Term{ + "": InternedEmptyString, + } + + internedVarValues = map[string]Value{ + "input": Var("input"), + "data": Var("data"), + "args": Var("args"), + "schema": Var("schema"), + "key": Var("key"), + "value": Var("value"), + "future": Var("future"), + "rego": Var("rego"), + "set": Var("set"), + "internal": Var("internal"), + "else": Var("else"), + + "i": Var("i"), "j": Var("j"), "k": Var("k"), "v": Var("v"), "x": Var("x"), "y": Var("y"), "z": Var("z"), + } +) + +// InternStringTerm interns the given strings as terms. Note that Interning is +// considered experimental and should not be relied upon by external code. +// WARNING: This must **only** be called at initialization time, as the +// interned terms are shared globally, and the underlying map is not thread-safe. +func InternStringTerm(str ...string) { + for _, s := range str { + if _, ok := internedStringTerms[s]; ok { + continue + } + + internedStringTerms[s] = &Term{Value: String(s)} + } +} + +// InternVarValue interns the given variable names as Var Values. 
Note that Interning is +// considered experimental and should not be relied upon by external code. +// WARNING: This must **only** be called at initialization time, as the +// interned terms are shared globally, and the underlying map is not thread-safe. +func InternVarValue(names ...string) { + for _, name := range names { + if _, ok := internedVarValues[name]; ok { + continue + } + + internedVarValues[name] = Var(name) + } +} + +// HasInternedValue returns true if the given value is interned, otherwise false. +func HasInternedValue[T internable](v T) bool { + switch value := any(v).(type) { + case bool: + return true + case int: + return HasInternedIntNumberTerm(value) + case int8: + return HasInternedIntNumberTerm(int(value)) + case int16: + return HasInternedIntNumberTerm(int(value)) + case int32: + return HasInternedIntNumberTerm(int(value)) + case int64: + return HasInternedIntNumberTerm(int(value)) + case uint: + return HasInternedIntNumberTerm(int(value)) + case uint8: + return HasInternedIntNumberTerm(int(value)) + case uint16: + return HasInternedIntNumberTerm(int(value)) + case uint32: + return HasInternedIntNumberTerm(int(value)) + case uint64: + return HasInternedIntNumberTerm(int(value)) + case string: + _, ok := internedStringTerms[value] + return ok + } + return false +} + +// InternedValue returns an interned Value for scalar v, if the value is +// interned. If the value is not interned, a new Value is returned. +func InternedValue[T internable](v T) Value { + return InternedValueOr(v, internedTermValue) +} + +// InternedVarValue returns an interned Var Value for the given name. If the +// name is not interned, a new Var Value is returned. +func InternedVarValue(name string) Value { + if v, ok := internedVarValues[name]; ok { + return v + } + + return Var(name) +} + +// InternedValueOr returns an interned Value for scalar v. Calls supplier +// to produce a Value if the value is not interned. 
+func InternedValueOr[T internable](v T, supplier func(T) Value) Value { + switch value := any(v).(type) { + case bool: + return internedBooleanValue(value) + case int: + return internedIntNumberValue(value) + case int8: + return internedIntNumberValue(int(value)) + case int16: + return internedIntNumberValue(int(value)) + case int32: + return internedIntNumberValue(int(value)) + case int64: + return internedIntNumberValue(int(value)) + case uint: + return internedIntNumberValue(int(value)) + case uint8: + return internedIntNumberValue(int(value)) + case uint16: + return internedIntNumberValue(int(value)) + case uint32: + return internedIntNumberValue(int(value)) + case uint64: + return internedIntNumberValue(int(value)) + } + return supplier(v) +} + +// Interned returns a possibly interned term for the given scalar value. +// If the value is not interned, a new term is created for that value. +func InternedTerm[T internable](v T) *Term { + switch value := any(v).(type) { + case bool: + return internedBooleanTerm(value) + case string: + return internedStringTerm(value) + case int: + return internedIntNumberTerm(value) + case int8: + return internedIntNumberTerm(int(value)) + case int16: + return internedIntNumberTerm(int(value)) + case int32: + return internedIntNumberTerm(int(value)) + case int64: + return internedIntNumberTerm(int(value)) + case uint: + return internedIntNumberTerm(int(value)) + case uint8: + return internedIntNumberTerm(int(value)) + case uint16: + return internedIntNumberTerm(int(value)) + case uint32: + return internedIntNumberTerm(int(value)) + case uint64: + return internedIntNumberTerm(int(value)) + default: + panic("unreachable") + } +} + +// InternedItem works just like [Item] but returns interned terms for both +// key and value where possible. This is mostly useful for making tests less +// verbose. 
+func InternedItem[K, V internable](key K, value V) [2]*Term { + return [2]*Term{InternedTerm(key), InternedTerm(value)} +} + +// InternedIntFromString returns a term with the given integer value if the string +// maps to an interned term. If the string does not map to an interned term, nil is +// returned. +func InternedIntNumberTermFromString(s string) *Term { + if term, ok := stringToIntNumberTermMap[s]; ok { + return term + } + + return nil +} + +// InternedIntRange returns a sequence of interned integer number terms +// from start (inclusive) to end (exclusive). For values outside of the +// interned range, non-interned IntNumberTerms are returned. +func InternedIntRange(start, end int) iter.Seq[*Term] { + return func(yield func(*Term) bool) { + for i := start; i < end; i++ { + if !yield(internedIntNumberTerm(i)) { + return + } + } + } +} + +// HasInternedIntNumberTerm returns true if the given integer value maps to an interned +// term, otherwise false. +func HasInternedIntNumberTerm(i int) bool { + return i >= -1 && i < len(intNumberTerms) +} + +// Returns an interned string term representing the integer value i, if +// interned. If not, creates a new StringTerm for the integer value. +func InternedIntegerString(i int) *Term { + // Cheapest option - we don't need to call strconv.Itoa + if HasInternedIntNumberTerm(i) { + if interned, ok := internedStringTerms[IntNumberTerm(i).String()]; ok { + return interned + } + } + + // Next cheapest option — the string could still be interned if the store + // has been extended with more terms than we cucrrently intern. + s := strconv.Itoa(i) + if interned, ok := internedStringTerms[s]; ok { + return interned + } + + // Nope, create a new term + return StringTerm(s) +} + +func internedBooleanValue(b bool) Value { + if b { + return InternedBooleanTrueValue + } + + return InternedBooleanFalseValue +} + +// InternedBooleanTerm returns an interned term with the given boolean value. 
+func internedBooleanTerm(b bool) *Term { + if b { + return InternedBooleanTrue + } + + return InternedBooleanFalse +} + +func internedIntNumberValue(i int) Value { + if i >= 0 && i < len(intNumberTerms) { + return intNumberValues[i] + } + + if i == -1 { + return minusOneValue + } + + return Number(strconv.Itoa(i)) +} + +// InternedIntNumberTerm returns a term with the given integer value. The term is +// cached between -1 to 512, and for values outside of that range, this function +// is equivalent to IntNumberTerm. +func internedIntNumberTerm(i int) *Term { + if i >= 0 && i < len(intNumberTerms) { + return intNumberTerms[i] + } + + if i == -1 { + return minusOneTerm + } + + return &Term{Value: Number(strconv.Itoa(i))} +} + +// InternedStringTerm returns an interned term with the given string value. If the +// provided string is not interned, a new term is created for that value. It does *not* +// modify the global interned terms map. +func internedStringTerm(s string) *Term { + if term, ok := internedStringTerms[s]; ok { + return term + } + + return StringTerm(s) +} + +func internedTermValue[T internable](v T) Value { + return InternedTerm(v).Value +} + +func init() { + InternStringTerm( + // Numbers + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", + "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", + "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", + "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", + "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", + "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", + "93", "94", "95", "96", "97", "98", "99", "100", + // Types + "null", "boolean", "number", "string", "array", "object", "set", "var", "ref", "true", "false", + // Runtime + "config", "env", "version", "commit", 
"authorization_enabled", "skip_known_schema_check", + // Annotations + "annotations", "scope", "title", "entrypoint", "description", "organizations", "authors", "related_resources", + "schemas", "custom", "name", "email", "schema", "definition", "document", "package", "rule", "subpackages", + // Debug + "text", "value", "bindings", "expressions", + // Various + "data", "input", "result", "keywords", "path", "v1", "error", "partial", + // HTTP + "code", "message", "status_code", "method", "url", "uri", "body", "raw_body", "headers", "query_params", + // JWT + "enc", "cty", "iss", "exp", "nbf", "aud", "secret", "cert", + // Decisions + "revision", "labels", "decision_id", "bundles", "query", "mapped_result", "nd_builtin_cache", + "erased", "masked", "requested_by", "timestamp", "metrics", "req_id", + + // Whitespace + " ", "\n", "\t", + ) +} + +var stringToIntNumberTermMap = map[string]*Term{ + "-1": minusOneTerm, + "0": intNumberTerms[0], + "1": intNumberTerms[1], + "2": intNumberTerms[2], + "3": intNumberTerms[3], + "4": intNumberTerms[4], + "5": intNumberTerms[5], + "6": intNumberTerms[6], + "7": intNumberTerms[7], + "8": intNumberTerms[8], + "9": intNumberTerms[9], + "10": intNumberTerms[10], + "11": intNumberTerms[11], + "12": intNumberTerms[12], + "13": intNumberTerms[13], + "14": intNumberTerms[14], + "15": intNumberTerms[15], + "16": intNumberTerms[16], + "17": intNumberTerms[17], + "18": intNumberTerms[18], + "19": intNumberTerms[19], + "20": intNumberTerms[20], + "21": intNumberTerms[21], + "22": intNumberTerms[22], + "23": intNumberTerms[23], + "24": intNumberTerms[24], + "25": intNumberTerms[25], + "26": intNumberTerms[26], + "27": intNumberTerms[27], + "28": intNumberTerms[28], + "29": intNumberTerms[29], + "30": intNumberTerms[30], + "31": intNumberTerms[31], + "32": intNumberTerms[32], + "33": intNumberTerms[33], + "34": intNumberTerms[34], + "35": intNumberTerms[35], + "36": intNumberTerms[36], + "37": intNumberTerms[37], + "38": intNumberTerms[38], + 
"39": intNumberTerms[39], + "40": intNumberTerms[40], + "41": intNumberTerms[41], + "42": intNumberTerms[42], + "43": intNumberTerms[43], + "44": intNumberTerms[44], + "45": intNumberTerms[45], + "46": intNumberTerms[46], + "47": intNumberTerms[47], + "48": intNumberTerms[48], + "49": intNumberTerms[49], + "50": intNumberTerms[50], + "51": intNumberTerms[51], + "52": intNumberTerms[52], + "53": intNumberTerms[53], + "54": intNumberTerms[54], + "55": intNumberTerms[55], + "56": intNumberTerms[56], + "57": intNumberTerms[57], + "58": intNumberTerms[58], + "59": intNumberTerms[59], + "60": intNumberTerms[60], + "61": intNumberTerms[61], + "62": intNumberTerms[62], + "63": intNumberTerms[63], + "64": intNumberTerms[64], + "65": intNumberTerms[65], + "66": intNumberTerms[66], + "67": intNumberTerms[67], + "68": intNumberTerms[68], + "69": intNumberTerms[69], + "70": intNumberTerms[70], + "71": intNumberTerms[71], + "72": intNumberTerms[72], + "73": intNumberTerms[73], + "74": intNumberTerms[74], + "75": intNumberTerms[75], + "76": intNumberTerms[76], + "77": intNumberTerms[77], + "78": intNumberTerms[78], + "79": intNumberTerms[79], + "80": intNumberTerms[80], + "81": intNumberTerms[81], + "82": intNumberTerms[82], + "83": intNumberTerms[83], + "84": intNumberTerms[84], + "85": intNumberTerms[85], + "86": intNumberTerms[86], + "87": intNumberTerms[87], + "88": intNumberTerms[88], + "89": intNumberTerms[89], + "90": intNumberTerms[90], + "91": intNumberTerms[91], + "92": intNumberTerms[92], + "93": intNumberTerms[93], + "94": intNumberTerms[94], + "95": intNumberTerms[95], + "96": intNumberTerms[96], + "97": intNumberTerms[97], + "98": intNumberTerms[98], + "99": intNumberTerms[99], + "100": intNumberTerms[100], + "101": intNumberTerms[101], + "102": intNumberTerms[102], + "103": intNumberTerms[103], + "104": intNumberTerms[104], + "105": intNumberTerms[105], + "106": intNumberTerms[106], + "107": intNumberTerms[107], + "108": intNumberTerms[108], + "109": 
intNumberTerms[109], + "110": intNumberTerms[110], + "111": intNumberTerms[111], + "112": intNumberTerms[112], + "113": intNumberTerms[113], + "114": intNumberTerms[114], + "115": intNumberTerms[115], + "116": intNumberTerms[116], + "117": intNumberTerms[117], + "118": intNumberTerms[118], + "119": intNumberTerms[119], + "120": intNumberTerms[120], + "121": intNumberTerms[121], + "122": intNumberTerms[122], + "123": intNumberTerms[123], + "124": intNumberTerms[124], + "125": intNumberTerms[125], + "126": intNumberTerms[126], + "127": intNumberTerms[127], + "128": intNumberTerms[128], + "129": intNumberTerms[129], + "130": intNumberTerms[130], + "131": intNumberTerms[131], + "132": intNumberTerms[132], + "133": intNumberTerms[133], + "134": intNumberTerms[134], + "135": intNumberTerms[135], + "136": intNumberTerms[136], + "137": intNumberTerms[137], + "138": intNumberTerms[138], + "139": intNumberTerms[139], + "140": intNumberTerms[140], + "141": intNumberTerms[141], + "142": intNumberTerms[142], + "143": intNumberTerms[143], + "144": intNumberTerms[144], + "145": intNumberTerms[145], + "146": intNumberTerms[146], + "147": intNumberTerms[147], + "148": intNumberTerms[148], + "149": intNumberTerms[149], + "150": intNumberTerms[150], + "151": intNumberTerms[151], + "152": intNumberTerms[152], + "153": intNumberTerms[153], + "154": intNumberTerms[154], + "155": intNumberTerms[155], + "156": intNumberTerms[156], + "157": intNumberTerms[157], + "158": intNumberTerms[158], + "159": intNumberTerms[159], + "160": intNumberTerms[160], + "161": intNumberTerms[161], + "162": intNumberTerms[162], + "163": intNumberTerms[163], + "164": intNumberTerms[164], + "165": intNumberTerms[165], + "166": intNumberTerms[166], + "167": intNumberTerms[167], + "168": intNumberTerms[168], + "169": intNumberTerms[169], + "170": intNumberTerms[170], + "171": intNumberTerms[171], + "172": intNumberTerms[172], + "173": intNumberTerms[173], + "174": intNumberTerms[174], + "175": 
intNumberTerms[175], + "176": intNumberTerms[176], + "177": intNumberTerms[177], + "178": intNumberTerms[178], + "179": intNumberTerms[179], + "180": intNumberTerms[180], + "181": intNumberTerms[181], + "182": intNumberTerms[182], + "183": intNumberTerms[183], + "184": intNumberTerms[184], + "185": intNumberTerms[185], + "186": intNumberTerms[186], + "187": intNumberTerms[187], + "188": intNumberTerms[188], + "189": intNumberTerms[189], + "190": intNumberTerms[190], + "191": intNumberTerms[191], + "192": intNumberTerms[192], + "193": intNumberTerms[193], + "194": intNumberTerms[194], + "195": intNumberTerms[195], + "196": intNumberTerms[196], + "197": intNumberTerms[197], + "198": intNumberTerms[198], + "199": intNumberTerms[199], + "200": intNumberTerms[200], + "201": intNumberTerms[201], + "202": intNumberTerms[202], + "203": intNumberTerms[203], + "204": intNumberTerms[204], + "205": intNumberTerms[205], + "206": intNumberTerms[206], + "207": intNumberTerms[207], + "208": intNumberTerms[208], + "209": intNumberTerms[209], + "210": intNumberTerms[210], + "211": intNumberTerms[211], + "212": intNumberTerms[212], + "213": intNumberTerms[213], + "214": intNumberTerms[214], + "215": intNumberTerms[215], + "216": intNumberTerms[216], + "217": intNumberTerms[217], + "218": intNumberTerms[218], + "219": intNumberTerms[219], + "220": intNumberTerms[220], + "221": intNumberTerms[221], + "222": intNumberTerms[222], + "223": intNumberTerms[223], + "224": intNumberTerms[224], + "225": intNumberTerms[225], + "226": intNumberTerms[226], + "227": intNumberTerms[227], + "228": intNumberTerms[228], + "229": intNumberTerms[229], + "230": intNumberTerms[230], + "231": intNumberTerms[231], + "232": intNumberTerms[232], + "233": intNumberTerms[233], + "234": intNumberTerms[234], + "235": intNumberTerms[235], + "236": intNumberTerms[236], + "237": intNumberTerms[237], + "238": intNumberTerms[238], + "239": intNumberTerms[239], + "240": intNumberTerms[240], + "241": 
intNumberTerms[241], + "242": intNumberTerms[242], + "243": intNumberTerms[243], + "244": intNumberTerms[244], + "245": intNumberTerms[245], + "246": intNumberTerms[246], + "247": intNumberTerms[247], + "248": intNumberTerms[248], + "249": intNumberTerms[249], + "250": intNumberTerms[250], + "251": intNumberTerms[251], + "252": intNumberTerms[252], + "253": intNumberTerms[253], + "254": intNumberTerms[254], + "255": intNumberTerms[255], + "256": intNumberTerms[256], + "257": intNumberTerms[257], + "258": intNumberTerms[258], + "259": intNumberTerms[259], + "260": intNumberTerms[260], + "261": intNumberTerms[261], + "262": intNumberTerms[262], + "263": intNumberTerms[263], + "264": intNumberTerms[264], + "265": intNumberTerms[265], + "266": intNumberTerms[266], + "267": intNumberTerms[267], + "268": intNumberTerms[268], + "269": intNumberTerms[269], + "270": intNumberTerms[270], + "271": intNumberTerms[271], + "272": intNumberTerms[272], + "273": intNumberTerms[273], + "274": intNumberTerms[274], + "275": intNumberTerms[275], + "276": intNumberTerms[276], + "277": intNumberTerms[277], + "278": intNumberTerms[278], + "279": intNumberTerms[279], + "280": intNumberTerms[280], + "281": intNumberTerms[281], + "282": intNumberTerms[282], + "283": intNumberTerms[283], + "284": intNumberTerms[284], + "285": intNumberTerms[285], + "286": intNumberTerms[286], + "287": intNumberTerms[287], + "288": intNumberTerms[288], + "289": intNumberTerms[289], + "290": intNumberTerms[290], + "291": intNumberTerms[291], + "292": intNumberTerms[292], + "293": intNumberTerms[293], + "294": intNumberTerms[294], + "295": intNumberTerms[295], + "296": intNumberTerms[296], + "297": intNumberTerms[297], + "298": intNumberTerms[298], + "299": intNumberTerms[299], + "300": intNumberTerms[300], + "301": intNumberTerms[301], + "302": intNumberTerms[302], + "303": intNumberTerms[303], + "304": intNumberTerms[304], + "305": intNumberTerms[305], + "306": intNumberTerms[306], + "307": 
intNumberTerms[307], + "308": intNumberTerms[308], + "309": intNumberTerms[309], + "310": intNumberTerms[310], + "311": intNumberTerms[311], + "312": intNumberTerms[312], + "313": intNumberTerms[313], + "314": intNumberTerms[314], + "315": intNumberTerms[315], + "316": intNumberTerms[316], + "317": intNumberTerms[317], + "318": intNumberTerms[318], + "319": intNumberTerms[319], + "320": intNumberTerms[320], + "321": intNumberTerms[321], + "322": intNumberTerms[322], + "323": intNumberTerms[323], + "324": intNumberTerms[324], + "325": intNumberTerms[325], + "326": intNumberTerms[326], + "327": intNumberTerms[327], + "328": intNumberTerms[328], + "329": intNumberTerms[329], + "330": intNumberTerms[330], + "331": intNumberTerms[331], + "332": intNumberTerms[332], + "333": intNumberTerms[333], + "334": intNumberTerms[334], + "335": intNumberTerms[335], + "336": intNumberTerms[336], + "337": intNumberTerms[337], + "338": intNumberTerms[338], + "339": intNumberTerms[339], + "340": intNumberTerms[340], + "341": intNumberTerms[341], + "342": intNumberTerms[342], + "343": intNumberTerms[343], + "344": intNumberTerms[344], + "345": intNumberTerms[345], + "346": intNumberTerms[346], + "347": intNumberTerms[347], + "348": intNumberTerms[348], + "349": intNumberTerms[349], + "350": intNumberTerms[350], + "351": intNumberTerms[351], + "352": intNumberTerms[352], + "353": intNumberTerms[353], + "354": intNumberTerms[354], + "355": intNumberTerms[355], + "356": intNumberTerms[356], + "357": intNumberTerms[357], + "358": intNumberTerms[358], + "359": intNumberTerms[359], + "360": intNumberTerms[360], + "361": intNumberTerms[361], + "362": intNumberTerms[362], + "363": intNumberTerms[363], + "364": intNumberTerms[364], + "365": intNumberTerms[365], + "366": intNumberTerms[366], + "367": intNumberTerms[367], + "368": intNumberTerms[368], + "369": intNumberTerms[369], + "370": intNumberTerms[370], + "371": intNumberTerms[371], + "372": intNumberTerms[372], + "373": 
intNumberTerms[373], + "374": intNumberTerms[374], + "375": intNumberTerms[375], + "376": intNumberTerms[376], + "377": intNumberTerms[377], + "378": intNumberTerms[378], + "379": intNumberTerms[379], + "380": intNumberTerms[380], + "381": intNumberTerms[381], + "382": intNumberTerms[382], + "383": intNumberTerms[383], + "384": intNumberTerms[384], + "385": intNumberTerms[385], + "386": intNumberTerms[386], + "387": intNumberTerms[387], + "388": intNumberTerms[388], + "389": intNumberTerms[389], + "390": intNumberTerms[390], + "391": intNumberTerms[391], + "392": intNumberTerms[392], + "393": intNumberTerms[393], + "394": intNumberTerms[394], + "395": intNumberTerms[395], + "396": intNumberTerms[396], + "397": intNumberTerms[397], + "398": intNumberTerms[398], + "399": intNumberTerms[399], + "400": intNumberTerms[400], + "401": intNumberTerms[401], + "402": intNumberTerms[402], + "403": intNumberTerms[403], + "404": intNumberTerms[404], + "405": intNumberTerms[405], + "406": intNumberTerms[406], + "407": intNumberTerms[407], + "408": intNumberTerms[408], + "409": intNumberTerms[409], + "410": intNumberTerms[410], + "411": intNumberTerms[411], + "412": intNumberTerms[412], + "413": intNumberTerms[413], + "414": intNumberTerms[414], + "415": intNumberTerms[415], + "416": intNumberTerms[416], + "417": intNumberTerms[417], + "418": intNumberTerms[418], + "419": intNumberTerms[419], + "420": intNumberTerms[420], + "421": intNumberTerms[421], + "422": intNumberTerms[422], + "423": intNumberTerms[423], + "424": intNumberTerms[424], + "425": intNumberTerms[425], + "426": intNumberTerms[426], + "427": intNumberTerms[427], + "428": intNumberTerms[428], + "429": intNumberTerms[429], + "430": intNumberTerms[430], + "431": intNumberTerms[431], + "432": intNumberTerms[432], + "433": intNumberTerms[433], + "434": intNumberTerms[434], + "435": intNumberTerms[435], + "436": intNumberTerms[436], + "437": intNumberTerms[437], + "438": intNumberTerms[438], + "439": 
intNumberTerms[439], + "440": intNumberTerms[440], + "441": intNumberTerms[441], + "442": intNumberTerms[442], + "443": intNumberTerms[443], + "444": intNumberTerms[444], + "445": intNumberTerms[445], + "446": intNumberTerms[446], + "447": intNumberTerms[447], + "448": intNumberTerms[448], + "449": intNumberTerms[449], + "450": intNumberTerms[450], + "451": intNumberTerms[451], + "452": intNumberTerms[452], + "453": intNumberTerms[453], + "454": intNumberTerms[454], + "455": intNumberTerms[455], + "456": intNumberTerms[456], + "457": intNumberTerms[457], + "458": intNumberTerms[458], + "459": intNumberTerms[459], + "460": intNumberTerms[460], + "461": intNumberTerms[461], + "462": intNumberTerms[462], + "463": intNumberTerms[463], + "464": intNumberTerms[464], + "465": intNumberTerms[465], + "466": intNumberTerms[466], + "467": intNumberTerms[467], + "468": intNumberTerms[468], + "469": intNumberTerms[469], + "470": intNumberTerms[470], + "471": intNumberTerms[471], + "472": intNumberTerms[472], + "473": intNumberTerms[473], + "474": intNumberTerms[474], + "475": intNumberTerms[475], + "476": intNumberTerms[476], + "477": intNumberTerms[477], + "478": intNumberTerms[478], + "479": intNumberTerms[479], + "480": intNumberTerms[480], + "481": intNumberTerms[481], + "482": intNumberTerms[482], + "483": intNumberTerms[483], + "484": intNumberTerms[484], + "485": intNumberTerms[485], + "486": intNumberTerms[486], + "487": intNumberTerms[487], + "488": intNumberTerms[488], + "489": intNumberTerms[489], + "490": intNumberTerms[490], + "491": intNumberTerms[491], + "492": intNumberTerms[492], + "493": intNumberTerms[493], + "494": intNumberTerms[494], + "495": intNumberTerms[495], + "496": intNumberTerms[496], + "497": intNumberTerms[497], + "498": intNumberTerms[498], + "499": intNumberTerms[499], + "500": intNumberTerms[500], + "501": intNumberTerms[501], + "502": intNumberTerms[502], + "503": intNumberTerms[503], + "504": intNumberTerms[504], + "505": 
intNumberTerms[505], + "506": intNumberTerms[506], + "507": intNumberTerms[507], + "508": intNumberTerms[508], + "509": intNumberTerms[509], + "510": intNumberTerms[510], + "511": intNumberTerms[511], + "512": intNumberTerms[512], +} + +var intNumberValues = [...]Value{ + Number("0"), + Number("1"), + Number("2"), + Number("3"), + Number("4"), + Number("5"), + Number("6"), + Number("7"), + Number("8"), + Number("9"), + Number("10"), + Number("11"), + Number("12"), + Number("13"), + Number("14"), + Number("15"), + Number("16"), + Number("17"), + Number("18"), + Number("19"), + Number("20"), + Number("21"), + Number("22"), + Number("23"), + Number("24"), + Number("25"), + Number("26"), + Number("27"), + Number("28"), + Number("29"), + Number("30"), + Number("31"), + Number("32"), + Number("33"), + Number("34"), + Number("35"), + Number("36"), + Number("37"), + Number("38"), + Number("39"), + Number("40"), + Number("41"), + Number("42"), + Number("43"), + Number("44"), + Number("45"), + Number("46"), + Number("47"), + Number("48"), + Number("49"), + Number("50"), + Number("51"), + Number("52"), + Number("53"), + Number("54"), + Number("55"), + Number("56"), + Number("57"), + Number("58"), + Number("59"), + Number("60"), + Number("61"), + Number("62"), + Number("63"), + Number("64"), + Number("65"), + Number("66"), + Number("67"), + Number("68"), + Number("69"), + Number("70"), + Number("71"), + Number("72"), + Number("73"), + Number("74"), + Number("75"), + Number("76"), + Number("77"), + Number("78"), + Number("79"), + Number("80"), + Number("81"), + Number("82"), + Number("83"), + Number("84"), + Number("85"), + Number("86"), + Number("87"), + Number("88"), + Number("89"), + Number("90"), + Number("91"), + Number("92"), + Number("93"), + Number("94"), + Number("95"), + Number("96"), + Number("97"), + Number("98"), + Number("99"), + Number("100"), + Number("101"), + Number("102"), + Number("103"), + Number("104"), + Number("105"), + Number("106"), + Number("107"), + 
Number("108"), + Number("109"), + Number("110"), + Number("111"), + Number("112"), + Number("113"), + Number("114"), + Number("115"), + Number("116"), + Number("117"), + Number("118"), + Number("119"), + Number("120"), + Number("121"), + Number("122"), + Number("123"), + Number("124"), + Number("125"), + Number("126"), + Number("127"), + Number("128"), + Number("129"), + Number("130"), + Number("131"), + Number("132"), + Number("133"), + Number("134"), + Number("135"), + Number("136"), + Number("137"), + Number("138"), + Number("139"), + Number("140"), + Number("141"), + Number("142"), + Number("143"), + Number("144"), + Number("145"), + Number("146"), + Number("147"), + Number("148"), + Number("149"), + Number("150"), + Number("151"), + Number("152"), + Number("153"), + Number("154"), + Number("155"), + Number("156"), + Number("157"), + Number("158"), + Number("159"), + Number("160"), + Number("161"), + Number("162"), + Number("163"), + Number("164"), + Number("165"), + Number("166"), + Number("167"), + Number("168"), + Number("169"), + Number("170"), + Number("171"), + Number("172"), + Number("173"), + Number("174"), + Number("175"), + Number("176"), + Number("177"), + Number("178"), + Number("179"), + Number("180"), + Number("181"), + Number("182"), + Number("183"), + Number("184"), + Number("185"), + Number("186"), + Number("187"), + Number("188"), + Number("189"), + Number("190"), + Number("191"), + Number("192"), + Number("193"), + Number("194"), + Number("195"), + Number("196"), + Number("197"), + Number("198"), + Number("199"), + Number("200"), + Number("201"), + Number("202"), + Number("203"), + Number("204"), + Number("205"), + Number("206"), + Number("207"), + Number("208"), + Number("209"), + Number("210"), + Number("211"), + Number("212"), + Number("213"), + Number("214"), + Number("215"), + Number("216"), + Number("217"), + Number("218"), + Number("219"), + Number("220"), + Number("221"), + Number("222"), + Number("223"), + Number("224"), + 
Number("225"), + Number("226"), + Number("227"), + Number("228"), + Number("229"), + Number("230"), + Number("231"), + Number("232"), + Number("233"), + Number("234"), + Number("235"), + Number("236"), + Number("237"), + Number("238"), + Number("239"), + Number("240"), + Number("241"), + Number("242"), + Number("243"), + Number("244"), + Number("245"), + Number("246"), + Number("247"), + Number("248"), + Number("249"), + Number("250"), + Number("251"), + Number("252"), + Number("253"), + Number("254"), + Number("255"), + Number("256"), + Number("257"), + Number("258"), + Number("259"), + Number("260"), + Number("261"), + Number("262"), + Number("263"), + Number("264"), + Number("265"), + Number("266"), + Number("267"), + Number("268"), + Number("269"), + Number("270"), + Number("271"), + Number("272"), + Number("273"), + Number("274"), + Number("275"), + Number("276"), + Number("277"), + Number("278"), + Number("279"), + Number("280"), + Number("281"), + Number("282"), + Number("283"), + Number("284"), + Number("285"), + Number("286"), + Number("287"), + Number("288"), + Number("289"), + Number("290"), + Number("291"), + Number("292"), + Number("293"), + Number("294"), + Number("295"), + Number("296"), + Number("297"), + Number("298"), + Number("299"), + Number("300"), + Number("301"), + Number("302"), + Number("303"), + Number("304"), + Number("305"), + Number("306"), + Number("307"), + Number("308"), + Number("309"), + Number("310"), + Number("311"), + Number("312"), + Number("313"), + Number("314"), + Number("315"), + Number("316"), + Number("317"), + Number("318"), + Number("319"), + Number("320"), + Number("321"), + Number("322"), + Number("323"), + Number("324"), + Number("325"), + Number("326"), + Number("327"), + Number("328"), + Number("329"), + Number("330"), + Number("331"), + Number("332"), + Number("333"), + Number("334"), + Number("335"), + Number("336"), + Number("337"), + Number("338"), + Number("339"), + Number("340"), + Number("341"), + 
Number("342"), + Number("343"), + Number("344"), + Number("345"), + Number("346"), + Number("347"), + Number("348"), + Number("349"), + Number("350"), + Number("351"), + Number("352"), + Number("353"), + Number("354"), + Number("355"), + Number("356"), + Number("357"), + Number("358"), + Number("359"), + Number("360"), + Number("361"), + Number("362"), + Number("363"), + Number("364"), + Number("365"), + Number("366"), + Number("367"), + Number("368"), + Number("369"), + Number("370"), + Number("371"), + Number("372"), + Number("373"), + Number("374"), + Number("375"), + Number("376"), + Number("377"), + Number("378"), + Number("379"), + Number("380"), + Number("381"), + Number("382"), + Number("383"), + Number("384"), + Number("385"), + Number("386"), + Number("387"), + Number("388"), + Number("389"), + Number("390"), + Number("391"), + Number("392"), + Number("393"), + Number("394"), + Number("395"), + Number("396"), + Number("397"), + Number("398"), + Number("399"), + Number("400"), + Number("401"), + Number("402"), + Number("403"), + Number("404"), + Number("405"), + Number("406"), + Number("407"), + Number("408"), + Number("409"), + Number("410"), + Number("411"), + Number("412"), + Number("413"), + Number("414"), + Number("415"), + Number("416"), + Number("417"), + Number("418"), + Number("419"), + Number("420"), + Number("421"), + Number("422"), + Number("423"), + Number("424"), + Number("425"), + Number("426"), + Number("427"), + Number("428"), + Number("429"), + Number("430"), + Number("431"), + Number("432"), + Number("433"), + Number("434"), + Number("435"), + Number("436"), + Number("437"), + Number("438"), + Number("439"), + Number("440"), + Number("441"), + Number("442"), + Number("443"), + Number("444"), + Number("445"), + Number("446"), + Number("447"), + Number("448"), + Number("449"), + Number("450"), + Number("451"), + Number("452"), + Number("453"), + Number("454"), + Number("455"), + Number("456"), + Number("457"), + Number("458"), + 
Number("459"), + Number("460"), + Number("461"), + Number("462"), + Number("463"), + Number("464"), + Number("465"), + Number("466"), + Number("467"), + Number("468"), + Number("469"), + Number("470"), + Number("471"), + Number("472"), + Number("473"), + Number("474"), + Number("475"), + Number("476"), + Number("477"), + Number("478"), + Number("479"), + Number("480"), + Number("481"), + Number("482"), + Number("483"), + Number("484"), + Number("485"), + Number("486"), + Number("487"), + Number("488"), + Number("489"), + Number("490"), + Number("491"), + Number("492"), + Number("493"), + Number("494"), + Number("495"), + Number("496"), + Number("497"), + Number("498"), + Number("499"), + Number("500"), + Number("501"), + Number("502"), + Number("503"), + Number("504"), + Number("505"), + Number("506"), + Number("507"), + Number("508"), + Number("509"), + Number("510"), + Number("511"), + Number("512"), +} + +var intNumberTerms = [...]*Term{ + {Value: intNumberValues[0]}, + {Value: intNumberValues[1]}, + {Value: intNumberValues[2]}, + {Value: intNumberValues[3]}, + {Value: intNumberValues[4]}, + {Value: intNumberValues[5]}, + {Value: intNumberValues[6]}, + {Value: intNumberValues[7]}, + {Value: intNumberValues[8]}, + {Value: intNumberValues[9]}, + {Value: intNumberValues[10]}, + {Value: intNumberValues[11]}, + {Value: intNumberValues[12]}, + {Value: intNumberValues[13]}, + {Value: intNumberValues[14]}, + {Value: intNumberValues[15]}, + {Value: intNumberValues[16]}, + {Value: intNumberValues[17]}, + {Value: intNumberValues[18]}, + {Value: intNumberValues[19]}, + {Value: intNumberValues[20]}, + {Value: intNumberValues[21]}, + {Value: intNumberValues[22]}, + {Value: intNumberValues[23]}, + {Value: intNumberValues[24]}, + {Value: intNumberValues[25]}, + {Value: intNumberValues[26]}, + {Value: intNumberValues[27]}, + {Value: intNumberValues[28]}, + {Value: intNumberValues[29]}, + {Value: intNumberValues[30]}, + {Value: intNumberValues[31]}, + {Value: 
intNumberValues[32]}, + {Value: intNumberValues[33]}, + {Value: intNumberValues[34]}, + {Value: intNumberValues[35]}, + {Value: intNumberValues[36]}, + {Value: intNumberValues[37]}, + {Value: intNumberValues[38]}, + {Value: intNumberValues[39]}, + {Value: intNumberValues[40]}, + {Value: intNumberValues[41]}, + {Value: intNumberValues[42]}, + {Value: intNumberValues[43]}, + {Value: intNumberValues[44]}, + {Value: intNumberValues[45]}, + {Value: intNumberValues[46]}, + {Value: intNumberValues[47]}, + {Value: intNumberValues[48]}, + {Value: intNumberValues[49]}, + {Value: intNumberValues[50]}, + {Value: intNumberValues[51]}, + {Value: intNumberValues[52]}, + {Value: intNumberValues[53]}, + {Value: intNumberValues[54]}, + {Value: intNumberValues[55]}, + {Value: intNumberValues[56]}, + {Value: intNumberValues[57]}, + {Value: intNumberValues[58]}, + {Value: intNumberValues[59]}, + {Value: intNumberValues[60]}, + {Value: intNumberValues[61]}, + {Value: intNumberValues[62]}, + {Value: intNumberValues[63]}, + {Value: intNumberValues[64]}, + {Value: intNumberValues[65]}, + {Value: intNumberValues[66]}, + {Value: intNumberValues[67]}, + {Value: intNumberValues[68]}, + {Value: intNumberValues[69]}, + {Value: intNumberValues[70]}, + {Value: intNumberValues[71]}, + {Value: intNumberValues[72]}, + {Value: intNumberValues[73]}, + {Value: intNumberValues[74]}, + {Value: intNumberValues[75]}, + {Value: intNumberValues[76]}, + {Value: intNumberValues[77]}, + {Value: intNumberValues[78]}, + {Value: intNumberValues[79]}, + {Value: intNumberValues[80]}, + {Value: intNumberValues[81]}, + {Value: intNumberValues[82]}, + {Value: intNumberValues[83]}, + {Value: intNumberValues[84]}, + {Value: intNumberValues[85]}, + {Value: intNumberValues[86]}, + {Value: intNumberValues[87]}, + {Value: intNumberValues[88]}, + {Value: intNumberValues[89]}, + {Value: intNumberValues[90]}, + {Value: intNumberValues[91]}, + {Value: intNumberValues[92]}, + {Value: intNumberValues[93]}, + {Value: 
intNumberValues[94]}, + {Value: intNumberValues[95]}, + {Value: intNumberValues[96]}, + {Value: intNumberValues[97]}, + {Value: intNumberValues[98]}, + {Value: intNumberValues[99]}, + {Value: intNumberValues[100]}, + {Value: intNumberValues[101]}, + {Value: intNumberValues[102]}, + {Value: intNumberValues[103]}, + {Value: intNumberValues[104]}, + {Value: intNumberValues[105]}, + {Value: intNumberValues[106]}, + {Value: intNumberValues[107]}, + {Value: intNumberValues[108]}, + {Value: intNumberValues[109]}, + {Value: intNumberValues[110]}, + {Value: intNumberValues[111]}, + {Value: intNumberValues[112]}, + {Value: intNumberValues[113]}, + {Value: intNumberValues[114]}, + {Value: intNumberValues[115]}, + {Value: intNumberValues[116]}, + {Value: intNumberValues[117]}, + {Value: intNumberValues[118]}, + {Value: intNumberValues[119]}, + {Value: intNumberValues[120]}, + {Value: intNumberValues[121]}, + {Value: intNumberValues[122]}, + {Value: intNumberValues[123]}, + {Value: intNumberValues[124]}, + {Value: intNumberValues[125]}, + {Value: intNumberValues[126]}, + {Value: intNumberValues[127]}, + {Value: intNumberValues[128]}, + {Value: intNumberValues[129]}, + {Value: intNumberValues[130]}, + {Value: intNumberValues[131]}, + {Value: intNumberValues[132]}, + {Value: intNumberValues[133]}, + {Value: intNumberValues[134]}, + {Value: intNumberValues[135]}, + {Value: intNumberValues[136]}, + {Value: intNumberValues[137]}, + {Value: intNumberValues[138]}, + {Value: intNumberValues[139]}, + {Value: intNumberValues[140]}, + {Value: intNumberValues[141]}, + {Value: intNumberValues[142]}, + {Value: intNumberValues[143]}, + {Value: intNumberValues[144]}, + {Value: intNumberValues[145]}, + {Value: intNumberValues[146]}, + {Value: intNumberValues[147]}, + {Value: intNumberValues[148]}, + {Value: intNumberValues[149]}, + {Value: intNumberValues[150]}, + {Value: intNumberValues[151]}, + {Value: intNumberValues[152]}, + {Value: intNumberValues[153]}, + {Value: intNumberValues[154]}, + 
{Value: intNumberValues[155]}, + {Value: intNumberValues[156]}, + {Value: intNumberValues[157]}, + {Value: intNumberValues[158]}, + {Value: intNumberValues[159]}, + {Value: intNumberValues[160]}, + {Value: intNumberValues[161]}, + {Value: intNumberValues[162]}, + {Value: intNumberValues[163]}, + {Value: intNumberValues[164]}, + {Value: intNumberValues[165]}, + {Value: intNumberValues[166]}, + {Value: intNumberValues[167]}, + {Value: intNumberValues[168]}, + {Value: intNumberValues[169]}, + {Value: intNumberValues[170]}, + {Value: intNumberValues[171]}, + {Value: intNumberValues[172]}, + {Value: intNumberValues[173]}, + {Value: intNumberValues[174]}, + {Value: intNumberValues[175]}, + {Value: intNumberValues[176]}, + {Value: intNumberValues[177]}, + {Value: intNumberValues[178]}, + {Value: intNumberValues[179]}, + {Value: intNumberValues[180]}, + {Value: intNumberValues[181]}, + {Value: intNumberValues[182]}, + {Value: intNumberValues[183]}, + {Value: intNumberValues[184]}, + {Value: intNumberValues[185]}, + {Value: intNumberValues[186]}, + {Value: intNumberValues[187]}, + {Value: intNumberValues[188]}, + {Value: intNumberValues[189]}, + {Value: intNumberValues[190]}, + {Value: intNumberValues[191]}, + {Value: intNumberValues[192]}, + {Value: intNumberValues[193]}, + {Value: intNumberValues[194]}, + {Value: intNumberValues[195]}, + {Value: intNumberValues[196]}, + {Value: intNumberValues[197]}, + {Value: intNumberValues[198]}, + {Value: intNumberValues[199]}, + {Value: intNumberValues[200]}, + {Value: intNumberValues[201]}, + {Value: intNumberValues[202]}, + {Value: intNumberValues[203]}, + {Value: intNumberValues[204]}, + {Value: intNumberValues[205]}, + {Value: intNumberValues[206]}, + {Value: intNumberValues[207]}, + {Value: intNumberValues[208]}, + {Value: intNumberValues[209]}, + {Value: intNumberValues[210]}, + {Value: intNumberValues[211]}, + {Value: intNumberValues[212]}, + {Value: intNumberValues[213]}, + {Value: intNumberValues[214]}, + {Value: 
intNumberValues[215]}, + {Value: intNumberValues[216]}, + {Value: intNumberValues[217]}, + {Value: intNumberValues[218]}, + {Value: intNumberValues[219]}, + {Value: intNumberValues[220]}, + {Value: intNumberValues[221]}, + {Value: intNumberValues[222]}, + {Value: intNumberValues[223]}, + {Value: intNumberValues[224]}, + {Value: intNumberValues[225]}, + {Value: intNumberValues[226]}, + {Value: intNumberValues[227]}, + {Value: intNumberValues[228]}, + {Value: intNumberValues[229]}, + {Value: intNumberValues[230]}, + {Value: intNumberValues[231]}, + {Value: intNumberValues[232]}, + {Value: intNumberValues[233]}, + {Value: intNumberValues[234]}, + {Value: intNumberValues[235]}, + {Value: intNumberValues[236]}, + {Value: intNumberValues[237]}, + {Value: intNumberValues[238]}, + {Value: intNumberValues[239]}, + {Value: intNumberValues[240]}, + {Value: intNumberValues[241]}, + {Value: intNumberValues[242]}, + {Value: intNumberValues[243]}, + {Value: intNumberValues[244]}, + {Value: intNumberValues[245]}, + {Value: intNumberValues[246]}, + {Value: intNumberValues[247]}, + {Value: intNumberValues[248]}, + {Value: intNumberValues[249]}, + {Value: intNumberValues[250]}, + {Value: intNumberValues[251]}, + {Value: intNumberValues[252]}, + {Value: intNumberValues[253]}, + {Value: intNumberValues[254]}, + {Value: intNumberValues[255]}, + {Value: intNumberValues[256]}, + {Value: intNumberValues[257]}, + {Value: intNumberValues[258]}, + {Value: intNumberValues[259]}, + {Value: intNumberValues[260]}, + {Value: intNumberValues[261]}, + {Value: intNumberValues[262]}, + {Value: intNumberValues[263]}, + {Value: intNumberValues[264]}, + {Value: intNumberValues[265]}, + {Value: intNumberValues[266]}, + {Value: intNumberValues[267]}, + {Value: intNumberValues[268]}, + {Value: intNumberValues[269]}, + {Value: intNumberValues[270]}, + {Value: intNumberValues[271]}, + {Value: intNumberValues[272]}, + {Value: intNumberValues[273]}, + {Value: intNumberValues[274]}, + {Value: 
intNumberValues[275]}, + {Value: intNumberValues[276]}, + {Value: intNumberValues[277]}, + {Value: intNumberValues[278]}, + {Value: intNumberValues[279]}, + {Value: intNumberValues[280]}, + {Value: intNumberValues[281]}, + {Value: intNumberValues[282]}, + {Value: intNumberValues[283]}, + {Value: intNumberValues[284]}, + {Value: intNumberValues[285]}, + {Value: intNumberValues[286]}, + {Value: intNumberValues[287]}, + {Value: intNumberValues[288]}, + {Value: intNumberValues[289]}, + {Value: intNumberValues[290]}, + {Value: intNumberValues[291]}, + {Value: intNumberValues[292]}, + {Value: intNumberValues[293]}, + {Value: intNumberValues[294]}, + {Value: intNumberValues[295]}, + {Value: intNumberValues[296]}, + {Value: intNumberValues[297]}, + {Value: intNumberValues[298]}, + {Value: intNumberValues[299]}, + {Value: intNumberValues[300]}, + {Value: intNumberValues[301]}, + {Value: intNumberValues[302]}, + {Value: intNumberValues[303]}, + {Value: intNumberValues[304]}, + {Value: intNumberValues[305]}, + {Value: intNumberValues[306]}, + {Value: intNumberValues[307]}, + {Value: intNumberValues[308]}, + {Value: intNumberValues[309]}, + {Value: intNumberValues[310]}, + {Value: intNumberValues[311]}, + {Value: intNumberValues[312]}, + {Value: intNumberValues[313]}, + {Value: intNumberValues[314]}, + {Value: intNumberValues[315]}, + {Value: intNumberValues[316]}, + {Value: intNumberValues[317]}, + {Value: intNumberValues[318]}, + {Value: intNumberValues[319]}, + {Value: intNumberValues[320]}, + {Value: intNumberValues[321]}, + {Value: intNumberValues[322]}, + {Value: intNumberValues[323]}, + {Value: intNumberValues[324]}, + {Value: intNumberValues[325]}, + {Value: intNumberValues[326]}, + {Value: intNumberValues[327]}, + {Value: intNumberValues[328]}, + {Value: intNumberValues[329]}, + {Value: intNumberValues[330]}, + {Value: intNumberValues[331]}, + {Value: intNumberValues[332]}, + {Value: intNumberValues[333]}, + {Value: intNumberValues[334]}, + {Value: 
intNumberValues[335]}, + {Value: intNumberValues[336]}, + {Value: intNumberValues[337]}, + {Value: intNumberValues[338]}, + {Value: intNumberValues[339]}, + {Value: intNumberValues[340]}, + {Value: intNumberValues[341]}, + {Value: intNumberValues[342]}, + {Value: intNumberValues[343]}, + {Value: intNumberValues[344]}, + {Value: intNumberValues[345]}, + {Value: intNumberValues[346]}, + {Value: intNumberValues[347]}, + {Value: intNumberValues[348]}, + {Value: intNumberValues[349]}, + {Value: intNumberValues[350]}, + {Value: intNumberValues[351]}, + {Value: intNumberValues[352]}, + {Value: intNumberValues[353]}, + {Value: intNumberValues[354]}, + {Value: intNumberValues[355]}, + {Value: intNumberValues[356]}, + {Value: intNumberValues[357]}, + {Value: intNumberValues[358]}, + {Value: intNumberValues[359]}, + {Value: intNumberValues[360]}, + {Value: intNumberValues[361]}, + {Value: intNumberValues[362]}, + {Value: intNumberValues[363]}, + {Value: intNumberValues[364]}, + {Value: intNumberValues[365]}, + {Value: intNumberValues[366]}, + {Value: intNumberValues[367]}, + {Value: intNumberValues[368]}, + {Value: intNumberValues[369]}, + {Value: intNumberValues[370]}, + {Value: intNumberValues[371]}, + {Value: intNumberValues[372]}, + {Value: intNumberValues[373]}, + {Value: intNumberValues[374]}, + {Value: intNumberValues[375]}, + {Value: intNumberValues[376]}, + {Value: intNumberValues[377]}, + {Value: intNumberValues[378]}, + {Value: intNumberValues[379]}, + {Value: intNumberValues[380]}, + {Value: intNumberValues[381]}, + {Value: intNumberValues[382]}, + {Value: intNumberValues[383]}, + {Value: intNumberValues[384]}, + {Value: intNumberValues[385]}, + {Value: intNumberValues[386]}, + {Value: intNumberValues[387]}, + {Value: intNumberValues[388]}, + {Value: intNumberValues[389]}, + {Value: intNumberValues[390]}, + {Value: intNumberValues[391]}, + {Value: intNumberValues[392]}, + {Value: intNumberValues[393]}, + {Value: intNumberValues[394]}, + {Value: 
intNumberValues[395]}, + {Value: intNumberValues[396]}, + {Value: intNumberValues[397]}, + {Value: intNumberValues[398]}, + {Value: intNumberValues[399]}, + {Value: intNumberValues[400]}, + {Value: intNumberValues[401]}, + {Value: intNumberValues[402]}, + {Value: intNumberValues[403]}, + {Value: intNumberValues[404]}, + {Value: intNumberValues[405]}, + {Value: intNumberValues[406]}, + {Value: intNumberValues[407]}, + {Value: intNumberValues[408]}, + {Value: intNumberValues[409]}, + {Value: intNumberValues[410]}, + {Value: intNumberValues[411]}, + {Value: intNumberValues[412]}, + {Value: intNumberValues[413]}, + {Value: intNumberValues[414]}, + {Value: intNumberValues[415]}, + {Value: intNumberValues[416]}, + {Value: intNumberValues[417]}, + {Value: intNumberValues[418]}, + {Value: intNumberValues[419]}, + {Value: intNumberValues[420]}, + {Value: intNumberValues[421]}, + {Value: intNumberValues[422]}, + {Value: intNumberValues[423]}, + {Value: intNumberValues[424]}, + {Value: intNumberValues[425]}, + {Value: intNumberValues[426]}, + {Value: intNumberValues[427]}, + {Value: intNumberValues[428]}, + {Value: intNumberValues[429]}, + {Value: intNumberValues[430]}, + {Value: intNumberValues[431]}, + {Value: intNumberValues[432]}, + {Value: intNumberValues[433]}, + {Value: intNumberValues[434]}, + {Value: intNumberValues[435]}, + {Value: intNumberValues[436]}, + {Value: intNumberValues[437]}, + {Value: intNumberValues[438]}, + {Value: intNumberValues[439]}, + {Value: intNumberValues[440]}, + {Value: intNumberValues[441]}, + {Value: intNumberValues[442]}, + {Value: intNumberValues[443]}, + {Value: intNumberValues[444]}, + {Value: intNumberValues[445]}, + {Value: intNumberValues[446]}, + {Value: intNumberValues[447]}, + {Value: intNumberValues[448]}, + {Value: intNumberValues[449]}, + {Value: intNumberValues[450]}, + {Value: intNumberValues[451]}, + {Value: intNumberValues[452]}, + {Value: intNumberValues[453]}, + {Value: intNumberValues[454]}, + {Value: 
intNumberValues[455]}, + {Value: intNumberValues[456]}, + {Value: intNumberValues[457]}, + {Value: intNumberValues[458]}, + {Value: intNumberValues[459]}, + {Value: intNumberValues[460]}, + {Value: intNumberValues[461]}, + {Value: intNumberValues[462]}, + {Value: intNumberValues[463]}, + {Value: intNumberValues[464]}, + {Value: intNumberValues[465]}, + {Value: intNumberValues[466]}, + {Value: intNumberValues[467]}, + {Value: intNumberValues[468]}, + {Value: intNumberValues[469]}, + {Value: intNumberValues[470]}, + {Value: intNumberValues[471]}, + {Value: intNumberValues[472]}, + {Value: intNumberValues[473]}, + {Value: intNumberValues[474]}, + {Value: intNumberValues[475]}, + {Value: intNumberValues[476]}, + {Value: intNumberValues[477]}, + {Value: intNumberValues[478]}, + {Value: intNumberValues[479]}, + {Value: intNumberValues[480]}, + {Value: intNumberValues[481]}, + {Value: intNumberValues[482]}, + {Value: intNumberValues[483]}, + {Value: intNumberValues[484]}, + {Value: intNumberValues[485]}, + {Value: intNumberValues[486]}, + {Value: intNumberValues[487]}, + {Value: intNumberValues[488]}, + {Value: intNumberValues[489]}, + {Value: intNumberValues[490]}, + {Value: intNumberValues[491]}, + {Value: intNumberValues[492]}, + {Value: intNumberValues[493]}, + {Value: intNumberValues[494]}, + {Value: intNumberValues[495]}, + {Value: intNumberValues[496]}, + {Value: intNumberValues[497]}, + {Value: intNumberValues[498]}, + {Value: intNumberValues[499]}, + {Value: intNumberValues[500]}, + {Value: intNumberValues[501]}, + {Value: intNumberValues[502]}, + {Value: intNumberValues[503]}, + {Value: intNumberValues[504]}, + {Value: intNumberValues[505]}, + {Value: intNumberValues[506]}, + {Value: intNumberValues[507]}, + {Value: intNumberValues[508]}, + {Value: intNumberValues[509]}, + {Value: intNumberValues[510]}, + {Value: intNumberValues[511]}, + {Value: intNumberValues[512]}, +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go 
b/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go new file mode 100644 index 0000000000..9081fe7039 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go @@ -0,0 +1,106 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// This package provides options for JSON marshalling of AST nodes, and location +// data in particular. Since location data occupies a significant portion of the +// AST when included, it is excluded by default. The options provided here allow +// changing that behavior — either for all nodes or for specific types. Since +// JSONMarshaller implementations have access only to the node being marshaled, +// our options are to either attach these settings to *all* nodes in the AST, or +// to provide them via global state. The former is perhaps a little more elegant, +// and is what we went with initially. The cost of attaching these settings to +// every node however turned out to be non-negligible, and given that the number +// of users who have an interest in AST serialization are likely to be few, we +// have since switched to using global state, as provided here. Note that this +// is mostly to provide an equivalent feature to what we had before, should +// anyone depend on that. Users who need fine-grained control over AST +// serialization are recommended to use external libraries for that purpose, +// such as `github.com/json-iterator/go`. 
+package json + +import "sync" + +// Options defines the options for JSON operations, +// currently only marshaling can be configured +type Options struct { + MarshalOptions MarshalOptions +} + +// MarshalOptions defines the options for JSON marshaling, +// currently only toggling the marshaling of location information is supported +type MarshalOptions struct { + // IncludeLocation toggles the marshaling of location information + IncludeLocation NodeToggle + // IncludeLocationText additionally/optionally includes the text of the location + IncludeLocationText bool + // ExcludeLocationFile additionally/optionally excludes the file of the location + // Note that this is inverted (i.e. not "include" as the default needs to remain false) + ExcludeLocationFile bool +} + +// NodeToggle is a generic struct to allow the toggling of +// settings for different ast node types +type NodeToggle struct { + Term bool + Package bool + Comment bool + Import bool + Rule bool + Head bool + Expr bool + SomeDecl bool + Every bool + With bool + Annotations bool + AnnotationsRef bool +} + +// configuredJSONOptions synchronizes access to the global JSON options +type configuredJSONOptions struct { + options Options + lock sync.RWMutex +} + +var options = &configuredJSONOptions{ + options: Defaults(), +} + +// SetOptions sets the global options for marshalling AST nodes to JSON +func SetOptions(opts Options) { + options.lock.Lock() + defer options.lock.Unlock() + options.options = opts +} + +// GetOptions returns (a copy of) the global options for marshalling AST nodes to JSON +func GetOptions() Options { + options.lock.RLock() + defer options.lock.RUnlock() + return options.options +} + +// Defaults returns the default JSON options, which is to exclude location +// information in serialized JSON AST. 
+func Defaults() Options { + return Options{ + MarshalOptions: MarshalOptions{ + IncludeLocation: NodeToggle{ + Term: false, + Package: false, + Comment: false, + Import: false, + Rule: false, + Head: false, + Expr: false, + SomeDecl: false, + Every: false, + With: false, + Annotations: false, + AnnotationsRef: false, + }, + IncludeLocationText: false, + ExcludeLocationFile: false, + }, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go b/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go new file mode 100644 index 0000000000..6431b02ce9 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go @@ -0,0 +1,155 @@ +// Package location defines locations in Rego source code. +package location + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/util" +) + +// Location records a position in source code +type Location struct { + Text []byte `json:"-"` // The original text fragment from the source. + File string `json:"file"` // The name of the source file (which may be empty). + Row int `json:"row"` // The line in the source. + Col int `json:"col"` // The column in the row. + Offset int `json:"-"` // The byte offset for the location in the source. + + Tabs []int `json:"-"` // The column offsets of tabs in the source. +} + +// NewLocation returns a new Location object. +func NewLocation(text []byte, file string, row int, col int) *Location { + return &Location{Text: text, File: file, Row: row, Col: col} +} + +// Equal checks if two locations are equal to each other. +func (loc *Location) Equal(other *Location) bool { + return loc.File == other.File && + loc.Row == other.Row && + loc.Col == other.Col && + bytes.Equal(loc.Text, other.Text) +} + +// Errorf returns a new error value with a message formatted to include the location +// info (e.g., line, column, filename, etc.) 
+func (loc *Location) Errorf(f string, a ...any) error { + return errors.New(loc.Format(f, a...)) +} + +// Wrapf returns a new error value that wraps an existing error with a message formatted +// to include the location info (e.g., line, column, filename, etc.) +func (loc *Location) Wrapf(err error, f string, a ...any) error { + return fmt.Errorf(loc.Format(f, a...)+": %w", err) +} + +// Format returns a formatted string prefixed with the location information. +func (loc *Location) Format(f string, a ...any) string { + if len(loc.File) > 0 { + f = fmt.Sprintf("%v:%v: %v", loc.File, loc.Row, f) + } else { + f = fmt.Sprintf("%v:%v: %v", loc.Row, loc.Col, f) + } + return fmt.Sprintf(f, a...) +} + +func (loc *Location) String() string { + buf, _ := loc.AppendText(make([]byte, 0, loc.StringLength())) + return util.ByteSliceToString(buf) +} + +func (loc *Location) AppendText(buf []byte) ([]byte, error) { + if loc != nil { + switch { + case len(loc.File) > 0: + buf = util.AppendInt(append(append(buf, loc.File...), ':'), loc.Row) + case len(loc.Text) > 0: + buf = append(buf, loc.Text...) + default: + buf = util.AppendInt(append(util.AppendInt(buf, loc.Row), ':'), loc.Col) + } + } + return buf, nil +} + +func (loc *Location) StringLength() (n int) { + if loc != nil { + if l := len(loc.File); l > 0 { + n = l + 1 + util.NumDigitsInt(loc.Row) + } else if l := len(loc.Text); l > 0 { + n = l + } else { + n = util.NumDigitsInt(loc.Row) + 1 + util.NumDigitsInt(loc.Col) + } + } + return n +} + +// Compare returns -1, 0, or 1 to indicate if this loc is less than, equal to, +// or greater than the other. Comparison is performed on the file, row, and +// column of the Location (but not on the text.) Nil locations are greater than +// non-nil locations. 
+func (loc *Location) Compare(other *Location) int { + if loc == other { + return 0 + } else if loc == nil { + return 1 + } else if other == nil { + return -1 + } else if loc.File < other.File { + return -1 + } else if loc.File > other.File { + return 1 + } else if loc.Row < other.Row { + return -1 + } else if loc.Row > other.Row { + return 1 + } else if loc.Col < other.Col { + return -1 + } else if loc.Col > other.Col { + return 1 + } + return 0 +} + +func (loc *Location) MarshalJSON() ([]byte, error) { + // structs are used here to preserve the field ordering of the original Location struct + jsonOptions := astJSON.GetOptions().MarshalOptions + if jsonOptions.ExcludeLocationFile { + data := struct { + Row int `json:"row"` + Col int `json:"col"` + Text []byte `json:"text,omitempty"` + }{ + Row: loc.Row, + Col: loc.Col, + } + + if jsonOptions.IncludeLocationText { + data.Text = loc.Text + } + + return json.Marshal(data) + } + + data := struct { + File string `json:"file"` + Row int `json:"row"` + Col int `json:"col"` + Text []byte `json:"text,omitempty"` + }{ + Row: loc.Row, + Col: loc.Col, + File: loc.File, + } + + if jsonOptions.IncludeLocationText { + data.Text = loc.Text + } + + return json.Marshal(data) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/map.go b/vendor/github.com/open-policy-agent/opa/v1/ast/map.go new file mode 100644 index 0000000000..31cad4d611 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/map.go @@ -0,0 +1,108 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "encoding/json" + + "github.com/open-policy-agent/opa/v1/util" +) + +// ValueMap represents a key/value map between AST term values. Any type of term +// can be used as a key in the map. +type ValueMap struct { + hashMap *util.TypedHashMap[Value, Value] +} + +// NewValueMap returns a new ValueMap. 
+func NewValueMap() *ValueMap { + return &ValueMap{ + hashMap: util.NewTypedHashMap(ValueEqual, ValueEqual, Value.Hash, Value.Hash, nil), + } +} + +// MarshalJSON provides a custom marshaller for the ValueMap which +// will include the key, value, and value type. +func (vs *ValueMap) MarshalJSON() ([]byte, error) { + var tmp []map[string]any + vs.Iter(func(k Value, v Value) bool { + tmp = append(tmp, map[string]any{ + "name": k.String(), + "type": ValueName(v), + "value": v, + }) + return false + }) + return json.Marshal(tmp) +} + +// Equal returns true if this ValueMap equals the other. +func (vs *ValueMap) Equal(other *ValueMap) bool { + if vs == nil { + return other == nil || other.Len() == 0 + } + if other == nil { + return vs.Len() == 0 + } + return vs.hashMap.Equal(other.hashMap) +} + +// Len returns the number of elements in the map. +func (vs *ValueMap) Len() int { + if vs == nil { + return 0 + } + return vs.hashMap.Len() +} + +// Get returns the value in the map for k. +func (vs *ValueMap) Get(k Value) Value { + if vs != nil { + if v, ok := vs.hashMap.Get(k); ok { + return v + } + } + return nil +} + +// Hash returns a hash code for this ValueMap. +func (vs *ValueMap) Hash() int { + if vs == nil { + return 0 + } + return vs.hashMap.Hash() +} + +// Iter calls the iter function for each key/value pair in the map. If the iter +// function returns true, iteration stops. +func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool { + if vs == nil { + return false + } + return vs.hashMap.Iter(iter) +} + +// Put inserts a key k into the map with value v. +func (vs *ValueMap) Put(k, v Value) { + if vs == nil { + panic("put on nil value map") + } + vs.hashMap.Put(k, v) +} + +// Delete removes a key k from the map. 
+func (vs *ValueMap) Delete(k Value) { + if vs == nil { + return + } + vs.hashMap.Delete(k) +} + +func (vs *ValueMap) String() string { + if vs == nil { + return "{}" + } + return vs.hashMap.String() +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go new file mode 100644 index 0000000000..9e52b89a67 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go @@ -0,0 +1,3232 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "maps" + "math/big" + "net/url" + "regexp" + "slices" + "sort" + "strconv" + "strings" + "unicode/utf8" + + "go.yaml.in/yaml/v3" + + "github.com/open-policy-agent/opa/v1/ast/internal/scanner" + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/util" +) + +// DefaultMaxParsingRecursionDepth is the default maximum recursion +// depth for the parser +const DefaultMaxParsingRecursionDepth = 100000 + +// ErrMaxParsingRecursionDepthExceeded is returned when the parser +// recursion exceeds the maximum allowed depth +var ErrMaxParsingRecursionDepthExceeded = errors.New("max parsing recursion depth exceeded") + +var RegoV1CompatibleRef = Ref{VarTerm("rego"), InternedTerm("v1")} + +// RegoVersion defines the Rego syntax requirements for a module. +type RegoVersion int + +const DefaultRegoVersion = RegoV1 + +const ( + RegoUndefined RegoVersion = iota + // RegoV0 is the default, original Rego syntax. + RegoV0 + // RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module). 
+ // Shortly, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported. + RegoV0CompatV1 + // RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.: + // future.keywords part of default keyword set, and don't require imports; + // 'if' and 'contains' required in rule heads; + // (some) strict checks on by default. + RegoV1 +) + +var ( + // this is the name to use for instantiating an empty set, e.g., `set()`. + setConstructor = RefTerm(VarTerm("set")) + + preAllocWildcards = [...]Value{ + Var("$0"), Var("$1"), Var("$2"), Var("$3"), Var("$4"), Var("$5"), + Var("$6"), Var("$7"), Var("$8"), Var("$9"), Var("$10"), + } + + // use static references to avoid allocations, and + // copy them to the call term only when needed + memberWithKeyRef = MemberWithKey.Ref() + memberRef = Member.Ref() + + newlineBytes = []byte{'\n'} + metadataBytes = []byte("METADATA") + metadataParserPool = util.NewSyncPool[metadataParser]() +) + +func (v RegoVersion) Int() int { + if v == RegoV1 { + return 1 + } + return 0 +} + +func (v RegoVersion) String() string { + switch v { + case RegoV0: + return "v0" + case RegoV1: + return "v1" + case RegoV0CompatV1: + return "v0v1" + default: + return "unknown" + } +} + +func RegoVersionFromInt(i int) RegoVersion { + if i == 1 { + return RegoV1 + } + return RegoV0 +} + +// Note: This state is kept isolated from the parser so that we +// can do efficient shallow copies of these values when doing a +// save() and restore(). 
+type state struct { + errors Errors + comments []*Comment + hints []string + s *scanner.Scanner + loc Location + lit string + lastEnd int + tokEnd int + wildcard int + tok tokens.Token + skippedNL bool +} + +func (s *state) String() string { + return fmt.Sprintf("", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments)) +} + +func (s *state) Loc() *location.Location { + cpy := s.loc + return &cpy +} + +func (s *state) Text(offset, end int) []byte { + bs := s.s.Bytes() + if offset >= 0 && offset < len(bs) { + if end >= offset && end <= len(bs) { + return bs[offset:end] + } + } + return nil +} + +// Parser is used to parse Rego statements. +type Parser struct { + r io.Reader + s *state + po ParserOptions + cache parsedTermCache + recursionDepth int + maxRecursionDepth int +} + +type parsedTermCacheItem struct { + t *Term + post *state // post is the post-state that's restored on a cache-hit + offset int + next *parsedTermCacheItem +} + +type parsedTermCache struct { + m *parsedTermCacheItem +} + +func (c parsedTermCache) String() string { + s := strings.Builder{} + s.WriteRune('{') + var e *parsedTermCacheItem + for e = c.m; e != nil; e = e.next { + s.WriteString(e.String()) + } + s.WriteRune('}') + return s.String() +} + +func (e *parsedTermCacheItem) String() string { + return fmt.Sprintf("<%d:%v>", e.offset, e.t) +} + +// ParserOptions defines the options for parsing Rego statements. +type ParserOptions struct { + Capabilities *Capabilities + ProcessAnnotation bool + AllFutureKeywords bool + FutureKeywords []string + SkipRules bool + // RegoVersion is the version of Rego to parse for. + RegoVersion RegoVersion + unreleasedKeywords bool // TODO(sr): cleanup +} + +// EffectiveRegoVersion returns the effective RegoVersion to use for parsing. +func (po *ParserOptions) EffectiveRegoVersion() RegoVersion { + if po.RegoVersion == RegoUndefined { + return DefaultRegoVersion + } + return po.RegoVersion +} + +// NewParser creates and initializes a Parser. 
+func NewParser() *Parser { + p := &Parser{ + s: &state{}, + po: ParserOptions{}, + maxRecursionDepth: DefaultMaxParsingRecursionDepth, + } + return p +} + +// WithMaxRecursionDepth sets the maximum recursion depth for the parser. +func (p *Parser) WithMaxRecursionDepth(depth int) *Parser { + p.maxRecursionDepth = depth + return p +} + +// WithFilename provides the filename for Location details +// on parsed statements. +func (p *Parser) WithFilename(filename string) *Parser { + p.s.loc.File = filename + return p +} + +// WithReader provides the io.Reader that the parser will +// use as its source. +func (p *Parser) WithReader(r io.Reader) *Parser { + p.r = r + return p +} + +// WithProcessAnnotation enables or disables the processing of +// annotations by the Parser +func (p *Parser) WithProcessAnnotation(processAnnotation bool) *Parser { + p.po.ProcessAnnotation = processAnnotation + return p +} + +// WithFutureKeywords enables "future" keywords, i.e., keywords that can +// be imported via +// +// import future.keywords.kw +// import future.keywords.other +// +// but in a more direct way. The equivalent of this import would be +// +// WithFutureKeywords("kw", "other") +func (p *Parser) WithFutureKeywords(kws ...string) *Parser { + p.po.FutureKeywords = kws + return p +} + +// WithAllFutureKeywords enables all "future" keywords, i.e., the +// ParserOption equivalent of +// +// import future.keywords +func (p *Parser) WithAllFutureKeywords(yes bool) *Parser { + p.po.AllFutureKeywords = yes + return p +} + +// withUnreleasedKeywords allows using keywords that haven't surfaced +// as future keywords (see above) yet, but have tests that require +// them to be parsed +func (p *Parser) withUnreleasedKeywords(yes bool) *Parser { + p.po.unreleasedKeywords = yes + return p +} + +// WithCapabilities sets the capabilities structure on the parser. 
+func (p *Parser) WithCapabilities(c *Capabilities) *Parser { + p.po.Capabilities = c + return p +} + +// WithSkipRules instructs the parser not to attempt to parse Rule statements. +func (p *Parser) WithSkipRules(skip bool) *Parser { + p.po.SkipRules = skip + return p +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. +func (p *Parser) WithJSONOptions(_ *astJSON.Options) *Parser { + return p +} + +func (p *Parser) WithRegoVersion(version RegoVersion) *Parser { + p.po.RegoVersion = version + return p +} + +func (p *Parser) parsedTermCacheLookup() (*Term, *state) { + l := p.s.loc.Offset + // stop comparing once the cached offsets are lower than l + for h := p.cache.m; h != nil && h.offset >= l; h = h.next { + if h.offset == l { + return h.t, h.post + } + } + return nil, nil +} + +func (p *Parser) parsedTermCachePush(t *Term, s0 *state) { + s1 := p.save() + o0 := s0.loc.Offset + entry := parsedTermCacheItem{t: t, post: s1, offset: o0} + + // find the first one whose offset is smaller than ours + var e *parsedTermCacheItem + for e = p.cache.m; e != nil; e = e.next { + if e.offset < o0 { + break + } + } + entry.next = e + p.cache.m = &entry +} + +// futureParser returns a shallow copy of `p` with an empty +// cache, and a scanner that knows all future keywords. +// It's used to present hints in errors, when statements would +// only parse successfully if some future keyword is enabled. +func (p *Parser) futureParser() *Parser { + q := *p + q.s = p.save() + q.s.s = p.s.s.WithKeywords(allFutureKeywords) + q.cache = parsedTermCache{} + return &q +} + +// presentParser returns a shallow copy of `p` with an empty +// cache, and a scanner that knows none of the future keywords. 
+// It is used to successfully parse keyword imports, like +// +// import future.keywords.in +// +// even when the parser has already been informed about the +// future keyword "in". This parser won't error out because +// "in" is an identifier. +func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) { + var cpy map[string]tokens.Token + q := *p + q.s = p.save() + q.s.s, cpy = p.s.s.WithoutKeywords(allFutureKeywords) + q.cache = parsedTermCache{} + return &q, cpy +} + +// Parse will read the Rego source and parse statements and +// comments as they are found. Any errors encountered while +// parsing will be accumulated and returned as a list of Errors. +func (p *Parser) Parse() ([]Statement, []*Comment, Errors) { + + if p.po.Capabilities == nil { + p.po.Capabilities = CapabilitiesForThisVersion(CapabilitiesRegoVersion(p.po.RegoVersion)) + } + + allowedFutureKeywords := map[string]tokens.Token{} + + if p.po.EffectiveRegoVersion() == RegoV1 { + if !p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: "illegal capabilities: rego_v1 feature required for parsing v1 Rego", + Location: nil, + }, + } + } + + // rego-v1 includes all v0 future keywords in the default language definition + maps.Copy(allowedFutureKeywords, futureKeywordsV0) + + for _, kw := range p.po.Capabilities.FutureKeywords { + if tok, ok := futureKeywords[kw]; ok { + allowedFutureKeywords[kw] = tok + } else { + // For sake of error reporting, we still need to check that keywords in capabilities are known in v0 + if _, ok := futureKeywordsV0[kw]; !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), + Location: nil, + }, + } + } + } + } + + // Check that explicitly requested future keywords are known. 
+ for _, kw := range p.po.FutureKeywords { + if _, ok := allowedFutureKeywords[kw]; !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("unknown future keyword: %v", kw), + Location: nil, + }, + } + } + } + } else { + for _, kw := range p.po.Capabilities.FutureKeywords { + var ok bool + allowedFutureKeywords[kw], ok = allFutureKeywords[kw] + if !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), + Location: nil, + }, + } + } + } + + if p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + // rego-v1 includes all v0 future keywords in the default language definition + maps.Copy(allowedFutureKeywords, futureKeywordsV0) + } + } + + var err error + p.s.s, err = scanner.New(p.r) + if err != nil { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: err.Error(), + Location: nil, + }, + } + } + + selected := map[string]tokens.Token{} + if p.po.AllFutureKeywords || p.po.EffectiveRegoVersion() == RegoV1 { + maps.Copy(selected, allowedFutureKeywords) + } else { + for _, kw := range p.po.FutureKeywords { + tok, ok := allowedFutureKeywords[kw] + if !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("unknown future keyword: %v", kw), + Location: nil, + }, + } + } + selected[kw] = tok + } + } + p.s.s = p.s.s.WithKeywords(selected) + + if p.po.EffectiveRegoVersion() == RegoV1 { + for kw, tok := range allowedFutureKeywords { + p.s.s.AddKeyword(kw, tok) + } + } + + // read the first token to initialize the parser + p.scan() + + var stmts []Statement + + // Read from the scanner until the last token is reached or no statements + // can be parsed. Attempt to parse package statements, import statements, + // rule statements, and then body/query statements (in that order). If a + // statement cannot be parsed, restore the parser state before trying the + // next type of statement. 
If a statement can be parsed, continue from that + // point trying to parse packages, imports, etc. in the same order. + for p.s.tok != tokens.EOF { + s := p.save() + + if pkg := p.parsePackage(); pkg != nil { + stmts = append(stmts, pkg) + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + s = p.save() + + if imp := p.parseImport(); imp != nil { + if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) { + p.regoV1Import(imp) + } + + if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) { + p.futureImport(imp, allowedFutureKeywords) + } + + stmts = append(stmts, imp) + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + + if !p.po.SkipRules { + s = p.save() + + if rules := p.parseRules(); rules != nil { + for i := range rules { + stmts = append(stmts, rules[i]) + } + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + } + + if body := p.parseQuery(true, tokens.EOF); body != nil { + stmts = append(stmts, body) + continue + } + + break + } + + if p.po.ProcessAnnotation { + stmts = p.parseAnnotations(stmts) + } + + return stmts, p.s.comments, p.s.errors +} + +func (p *Parser) parseAnnotations(stmts []Statement) []Statement { + annotStmts, errs := parseAnnotations(p.s.comments) + for _, err := range errs { + p.error(err.Location, err.Message) + } + + stmts = slices.Grow(stmts, len(annotStmts)) + for _, annotStmt := range annotStmts { + stmts = append(stmts, annotStmt) + } + + return stmts +} + +func parseAnnotations(comments []*Comment) (stmts []*Annotations, errs Errors) { + numBlocks := CountFunc(comments, isMetadataComment) + if numBlocks == 0 { + return nil, nil + } + + stmts = make([]*Annotations, 0, numBlocks) + mdp := metadataParserPool.Get() + if mdp.buf == nil { + mdp.buf = &bytes.Buffer{} + } + + for i := range comments { + if isMetadataComment(comments[i]) { // scan until end of block + mdp.Reset(comments[i].Location) + for i++; i < len(comments) && !blockBuster(comments[i], comments[i-1]); i++ { 
+ mdp.Append(comments[i]) + } + + if a, err := mdp.Parse(); err != nil { + errs = append(errs, &Error{Code: ParseErr, Message: err.Error(), Location: mdp.loc}) + } else { + stmts = append(stmts, a) + } + } + } + + metadataParserPool.Put(mdp) + + return stmts, errs +} + +func isMetadataComment(c *Comment) bool { + return c.Location.Col == 1 && bytes.HasPrefix(bytes.TrimSpace(c.Text), metadataBytes) +} + +func blockBuster(curr, prev *Comment) bool { // or endOfBlock, but the name was too good to pass up + return curr.Location.Col != 1 || curr.Location.Row-1 != prev.Location.Row +} + +func (p *Parser) parsePackage() *Package { + if p.s.tok != tokens.Package { + return nil + } + + var pkg Package + pkg.SetLoc(p.s.Loc()) + + p.scanWS() + + // Make sure we allow the first term of refs to be the 'package' keyword. + if p.s.tok == tokens.Dot || p.s.tok == tokens.LBrack { + // This is a ref, not a package declaration. + return nil + } + + if p.s.tok == tokens.Whitespace { + p.scan() + } + + if !isIdentOrAllowedRefKeyword(p) { + p.illegalToken() + return nil + } + + term := p.parseTerm() + + if term != nil { + switch v := term.Value.(type) { + case Var: + pkg.Path = Ref{ + DefaultRootDocument.Copy().SetLocation(term.Location), + StringTerm(string(v)).SetLocation(term.Location), + } + case Ref: + pkg.Path = make(Ref, len(v)+1) + pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location) + first, ok := v[0].Value.(Var) + if !ok { + p.errorf(v[0].Location, "unexpected %v token: expecting var", ValueName(v[0].Value)) + return nil + } + pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location) + for i := 2; i < len(pkg.Path); i++ { + switch v[i-1].Value.(type) { + case String: + pkg.Path[i] = v[i-1] + default: + p.errorf(v[i-1].Location, "unexpected %v token: expecting string", ValueName(v[i-1].Value)) + return nil + } + } + default: + p.illegalToken() + return nil + } + } + + if pkg.Path == nil { + if len(p.s.errors) == 0 { + p.error(p.s.Loc(), "expected path") 
+ } + return nil + } + + return &pkg +} + +func (p *Parser) parseImport() *Import { + if p.s.tok != tokens.Import { + return nil + } + + var imp Import + imp.SetLoc(p.s.Loc()) + + p.scanWS() + + // Make sure we allow the first term of refs to be the 'import' keyword. + if p.s.tok == tokens.Dot || p.s.tok == tokens.LBrack { + // This is a ref, not an import declaration. + return nil + } + + if p.s.tok == tokens.Whitespace { + p.scan() + } + + if !isIdentOrAllowedRefKeyword(p) { + p.illegalToken() + return nil + } + + q, prev := p.presentParser() + term := q.parseTerm() + if term != nil { + switch v := term.Value.(type) { + case Var: + imp.Path = RefTerm(term).SetLocation(term.Location) + case Ref: + for i := 1; i < len(v); i++ { + if _, ok := v[i].Value.(String); !ok { + p.errorf(v[i].Location, "unexpected %v token: expecting string", ValueName(v[i].Value)) + return nil + } + } + imp.Path = term + } + } + // keep advanced parser state, reset known keywords + p.s = q.s + p.s.s = q.s.s.WithKeywords(prev) + + if imp.Path == nil { + p.error(p.s.Loc(), "expected path") + return nil + } + + path := imp.Path.Value.(Ref) + + switch { + case RootDocumentNames.Contains(path[0]): + case FutureRootDocument.Equal(path[0]): + case RegoRootDocument.Equal(path[0]): + default: + p.hint("if this is unexpected, try updating OPA") + p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v", + RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)), + path[0]) + return nil + } + + if p.s.tok == tokens.As { + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected var") + return nil + } + + if alias := p.parseTerm(); alias != nil { + v, ok := alias.Value.(Var) + if ok { + imp.Alias = v + return &imp + } + } + p.illegal("expected var") + return nil + } + + if imp.Alias != "" { + // Unreachable: parsing the alias var should already have generated an error. 
+ name := imp.Alias.String() + if IsKeywordInRegoVersion(name, p.po.EffectiveRegoVersion()) { + p.errorf(imp.Location, "unexpected import alias, must not be a keyword, got: %s", name) + } + return &imp + } + + r := imp.Path.Value.(Ref) + + // Don't allow keywords in the tail path term unless it's a future import + if len(r) == 1 { + t := r[0] + name := string(t.Value.(Var)) + if IsKeywordInRegoVersion(name, p.po.EffectiveRegoVersion()) { + p.errorf(t.Location, "unexpected import path, must not end with a keyword, got: %s", name) + p.hint("import a different path or use an alias") + } + } else if !FutureRootDocument.Equal(r[0]) { + t := r[len(r)-1] + name := string(t.Value.(String)) + if IsKeywordInRegoVersion(name, p.po.EffectiveRegoVersion()) { + p.errorf(t.Location, "unexpected import path, must not end with a keyword, got: %s", name) + p.hint("import a different path or use an alias") + } + } + + return &imp +} + +// isIdentOrAllowedRefKeyword checks if the current token is an Ident or a keyword in the active rego-version. 
+// If a keyword, sets p.s.token to token.Ident +func isIdentOrAllowedRefKeyword(p *Parser) bool { + if p.s.tok == tokens.Ident { + return true + } + + if p.isAllowedRefKeyword(p.s.tok) { + p.s.tok = tokens.Ident + return true + } + + return false +} + +func scanAheadRef(p *Parser) bool { + if p.isAllowedRefKeyword(p.s.tok) { + // scan ahead to check if we're parsing a ref + s := p.save() + p.scanWS() + tok := p.s.tok + p.restore(s) + + if tok == tokens.Dot || tok == tokens.LBrack { + p.s.tok = tokens.Ident + return true + } + } + + return false +} + +func (p *Parser) parseRules() []*Rule { + + var rule Rule + rule.SetLoc(p.s.Loc()) + + // This allows keywords in the first var term of the ref + _ = scanAheadRef(p) + + if p.s.tok == tokens.Default { + p.scan() + rule.Default = true + _ = scanAheadRef(p) + } + + if p.s.tok != tokens.Ident { + return nil + } + + usesContains := false + if rule.Head, usesContains = p.parseHead(rule.Default); rule.Head == nil { + return nil + } + + if usesContains { + rule.Head.keywords = append(rule.Head.keywords, tokens.Contains) + } + + if rule.Default { + if !p.validateDefaultRuleValue(&rule) { + return nil + } + + if len(rule.Head.Args) > 0 { + if !p.validateDefaultRuleArgs(&rule) { + return nil + } + } + + rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) + return []*Rule{&rule} + } + + // back-compat with `p[x] { ... }`` + hasIf := p.s.tok == tokens.If + + // p[x] if ... 
becomes a single-value rule p[x] + if hasIf && !usesContains && len(rule.Head.Ref()) == 2 { + v := rule.Head.Ref()[1] + _, isRef := v.Value.(Ref) + if (!v.IsGround() || isRef) && len(rule.Head.Args) == 0 { + rule.Head.Key = rule.Head.Ref()[1] + } + + if rule.Head.Value == nil { + rule.Head.generatedValue = true + rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location) + } else { + // p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat + v, ok := rule.Head.Ref()[0].Value.(Var) + if !ok { + return nil + } + rule.Head.Name = v + } + } + + // p[x] becomes a multi-value rule p + if !hasIf && !usesContains && + len(rule.Head.Args) == 0 && // not a function + len(rule.Head.Ref()) == 2 { // ref like 'p[x]' + v, ok := rule.Head.Ref()[0].Value.(Var) + if !ok { + return nil + } + rule.Head.Name = v + rule.Head.Key = rule.Head.Ref()[1] + if rule.Head.Value == nil { + rule.Head.SetRef(rule.Head.Ref()[:len(rule.Head.Ref())-1]) + } + } + + switch { + case hasIf: + rule.Head.keywords = append(rule.Head.keywords, tokens.If) + p.scan() + s := p.save() + if expr := p.parseLiteral(); expr != nil { + // NOTE(sr): set literals are never false or undefined, so parsing this as + // p if { true } + // ^^^^^^^^ set of one element, `true` + // isn't valid. 
+ isSetLiteral := false + if t, ok := expr.Terms.(*Term); ok { + _, isSetLiteral = t.Value.(Set) + } + // expr.Term is []*Term or Every + if !isSetLiteral { + rule.Body.Append(expr) + break + } + } + + // parsing as literal didn't work out, expect '{ BODY }' + p.restore(s) + fallthrough + + case p.s.tok == tokens.LBrace: + p.scan() + if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { + return nil + } + p.scan() + + case usesContains: + rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) + rule.generatedBody = true + rule.Location = rule.Head.Location + + return []*Rule{&rule} + + default: + return nil + } + + if p.s.tok == tokens.Else { + // This might just be a refhead rule with a leading 'else' term. + if !scanAheadRef(p) { + if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() { + p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head") + return nil + } + if rule.Head.Key != nil { + p.error(p.s.Loc(), "else keyword cannot be used on multi-value rules") + return nil + } + + if rule.Else = p.parseElse(rule.Head); rule.Else == nil { + return nil + } + } + } + + rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) + + rules := []*Rule{&rule} + + for p.s.tok == tokens.LBrace { + + if rule.Else != nil { + p.error(p.s.Loc(), "expected else keyword") + return nil + } + + loc := p.s.Loc() + + p.scan() + var next Rule + + if next.Body = p.parseBody(tokens.RBrace); next.Body == nil { + return nil + } + p.scan() + + loc.Text = p.s.Text(loc.Offset, p.s.lastEnd) + next.SetLoc(loc) + + // Chained rule head's keep the original + // rule's head AST but have their location + // set to the rule body. 
+ next.Head = rule.Head.Copy() + next.Head.keywords = rule.Head.keywords + for i := range next.Head.Args { + if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() { + next.Head.Args[i].Value = p.genwildcard() + } + } + setLocRecursive(next.Head, loc) + + rules = append(rules, &next) + } + + return rules +} + +func (p *Parser) parseElse(head *Head) *Rule { + + var rule Rule + rule.SetLoc(p.s.Loc()) + + rule.Head = head.Copy() + rule.Head.generatedValue = false + for i := range rule.Head.Args { + if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() { + rule.Head.Args[i].Value = p.genwildcard() + } + } + rule.Head.SetLoc(p.s.Loc()) + + defer func() { + rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) + }() + + p.scan() + + switch p.s.tok { + case tokens.LBrace, tokens.If: // no value, but a body follows directly + rule.Head.generatedValue = true + rule.Head.Value = BooleanTerm(true) + case tokens.Assign, tokens.Unify: + rule.Head.Assign = tokens.Assign == p.s.tok + p.scan() + rule.Head.Value = p.parseTermInfixCall() + if rule.Head.Value == nil { + return nil + } + rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd) + default: + p.illegal("expected else value term or rule body") + return nil + } + + hasIf := p.s.tok == tokens.If + hasLBrace := p.s.tok == tokens.LBrace + + if !hasIf && !hasLBrace { + rule.Body = NewBody(NewExpr(BooleanTerm(true))) + rule.generatedBody = true + setLocRecursive(rule.Body, rule.Location) + return &rule + } + + if hasIf { + rule.Head.keywords = append(rule.Head.keywords, tokens.If) + p.scan() + } + + if p.s.tok == tokens.LBrace { + p.scan() + if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { + return nil + } + p.scan() + } else if p.s.tok != tokens.EOF { + expr := p.parseLiteral() + if expr == nil { + return nil + } + rule.Body.Append(expr) + setLocRecursive(rule.Body, rule.Location) + } else { + p.illegal("rule body expected") + return nil + } + + if p.s.tok == 
tokens.Else { + if rule.Else = p.parseElse(head); rule.Else == nil { + return nil + } + } + return &rule +} + +func (p *Parser) parseHead(defaultRule bool) (*Head, bool) { + head := &Head{} + loc := p.s.Loc() + defer func() { + if head != nil { + head.SetLoc(loc) + head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd) + } + }() + + term := p.parseVar() + if term == nil { + return nil, false + } + + ref := p.parseHeadFinish(term, true) + if ref == nil { + p.illegal("expected rule head name") + return nil, false + } + + switch x := ref.Value.(type) { + case Var: + // TODO + head = VarHead(x, ref.Location, nil) + case Ref: + head = RefHead(x) + case Call: + op, args := x[0], x[1:] + var ref Ref + switch y := op.Value.(type) { + case Var: + ref = Ref{op} + case Ref: + if _, ok := y[0].Value.(Var); !ok { + p.illegal("rule head ref %v invalid", y) + return nil, false + } + ref = y + } + head = RefHead(ref) + head.Args = slices.Clone[[]*Term](args) + + default: + return nil, false + } + + name := head.Ref().String() + + switch p.s.tok { + case tokens.Contains: // NOTE: no Value for `contains` heads, we return here + // Catch error case of using 'contains' with a function definition rule head. + if head.Args != nil { + p.illegal("the contains keyword can only be used with multi-value rule definitions (e.g., %s contains { ... })", name) + } + p.scan() + head.Key = p.parseTermInfixCall() + if head.Key == nil { + p.illegal("expected rule key term (e.g., %s contains { ... })", name) + } + return head, true + + case tokens.Unify: + p.scan() + head.Value = p.parseTermInfixCall() + if head.Value == nil { + // FIX HEAD.String() + p.illegal("expected rule value term (e.g., %s[%s] = { ... })", name, head.Key) + } + case tokens.Assign: + p.scan() + head.Assign = true + head.Value = p.parseTermInfixCall() + if head.Value == nil { + switch { + case len(head.Args) > 0: + p.illegal("expected function value term (e.g., %s(...) := { ... 
})", name) + case head.Key != nil: + p.illegal("expected partial rule value term (e.g., %s[...] := { ... })", name) + case defaultRule: + p.illegal("expected default rule value term (e.g., default %s := )", name) + default: + p.illegal("expected rule value term (e.g., %s := { ... })", name) + } + } + } + + if head.Value == nil && head.Key == nil { + if len(head.Ref()) != 2 || len(head.Args) > 0 { + head.generatedValue = true + head.Value = BooleanTerm(true).SetLocation(head.Location) + } + } + return head, false +} + +func (p *Parser) parseBody(end tokens.Token) Body { + if !p.enter() { + return nil + } + defer p.leave() + return p.parseQuery(false, end) +} + +func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body { + body := Body{} + + if p.s.tok == end { + p.error(p.s.Loc(), "found empty body") + return nil + } + + for { + expr := p.parseLiteral() + if expr == nil { + return nil + } + + body.Append(expr) + + if p.s.tok == tokens.Semicolon { + p.scan() + continue + } + + if p.s.tok == end || requireSemi { + return body + } + + if !p.s.skippedNL { + // If there was already an error then don't pile this one on + if len(p.s.errors) == 0 { + p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end) + } + return nil + } + } +} + +func (p *Parser) parseLiteral() (expr *Expr) { + + offset := p.s.loc.Offset + loc := p.s.Loc() + + defer func() { + if expr != nil { + loc.Text = p.s.Text(offset, p.s.lastEnd) + expr.SetLoc(loc) + } + }() + + // Check that we're not parsing a ref + if p.isAllowedRefKeyword(p.s.tok) { + // Scan ahead + s := p.save() + p.scanWS() + tok := p.s.tok + p.restore(s) + + if tok == tokens.Dot || tok == tokens.LBrack { + p.s.tok = tokens.Ident + return p.parseLiteralExpr(false) + } + } + + var negated bool + if p.s.tok == tokens.Not { + s := p.save() + p.scanWS() + tok := p.s.tok + p.restore(s) + + if tok != tokens.Dot && tok != tokens.LBrack { + p.scan() + negated = true + } + } + + switch p.s.tok { + case tokens.Some: + if negated { + 
p.illegal("illegal negation of 'some'") + return nil + } + return p.parseSome() + case tokens.Every: + if negated { + p.illegal("illegal negation of 'every'") + return nil + } + return p.parseEvery() + default: + return p.parseLiteralExpr(negated) + } +} + +func (p *Parser) isAllowedRefKeyword(t tokens.Token) bool { + return p.isAllowedRefKeywordStr(t.String()) +} + +func (p *Parser) isAllowedRefKeywordStr(s string) bool { + if p.po.Capabilities.ContainsFeature(FeatureKeywordsInRefs) { + return IsKeywordInRegoVersion(s, p.po.EffectiveRegoVersion()) || p.s.s.IsKeyword(s) + } + + return false +} + +func (p *Parser) parseLiteralExpr(negated bool) *Expr { + s := p.save() + expr := p.parseExpr() + if expr != nil { + expr.Negated = negated + if p.s.tok == tokens.With { + if expr.With = p.parseWith(); expr.With == nil { + return nil + } + } + + if p.isFutureKeyword("every") { + // If we find a plain `every` identifier, attempt to parse an every expression, + // add hint if it succeeds. + if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { + var hint bool + t := p.save() + p.restore(s) + if expr := p.futureParser().parseEvery(); expr != nil { + _, hint = expr.Terms.(*Every) + } + p.restore(t) + if hint { + p.hint("`import future.keywords.every` for `every x in xs { ... 
}` expressions") + } + } + } + } + return expr +} + +func (p *Parser) parseWith() []*With { + withs := []*With{} + + for { + with := With{Location: p.s.Loc()} + + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected ident") + return nil + } + + with.Target = p.parseTerm() + if with.Target == nil { + return nil + } + + switch with.Target.Value.(type) { + case Ref, Var: + break + default: + p.illegal("expected with target path") + } + + if p.s.tok != tokens.As { + p.illegal("expected as keyword") + return nil + } + + p.scan() + + if with.Value = p.parseTermInfixCall(); with.Value == nil { + return nil + } + + with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd) + + withs = append(withs, &with) + + if p.s.tok != tokens.With { + break + } + } + + return withs +} + +func (p *Parser) parseSome() *Expr { + + decl := &SomeDecl{} + decl.SetLoc(p.s.Loc()) + + // Attempt to parse "some x in xs", which will end up in + // SomeDecl{Symbols: ["member(x, xs)"]} + s := p.save() + p.scan() + if term := p.parseTermInfixCall(); term != nil { + if call, ok := term.Value.(Call); ok { + switch call[0].String() { + case Member.Name: + if len(call) != 3 { + p.illegal("illegal domain") + return nil + } + case MemberWithKey.Name: + if len(call) != 4 { + p.illegal("illegal domain") + return nil + } + default: + p.illegal("expected `x in xs` or `x, y in xs` expression") + return nil + } + + decl.Symbols = []*Term{term} + expr := NewExpr(decl).SetLocation(decl.Location) + if p.s.tok == tokens.With { + if expr.With = p.parseWith(); expr.With == nil { + return nil + } + } + return expr + } + } + + p.restore(s) + + if p.isFutureKeyword("in") { + s = p.save() // new copy for later + var hint bool + p.scan() + if term := p.futureParser().parseTermInfixCall(); term != nil { + if call, ok := term.Value.(Call); ok { + switch call[0].String() { + case Member.Name, MemberWithKey.Name: + hint = true + } + } + } + + // go on as before, it's `some x[...]` or illegal + p.restore(s) + 
if hint { + p.hint("`import future.keywords.in` for `some x in xs` expressions") + } + } + + for { // collecting var args + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected var") + return nil + } + + decl.Symbols = append(decl.Symbols, p.parseVar()) + + p.scan() + + if p.s.tok != tokens.Comma { + break + } + } + + return NewExpr(decl).SetLocation(decl.Location) +} + +func (p *Parser) parseEvery() *Expr { + qb := &Every{} + qb.SetLoc(p.s.Loc()) + + // TODO(sr): We'd get more accurate error messages if we didn't rely on + // parseTermInfixCall here, but parsed "var [, var] in term" manually. + p.scan() + term := p.parseTermInfixCall() + if term == nil { + return nil + } + call, ok := term.Value.(Call) + if !ok { + p.illegal("expected `x[, y] in xs { ... }` expression") + return nil + } + switch call[0].String() { + case Member.Name: // x in xs + if len(call) != 3 { + p.illegal("illegal domain") + return nil + } + qb.Value = call[1] + qb.Domain = call[2] + case MemberWithKey.Name: // k, v in xs + if len(call) != 4 { + p.illegal("illegal domain") + return nil + } + qb.Key = call[1] + qb.Value = call[2] + qb.Domain = call[3] + if _, ok := qb.Key.Value.(Var); !ok { + p.illegal("expected key to be a variable") + return nil + } + default: + p.illegal("expected `x[, y] in xs { ... }` expression") + return nil + } + if _, ok := qb.Value.Value.(Var); !ok { + p.illegal("expected value to be a variable") + return nil + } + if p.s.tok == tokens.LBrace { // every x in xs { ... 
} + p.scan() + body := p.parseBody(tokens.RBrace) + if body == nil { + return nil + } + p.scan() + qb.Body = body + expr := NewExpr(qb).SetLocation(qb.Location) + + if p.s.tok == tokens.With { + if expr.With = p.parseWith(); expr.With == nil { + return nil + } + } + return expr + } + + p.illegal("missing body") + return nil +} + +func (p *Parser) parseExpr() *Expr { + + lhs := p.parseTermInfixCall() + if lhs == nil { + return nil + } + + if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil { + if rhs := p.parseTermInfixCall(); rhs != nil { + return NewExpr([]*Term{op, lhs, rhs}) + } + return nil + } + + // NOTE(tsandall): the top-level call term is converted to an expr because + // the evaluator does not support the call term type (nested calls are + // rewritten by the compiler.) + if call, ok := lhs.Value.(Call); ok { + return NewExpr([]*Term(call)) + } + + return NewExpr(lhs) +} + +// parseTermInfixCall consumes the next term from the input and returns it. If a +// term cannot be parsed the return value is nil and error will be recorded. The +// scanner will be advanced to the next token before returning. +// By starting out with infix relations (==, !=, <, etc) and further calling the +// other binary operators (|, &, arithmetics), it constitutes the binding +// precedence. +func (p *Parser) parseTermInfixCall() *Term { + if !p.enter() { + return nil + } + defer p.leave() + + return p.parseTermIn(nil, true, p.s.loc.Offset) +} + +func (p *Parser) parseTermInfixCallInList() *Term { + if !p.enter() { + return nil + } + defer p.leave() + + return p.parseTermIn(nil, false, p.s.loc.Offset) +} + +func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + // NOTE(sr): `in` is a bit special: besides `lhs in rhs`, it also + // supports `key, val in rhs`, so it can have an optional second lhs. + // `keyVal` triggers if we attempt to parse a second lhs argument (`mhs`). 
+ if lhs == nil { + lhs = p.parseTermRelation(nil, offset) + } + if lhs != nil { + if keyVal && p.s.tok == tokens.Comma { // second "lhs", or "middle hand side" + s := p.save() + p.scan() + if mhs := p.parseTermRelation(nil, offset); mhs != nil { + + if op := p.parseTermOpName(memberWithKeyRef, tokens.In); op != nil { + if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, mhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.In: + return p.parseTermIn(call, keyVal, offset) + default: + return call + } + } + } + } + p.restore(s) + } + + _ = scanAheadRef(p) + + if op := p.parseTermOpName(memberRef, tokens.In); op != nil { + if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.In: + return p.parseTermIn(call, keyVal, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermRelation(lhs *Term, offset int) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + if lhs == nil { + lhs = p.parseTermOr(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil { + if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte: + return p.parseTermRelation(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermOr(lhs *Term, offset int) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + if lhs == nil { + lhs = p.parseTermAnd(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Or); op != nil { + if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, 
offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Or: + return p.parseTermOr(call, offset) + default: + return call + } + } + } + return lhs + } + return nil +} + +func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + if lhs == nil { + lhs = p.parseTermArith(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.And); op != nil { + if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.And: + return p.parseTermAnd(call, offset) + default: + return call + } + } + } + return lhs + } + return nil +} + +func (p *Parser) parseTermArith(lhs *Term, offset int) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + if lhs == nil { + lhs = p.parseTermFactor(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil { + if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Add, tokens.Sub: + return p.parseTermArith(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + if lhs == nil { + lhs = p.parseTerm() + } + if lhs != nil { + if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil { + if rhs := p.parseTerm(); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Mul, tokens.Quo, tokens.Rem: + return p.parseTermFactor(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTerm() *Term { + if !p.enter() { + return nil + } + defer p.leave() + + if term, s := p.parsedTermCacheLookup(); s != nil { + p.restore(s) + return term + } + s0 := p.save() + + var term 
*Term + switch p.s.tok { + case tokens.Null: + term = NullTerm().SetLocation(p.s.Loc()) + case tokens.True: + term = BooleanTerm(true).SetLocation(p.s.Loc()) + case tokens.False: + term = BooleanTerm(false).SetLocation(p.s.Loc()) + case tokens.Sub, tokens.Dot, tokens.Number: + term = p.parseNumber() + case tokens.String: + term = p.parseString() + case tokens.TemplateStringPart, tokens.TemplateStringEnd: + term = p.parseTemplateString(false) + case tokens.RawTemplateStringPart, tokens.RawTemplateStringEnd: + term = p.parseTemplateString(true) + case tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment + term = p.parseVar() + case tokens.LBrack: + term = p.parseArray() + case tokens.LBrace: + term = p.parseSetOrObject() + case tokens.LParen: + offset := p.s.loc.Offset + p.scan() + if r := p.parseTermInfixCall(); r != nil { + if p.s.tok == tokens.RParen { + r.Location.Text = p.s.Text(offset, p.s.tokEnd) + term = r + } else { + p.error(p.s.Loc(), "non-terminated expression") + } + } + default: + p.illegalToken() + } + + term = p.parseTermFinish(term, false) + p.parsedTermCachePush(term, s0) + return term +} + +func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term { + if head == nil { + return nil + } + offset := p.s.loc.Offset + p.doScan(skipws, noScanOptions...) 
+ + switch p.s.tok { + case tokens.LParen, tokens.Dot, tokens.LBrack: + return p.parseRef(head, offset) + case tokens.Whitespace: + p.scan() + fallthrough + default: + if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) { + return RefTerm(head).SetLocation(head.Location) + } + return head + } +} + +func (p *Parser) parseHeadFinish(head *Term, skipws bool) *Term { + if head == nil { + return nil + } + offset := p.s.loc.Offset + p.scanWS() + + switch p.s.tok { + case tokens.Add, tokens.Sub, tokens.Mul, tokens.Quo, tokens.Rem, + tokens.And, tokens.Or, + tokens.Equal, tokens.Neq, tokens.Gt, tokens.Gte, tokens.Lt, tokens.Lte: + p.illegalToken() + case tokens.Whitespace: + p.doScan(skipws, noScanOptions...) + } + + switch p.s.tok { + case tokens.LParen, tokens.Dot, tokens.LBrack: + return p.parseRef(head, offset) + case tokens.Whitespace: + p.scan() + } + + if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) { + return RefTerm(head).SetLocation(head.Location) + } + return head +} + +func (p *Parser) parseNumber() *Term { + var prefix string + loc := p.s.Loc() + + // Handle negative sign + if p.s.tok == tokens.Sub { + prefix = "-" + p.scan() + switch p.s.tok { + case tokens.Number, tokens.Dot: + break + default: + p.illegal("expected number") + return nil + } + } + + // Handle decimal point + if p.s.tok == tokens.Dot { + prefix += "." + p.scan() + if p.s.tok != tokens.Number { + p.illegal("expected number") + return nil + } + } + + // Validate leading zeros: reject numbers like "01", "007", etc. + // Skip validation if prefix ends with '.' (like ".123") + hasDecimalPrefix := len(prefix) > 0 && prefix[len(prefix)-1] == '.' + + if !hasDecimalPrefix && len(p.s.lit) > 1 && p.s.lit[0] == '0' { + // These are the only valid cases starting with '0': + isDecimal := p.s.lit[1] == '.' 
// "0.123" + isScientific := len(p.s.lit) > 2 && (p.s.lit[1] == 'e' || p.s.lit[1] == 'E') // "0e5", "0E-3" + + if !isDecimal && !isScientific { + p.illegal("expected number without leading zero") + return nil + } + } + + // Ensure that the number is valid + s := prefix + p.s.lit + f, ok := new(big.Float).SetString(s) + if !ok { + p.illegal("invalid float") + return nil + } + + // Put limit on size of exponent to prevent non-linear cost of String() + // function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068 + // + // n == sign * mantissa * 2^exp + // 0.5 <= mantissa < 1.0 + // + // The limit is arbitrary. + exp := f.MantExp(nil) + if exp > 1e5 || exp < -1e5 || f.IsInf() { // +/- inf, exp is 0 + p.error(p.s.Loc(), "number too big") + return nil + } + + // Note: Use the original string, do *not* round trip from + // the big.Float as it can cause precision loss. + return NumberTerm(json.Number(s)).SetLocation(loc) +} + +func (p *Parser) parseString() *Term { + if p.s.lit[0] == '"' { + if p.s.lit == "\"\"" { + return NewTerm(InternedEmptyStringValue).SetLocation(p.s.Loc()) + } + + inner := p.s.lit[1 : len(p.s.lit)-1] + if !strings.ContainsRune(inner, '\\') { // nothing to un-escape + return StringTerm(inner).SetLocation(p.s.Loc()) + } + + var s string + if err := json.Unmarshal([]byte(p.s.lit), &s); err != nil { + p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit) + return nil + } + return StringTerm(s).SetLocation(p.s.Loc()) + } + return p.parseRawString() +} + +func (p *Parser) parseRawString() *Term { + if len(p.s.lit) < 2 { + return nil + } + return StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc()) +} + +func templateStringPartToStringLiteral(tok tokens.Token, lit string) (string, error) { + switch tok { + case tokens.TemplateStringPart, tokens.TemplateStringEnd: + inner := lit[1 : len(lit)-1] + if !strings.ContainsRune(inner, '\\') { // nothing to un-escape + return inner, nil + } + + buf := make([]byte, 
0, len(inner)+2) + buf = append(buf, '"') + buf = append(buf, inner...) + buf = append(buf, '"') + var s string + if err := json.Unmarshal(buf, &s); err != nil { + return "", fmt.Errorf("illegal template-string part: %s", lit) + } + return s, nil + case tokens.RawTemplateStringPart, tokens.RawTemplateStringEnd: + return lit[1 : len(lit)-1], nil + default: + return "", errors.New("expected template-string part") + } +} + +func (p *Parser) parseTemplateString(multiLine bool) *Term { + loc := p.s.Loc() + + if !p.po.Capabilities.ContainsFeature(FeatureTemplateStrings) { + p.errorf(loc, "template strings are not supported by current capabilities") + return nil + } + + var parts []Node + + for { + s, err := templateStringPartToStringLiteral(p.s.tok, p.s.lit) + if err != nil { + p.error(p.s.Loc(), err.Error()) + return nil + } + + // Don't add empty strings + if len(s) > 0 { + parts = append(parts, StringTerm(s).SetLocation(p.s.Loc())) + } + + if p.s.tok == tokens.TemplateStringEnd || p.s.tok == tokens.RawTemplateStringEnd { + break + } + + numCommentsBefore := len(p.s.comments) + p.scan() + numCommentsAfter := len(p.s.comments) + + expr := p.parseLiteral() + if expr == nil { + p.error(p.s.Loc(), "invalid template-string expression") + return nil + } + + if expr.Negated { + p.errorf(expr.Loc(), "unexpected negation ('%s') in template-string expression", tokens.KeywordFor(tokens.Not)) + return nil + } + + // Note: Actually unification + if expr.IsEquality() { + p.errorf(expr.Loc(), "unexpected unification ('=') in template-string expression") + return nil + } + + if expr.IsAssignment() { + p.errorf(expr.Loc(), "unexpected assignment (':=') in template-string expression") + return nil + } + + if expr.IsEvery() { + p.errorf(expr.Loc(), "unexpected '%s' in template-string expression", tokens.KeywordFor(tokens.Every)) + return nil + } + + if expr.IsSome() { + p.errorf(expr.Loc(), "unexpected '%s' in template-string expression", tokens.KeywordFor(tokens.Some)) + return nil + } 
+ + // FIXME: Can we optimize for collections and comprehensions too? To qualify, they must not contain refs or calls. + var nonOptional bool + if term, ok := expr.Terms.(*Term); ok && numCommentsAfter == numCommentsBefore { + switch term.Value.(type) { + case String, Number, Boolean, Null: + nonOptional = true + parts = append(parts, term) + } + } + + if !nonOptional { + parts = append(parts, expr) + } + + if p.s.tok != tokens.RBrace { + p.errorf(p.s.Loc(), "expected %s to end template string expression", tokens.RBrace) + return nil + } + + p.doScan(false, scanner.ContinueTemplateString(multiLine)) + } + + // When there are template-expressions, the initial location will only contain the text up to the first expression + loc.Text = p.s.Text(loc.Offset, p.s.tokEnd) + + return TemplateStringTerm(multiLine, parts...).SetLocation(loc) +} + +func (p *Parser) parseCall(operator *Term, offset int) (term *Term) { + if !p.enter() { + return nil + } + defer p.leave() + + loc := operator.Location + var end int + + defer func() { + p.setLoc(term, loc, offset, end) + }() + + p.scan() // steps over '(' + + if p.s.tok == tokens.RParen { // no args, i.e. set() or any.func() + end = p.s.tokEnd + p.scanWS() + if operator.Equal(setConstructor) { + return SetTerm() + } + return CallTerm(operator) + } + + if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil { + end = p.s.tokEnd + p.scanWS() + return CallTerm(r...) 
+ } + + return nil +} + +func (p *Parser) parseRef(head *Term, offset int) (term *Term) { + if !p.enter() { + return nil + } + defer p.leave() + + loc := head.Location + var end int + + defer func() { + p.setLoc(term, loc, offset, end) + }() + + switch h := head.Value.(type) { + case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: + // ok + default: + p.errorf(loc, "illegal ref (head cannot be %v)", ValueName(h)) + } + + ref := []*Term{head} + + for { + switch p.s.tok { + case tokens.Dot: + p.scanWS() + if p.s.tok != tokens.Ident && !p.isAllowedRefKeyword(p.s.tok) { + p.illegal("expected %v", tokens.Ident) + return nil + } + ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc())) + p.scanWS() + case tokens.LParen: + term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset) + if term != nil { + switch p.s.tok { + case tokens.Whitespace: + p.scan() + end = p.s.lastEnd + return term + case tokens.Dot, tokens.LBrack: + term = p.parseRef(term, offset) + } + } + end = p.s.lastEnd + return term + case tokens.LBrack: + p.scan() + if term := p.parseTermInfixCall(); term != nil { + if p.s.tok != tokens.RBrack { + p.illegal("expected %v", tokens.LBrack) + return nil + } + ref = append(ref, term) + p.scanWS() + } else { + return nil + } + case tokens.Whitespace: + end = p.s.lastEnd + p.scan() + return RefTerm(ref...) + default: + end = p.s.lastEnd + return RefTerm(ref...) + } + } +} + +func (p *Parser) parseArray() (term *Term) { + if !p.enter() { + return nil + } + defer p.leave() + + loc := p.s.Loc() + offset := p.s.loc.Offset + + defer func() { + p.setLoc(term, loc, offset, p.s.tokEnd) + }() + + p.scan() + + if p.s.tok == tokens.RBrack { + return ArrayTerm() + } + + potentialComprehension := true + + // Skip leading commas, eg [, x, y] + // Supported for backwards compatibility. In the future + // we should make this a parse error. 
+ if p.s.tok == tokens.Comma { + potentialComprehension = false + p.scan() + } + + s := p.save() + + // NOTE(tsandall): The parser cannot attempt a relational term here because + // of ambiguity around comprehensions. For example, given: + // + // {1 | 1} + // + // Does this represent a set comprehension or a set containing binary OR + // call? We resolve the ambiguity by prioritizing comprehensions. + head := p.parseTerm() + if head == nil { + return nil + } + + switch p.s.tok { + case tokens.RBrack: + return ArrayTerm(head) + case tokens.Comma: + p.scan() + if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil { + return ArrayTerm(terms...) + } + return nil + case tokens.Or: + if potentialComprehension { + // Try to parse as if it is an array comprehension + p.scan() + if body := p.parseBody(tokens.RBrack); body != nil { + return ArrayComprehensionTerm(head, body) + } + if p.s.tok != tokens.Comma { + return nil + } + } + // fall back to parsing as a normal array definition + } + + p.restore(s) + + if terms := p.parseTermList(tokens.RBrack, nil); terms != nil { + return ArrayTerm(terms...) + } + return nil +} + +func (p *Parser) parseSetOrObject() (term *Term) { + if !p.enter() { + return nil + } + defer p.leave() + + loc := p.s.Loc() + offset := p.s.loc.Offset + + defer func() { + p.setLoc(term, loc, offset, p.s.tokEnd) + }() + + p.scan() + + if p.s.tok == tokens.RBrace { + return ObjectTerm() + } + + potentialComprehension := true + + // Skip leading commas, eg {, x, y} + // Supported for backwards compatibility. In the future + // we should make this a parse error. + if p.s.tok == tokens.Comma { + potentialComprehension = false + p.scan() + } + + s := p.save() + + // Try parsing just a single term first to give comprehensions higher + // priority to "or" calls in ambiguous situations. Eg: { a | b } + // will be a set comprehension. + // + // Note: We don't know yet if it is a set or object being defined. 
+ head := p.parseTerm() + if head == nil { + return nil + } + + switch p.s.tok { + case tokens.Or: + if potentialComprehension { + return p.parseSet(s, head, potentialComprehension) + } + case tokens.RBrace, tokens.Comma: + return p.parseSet(s, head, potentialComprehension) + case tokens.Colon: + return p.parseObject(head, potentialComprehension) + } + + p.restore(s) + + head = p.parseTermInfixCallInList() + if head == nil { + return nil + } + + switch p.s.tok { + case tokens.RBrace, tokens.Comma: + return p.parseSet(s, head, false) + case tokens.Colon: + // It still might be an object comprehension, eg { a+1: b | ... } + return p.parseObject(head, potentialComprehension) + } + + p.illegal("non-terminated set") + return nil +} + +func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + switch p.s.tok { + case tokens.RBrace: + return SetTerm(head) + case tokens.Comma: + p.scan() + if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil { + return SetTerm(terms...) + } + case tokens.Or: + if potentialComprehension { + // Try to parse as if it is a set comprehension + p.scan() + if body := p.parseBody(tokens.RBrace); body != nil { + return SetComprehensionTerm(head, body) + } + if p.s.tok != tokens.Comma { + return nil + } + } + // Fall back to parsing as normal set definition + p.restore(s) + if terms := p.parseTermList(tokens.RBrace, nil); terms != nil { + return SetTerm(terms...) + } + } + return nil +} + +func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + // NOTE(tsandall): Assumption: this function is called after parsing the key + // of the head element and then receiving a colon token from the scanner. + // Advance beyond the colon and attempt to parse an object. 
+ if p.s.tok != tokens.Colon { + panic("expected colon") + } + p.scan() + + s := p.save() + + // NOTE(sr): We first try to parse the value as a term (`v`), and see + // if we can parse `{ x: v | ...}` as a comprehension. + // However, if we encounter either a Comma or an RBace, it cannot be + // parsed as a comprehension -- so we save double work further down + // where `parseObjectFinish(k, v, false)` would only exercise the + // same code paths once more. + v := p.parseTerm() + if v == nil { + return nil + } + + potentialRelation := true + if potentialComprehension { + switch p.s.tok { + case tokens.RBrace, tokens.Comma: + potentialRelation = false + fallthrough + case tokens.Or: + if term := p.parseObjectFinish(k, v, true); term != nil { + return term + } + } + } + + p.restore(s) + + if potentialRelation { + v := p.parseTermInfixCallInList() + if v == nil { + return nil + } + + switch p.s.tok { + case tokens.RBrace, tokens.Comma: + return p.parseObjectFinish(k, v, false) + } + } + + p.illegal("non-terminated object") + return nil +} + +func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term { + if !p.enter() { + return nil + } + defer p.leave() + + switch p.s.tok { + case tokens.RBrace: + return ObjectTerm([2]*Term{key, val}) + case tokens.Or: + if potentialComprehension { + p.scan() + if body := p.parseBody(tokens.RBrace); body != nil { + return ObjectComprehensionTerm(key, val, body) + } + } else { + p.illegal("non-terminated object") + } + case tokens.Comma: + p.scan() + if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil { + return ObjectTerm(r...) 
+ } + } + return nil +} + +func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term { + if p.s.tok == end { + return r + } + for { + term := p.parseTermInfixCallInList() + if term != nil { + r = append(r, term) + switch p.s.tok { + case end: + return r + case tokens.Comma: + p.scan() + if p.s.tok == end { + return r + } + continue + default: + p.illegal("expected %q or %q", tokens.Comma, end) + return nil + } + } + return nil + } +} + +func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term { + if p.s.tok == end { + return r + } + for { + key := p.parseTermInfixCallInList() + if key != nil { + switch p.s.tok { + case tokens.Colon: + p.scan() + if val := p.parseTermInfixCallInList(); val != nil { + r = append(r, [2]*Term{key, val}) + switch p.s.tok { + case end: + return r + case tokens.Comma: + p.scan() + if p.s.tok == end { + return r + } + continue + default: + p.illegal("expected %q or %q", tokens.Comma, end) + return nil + } + } + default: + p.illegal("expected %q", tokens.Colon) + return nil + } + } + return nil + } +} + +func (p *Parser) parseTermOp(values ...tokens.Token) *Term { + if slices.Contains(values, p.s.tok) { + loc := p.s.Loc() + r := RefTerm(VarTerm(p.s.tok.String()).SetLocation(loc)).SetLocation(loc) + p.scan() + return r + } + return nil +} + +func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term { + if slices.Contains(values, p.s.tok) { + cp := ref.Copy() + loc := p.s.Loc() + for _, r := range cp { + r.SetLocation(loc) + } + t := RefTerm(cp...) 
+ t.SetLocation(loc) + p.scan() + return t + } + return nil +} + +func (p *Parser) parseVar() *Term { + if p.s.lit == WildcardString { + // Update wildcard values with unique identifiers + return NewTerm(p.genwildcard()).SetLocation(p.s.Loc()) + } + + return VarTerm(p.s.lit).SetLocation(p.s.Loc()) +} + +func (p *Parser) genwildcard() Value { + var v Value + if p.s.wildcard < len(preAllocWildcards) { + v = preAllocWildcards[p.s.wildcard] + } else { + v = Var(WildcardPrefix + strconv.Itoa(p.s.wildcard)) + } + p.s.wildcard++ + + return v +} + +func writeHints(msg *strings.Builder, hints []string) { + switch len(hints) { + case 0: // nothing to do + case 1: + msg.WriteString(" (hint: ") + msg.WriteString(hints[0]) + msg.WriteByte(')') + default: + msg.WriteString(" (hints: ") + for i, h := range hints { + if i > 0 { + msg.WriteString(", ") + } + msg.WriteString(h) + } + msg.WriteByte(')') + } +} + +func (p *Parser) error(loc *location.Location, reason string) { + msg := reason + if len(p.s.hints) > 0 { + sb := &strings.Builder{} + sb.WriteString(reason) + writeHints(sb, p.s.hints) + msg = sb.String() + } + + p.s.errors = append(p.s.errors, &Error{ + Code: ParseErr, + Message: msg, + Location: loc, + Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset), + }) + p.s.hints = nil +} + +func (p *Parser) errorf(loc *location.Location, f string, a ...any) { + msg := &strings.Builder{} + fmt.Fprintf(msg, f, a...) 
+ + if len(p.s.hints) > 0 { + writeHints(msg, p.s.hints) + } + + p.s.errors = append(p.s.errors, &Error{ + Code: ParseErr, + Message: msg.String(), + Location: loc, + Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset), + }) + p.s.hints = nil +} + +func (p *Parser) hint(s string) { + p.s.hints = append(p.s.hints, s) +} + +func (p *Parser) illegal(note string, a ...any) { + if p.s.tok == tokens.Illegal { + p.errorf(p.s.Loc(), "illegal token") + return + } + + tok := p.s.tok.String() + + tokType := "token" + if _, ok := allFutureKeywords[tok]; ok || tokens.IsKeyword(p.s.tok) { + tokType = "keyword" + } + + if len(note) > 0 { + p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, fmt.Sprintf(note, a...)) + } else { + p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType) + } +} + +func (p *Parser) illegalToken() { + p.illegal("") +} + +var noScanOptions []scanner.ScanOption + +func (p *Parser) scan() { + p.doScan(true, noScanOptions...) +} + +func (p *Parser) scanWS() { + p.doScan(false, noScanOptions...) +} + +func (p *Parser) doScan(skipws bool, scanOpts ...scanner.ScanOption) { + + // NOTE(tsandall): the last position is used to compute the "text" field for + // complex AST nodes. Whitespace never affects the last position of an AST + // node so do not update it when scanning. + if p.s.tok != tokens.Whitespace { + p.s.lastEnd = p.s.tokEnd + p.s.skippedNL = false + } + + var errs []scanner.Error + for { + var pos scanner.Position + p.s.tok, pos, p.s.lit, errs = p.s.s.Scan(scanOpts...) 
+ + p.s.tokEnd = pos.End + p.s.loc.Row = pos.Row + p.s.loc.Col = pos.Col + p.s.loc.Offset = pos.Offset + p.s.loc.Text = p.s.Text(pos.Offset, pos.End) + p.s.loc.Tabs = pos.Tabs + + for _, err := range errs { + p.error(p.s.Loc(), err.Message) + } + + if len(errs) > 0 { + p.s.tok = tokens.Illegal + } + + if p.s.tok == tokens.Whitespace { + if p.s.lit == "\n" { + p.s.skippedNL = true + } + if skipws { + continue + } + } + + if p.s.tok != tokens.Comment { + break + } + + // For backwards compatibility leave a nil + // Text value if there is no text rather than + // an empty string. + var commentText []byte + if len(p.s.lit) > 1 { + commentText = []byte(p.s.lit[1:]) + } + comment := NewComment(commentText) + comment.SetLoc(p.s.Loc()) + p.s.comments = append(p.s.comments, comment) + } +} + +func (p *Parser) save() *state { + cpy := *p.s + s := *cpy.s + cpy.s = &s + return &cpy +} + +func (p *Parser) restore(s *state) { + p.s = s +} + +func setLocRecursive(x any, loc *location.Location) { + WalkNodes(x, func(n Node) bool { + n.SetLoc(loc) + return false + }) +} + +func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term { + if term != nil { + cpy := *loc + term.Location = &cpy + term.Location.Text = p.s.Text(offset, end) + } + return term +} + +func (p *Parser) validateDefaultRuleValue(rule *Rule) bool { + if rule.Head.Value == nil { + p.error(rule.Loc(), "illegal default rule (must have a value)") + return false + } + + valid := true + vis := NewGenericVisitor(func(x any) bool { + switch x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures + return true + case Ref, Var, Call: + p.error(rule.Loc(), fmt.Sprintf("illegal default rule (value cannot contain %v)", TypeName(x))) + valid = false + return true + } + return false + }) + + vis.Walk(rule.Head.Value.Value) + return valid +} + +func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool { + + valid := true + vars := NewVarSet() + + vis := 
NewGenericVisitor(func(x any) bool { + switch x := x.(type) { + case Var: + if vars.Contains(x) { + p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", x)) + valid = false + return true + } + vars.Add(x) + + case *Term: + switch v := x.Value.(type) { + case Var: // do nothing + default: + p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", ValueName(v))) + valid = false + return true + } + } + + return false + }) + + vis.Walk(rule.Head.Args) + return valid +} + +// We explicitly use yaml unmarshalling, to accommodate for the '_' in 'related_resources', +// which isn't handled properly by json for some reason. +type rawAnnotation struct { + Scope string `yaml:"scope"` + Title string `yaml:"title"` + Entrypoint bool `yaml:"entrypoint"` + Description string `yaml:"description"` + Organizations []string `yaml:"organizations"` + RelatedResources []any `yaml:"related_resources"` + Authors []any `yaml:"authors"` + Schemas []map[string]any `yaml:"schemas"` + Compile map[string]any `yaml:"compile"` + Custom map[string]any `yaml:"custom"` +} + +type metadataParser struct { + comments []*Comment + buf *bytes.Buffer + loc *location.Location +} + +func (b *metadataParser) Reset(loc *location.Location) { + b.comments = b.comments[:0] + b.loc = loc + if b.buf != nil { + b.buf.Reset() + } +} + +func (b *metadataParser) Append(c *Comment) { + b.buf.Write(bytes.TrimPrefix(c.Text, []byte(" "))) + b.buf.WriteByte('\n') + b.comments = append(b.comments, c) +} + +var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? 
line ([[:digit:]]+):`) + +func (b *metadataParser) Parse() (result *Annotations, err error) { + if len(bytes.TrimSpace(b.buf.Bytes())) == 0 { + return nil, errors.New("expected METADATA block, found whitespace") + } + + var raw rawAnnotation + if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil { + var comment *Comment + match := yamlLineErrRegex.FindStringSubmatch(err.Error()) + if len(match) == 2 { + index, err2 := strconv.Atoi(match[1]) + if err2 == nil { + if index >= len(b.comments) { + comment = b.comments[len(b.comments)-1] + } else { + comment = b.comments[index] + } + b.loc = comment.Location + } + } + + if match == nil && len(b.comments) > 0 { + b.loc = b.comments[0].Location + } + + return nil, augmentYamlError(err, b.comments) + } + + result = &Annotations{ + comments: b.comments, + Scope: raw.Scope, + Entrypoint: raw.Entrypoint, + Title: raw.Title, + Description: raw.Description, + Organizations: raw.Organizations, + } + + for _, v := range raw.RelatedResources { + rr, err := parseRelatedResource(v) + if err != nil { + return nil, fmt.Errorf("invalid related-resource definition %s: %w", v, err) + } + result.RelatedResources = append(result.RelatedResources, rr) + } + + if raw.Compile != nil { + result.Compile = &CompileAnnotation{} + if unknowns, ok := raw.Compile["unknowns"]; ok { + if unknowns, ok := unknowns.([]any); ok { + result.Compile.Unknowns = make([]Ref, len(unknowns)) + for i := range unknowns { + if unknown, ok := unknowns[i].(string); ok { + ref, err := ParseRef(unknown) + if err != nil { + return nil, fmt.Errorf("invalid unknowns element %q: %w", unknown, err) + } + result.Compile.Unknowns[i] = ref + } + } + } + } + if mask, ok := raw.Compile["mask_rule"]; ok { + if mask, ok := mask.(string); ok { + maskTerm, err := ParseTerm(mask) + if err != nil { + return nil, fmt.Errorf("invalid mask_rule annotation %q: %w", mask, err) + } + switch v := maskTerm.Value.(type) { + case Var, String: + result.Compile.MaskRule = Ref{maskTerm} + case 
Ref: + result.Compile.MaskRule = v + default: + return nil, fmt.Errorf("invalid mask_rule annotation type %q: %[1]T", mask) + } + } + } + } + + for _, pair := range raw.Schemas { + k, v := unwrapPair(pair) + + var a SchemaAnnotation + var err error + + a.Path, err = ParseRef(k) + if err != nil { + return nil, errors.New("invalid document reference") + } + + switch v := v.(type) { + case string: + a.Schema, err = parseSchemaRef(v) + if err != nil { + return nil, err + } + case map[string]any: + w, err := convertYAMLMapKeyTypes(v, nil) + if err != nil { + return nil, fmt.Errorf("invalid schema definition: %w", err) + } + a.Definition = &w + default: + return nil, fmt.Errorf("invalid schema declaration for path %q", k) + } + + result.Schemas = append(result.Schemas, &a) + } + + for _, v := range raw.Authors { + author, err := parseAuthor(v) + if err != nil { + return nil, fmt.Errorf("invalid author definition %s: %w", v, err) + } + result.Authors = append(result.Authors, author) + } + + if raw.Custom != nil { + result.Custom = make(map[string]any, len(raw.Custom)) + for k, v := range raw.Custom { + if result.Custom[k], err = convertYAMLMapKeyTypes(v, nil); err != nil { + return nil, err + } + } + } + + result.Location = b.loc + + // recreate original text of entire metadata block for location text attribute + original := bytes.TrimSuffix(b.buf.Bytes(), newlineBytes) + numLines := bytes.Count(original, newlineBytes) + 1 + preAlloc := len("# METADATA\n") + len(original) + numLines*2 // '# ' prefix added per line + + result.Location.Text = append(make([]byte, 0, preAlloc), "# METADATA\n"...) + + for line := range bytes.SplitAfterSeq(original, newlineBytes) { + result.Location.Text = append(result.Location.Text, "# "...) + result.Location.Text = append(result.Location.Text, line...) + } + + return result, err +} + +// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise +// cryptic error. 
These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed +// to be correct. +func augmentYamlError(err error, comments []*Comment) error { + // Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol + for _, comment := range comments { + if bytes.IndexByte(comment.Text, ':') == -1 { + continue + } + parts := bytes.Split(comment.Text, []byte{':'})[1:] + + var invalidSpaces []string + for partIndex, part := range parts { + if len(part) == 0 && partIndex == len(parts)-1 { + break + } + + r, _ := utf8.DecodeRune(part) + if r == ' ' || r == '\t' { + break + } + + invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r)) + } + if len(invalidSpaces) > 0 { + err = fmt.Errorf( + "%s\n Hint: on line %d, symbol(s) %v immediately following a"+ + " key/value separator ':' is not a legal yaml space character", + err.Error(), comment.Location.Row, invalidSpaces) + } + } + return err +} + +func unwrapPair(pair map[string]any) (string, any) { + for k, v := range pair { + return k, v + } + return "", nil +} + +var errInvalidSchemaRef = errors.New("invalid schema reference") + +// NOTE(tsandall): 'schema' is not registered as a root because it's not +// supported by the compiler or evaluator today. Once we fix that, we can remove +// this function. 
+func parseSchemaRef(s string) (Ref, error) { + + term, err := ParseTerm(s) + if err == nil { + switch v := term.Value.(type) { + case Var: + if term.Equal(SchemaRootDocument) { + return SchemaRootRef.Copy(), nil + } + case Ref: + if v.HasPrefix(SchemaRootRef) { + return v, nil + } + } + } + + return nil, errInvalidSchemaRef +} + +func parseRelatedResource(rr any) (*RelatedResourceAnnotation, error) { + rr, err := convertYAMLMapKeyTypes(rr, nil) + if err != nil { + return nil, err + } + + switch rr := rr.(type) { + case string: + if len(rr) > 0 { + u, err := url.Parse(rr) + if err != nil { + return nil, err + } + return &RelatedResourceAnnotation{Ref: *u}, nil + } + return nil, errors.New("ref URL may not be empty string") + case map[string]any: + description := strings.TrimSpace(getSafeString(rr, "description")) + ref := strings.TrimSpace(getSafeString(rr, "ref")) + if len(ref) > 0 { + u, err := url.Parse(ref) + if err != nil { + return nil, err + } + return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil + } + return nil, errors.New("'ref' value required in object") + } + + return nil, errors.New("invalid value type, must be string or map") +} + +func parseAuthor(a any) (*AuthorAnnotation, error) { + a, err := convertYAMLMapKeyTypes(a, nil) + if err != nil { + return nil, err + } + + switch a := a.(type) { + case string: + return parseAuthorString(a) + case map[string]any: + name := strings.TrimSpace(getSafeString(a, "name")) + email := strings.TrimSpace(getSafeString(a, "email")) + if len(name) > 0 || len(email) > 0 { + return &AuthorAnnotation{name, email}, nil + } + return nil, errors.New("'name' and/or 'email' values required in object") + } + + return nil, errors.New("invalid value type, must be string or map") +} + +func getSafeString(m map[string]any, k string) string { + if v, found := m[k]; found { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +const emailPrefix = "<" +const emailSuffix = ">" + +// parseAuthor 
parses a string into an AuthorAnnotation. If the last word of the input string is enclosed within <>, +// it is extracted as the author's email. The email may not contain whitelines, as it then will be interpreted as +// multiple words. +func parseAuthorString(s string) (*AuthorAnnotation, error) { + parts := strings.Fields(s) + + if len(parts) == 0 { + return nil, errors.New("author is an empty string") + } + + namePartCount := len(parts) + trailing := parts[namePartCount-1] + var email string + if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) && + strings.HasSuffix(trailing, emailSuffix) { + email = trailing[len(emailPrefix):] + email = email[:len(email)-len(emailSuffix)] + namePartCount -= 1 + } + + name := strings.Join(parts[0:namePartCount], " ") + + return &AuthorAnnotation{Name: name, Email: email}, nil +} + +func convertYAMLMapKeyTypes(x any, path []string) (any, error) { + var err error + switch x := x.(type) { + case map[any]any: + result := make(map[string]any, len(x)) + for k, v := range x { + str, ok := k.(string) + if !ok { + return nil, fmt.Errorf("invalid map key type(s): %v", strings.Join(path, "/")) + } + result[str], err = convertYAMLMapKeyTypes(v, append(path, str)) + if err != nil { + return nil, err + } + } + return result, nil + case []any: + for i := range x { + x[i], err = convertYAMLMapKeyTypes(x[i], append(path, strconv.Itoa(i))) + if err != nil { + return nil, err + } + } + return x, nil + default: + return x, nil + } +} + +// futureKeywords is the source of truth for future keywords that will +// eventually become standard keywords inside of Rego. +var futureKeywords = map[string]tokens.Token{} + +// futureKeywordsV0 is the source of truth for future keywords that were +// not yet a standard part of Rego in v0, and required importing. 
+var futureKeywordsV0 = map[string]tokens.Token{ + "in": tokens.In, + "every": tokens.Every, + "contains": tokens.Contains, + "if": tokens.If, +} + +var allFutureKeywords map[string]tokens.Token + +func IsFutureKeyword(s string) bool { + return IsFutureKeywordForRegoVersion(s, RegoV1) +} + +func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool { + var yes bool + + switch v { + case RegoV0, RegoV0CompatV1: + _, yes = futureKeywordsV0[s] + case RegoV1: + _, yes = futureKeywords[s] + } + + return yes +} + +// isFutureKeyword answers if keyword is from the "future" with the parser options set. +func (p *Parser) isFutureKeyword(s string) bool { + return IsFutureKeywordForRegoVersion(s, p.po.RegoVersion) +} + +func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { + path := imp.Path.Value.(Ref) + + if len(path) == 1 || !path[1].Equal(InternedTerm("keywords")) { + p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`") + return + } + + if imp.Alias != "" { + p.errorf(imp.Path.Location, "`future` imports cannot be aliased") + return + } + + kwds := util.Keys(allowedFutureKeywords) + + switch len(path) { + case 2: // all keywords imported, nothing to do + case 3: // one keyword imported + kw, ok := path[2].Value.(String) + if !ok { + p.errorf(imp.Path.Location, "invalid import, must be `future.keywords.x`, e.g. 
`import future.keywords.in`") + return + } + keyword := string(kw) + _, ok = allowedFutureKeywords[keyword] + if !ok { + sort.Strings(kwds) // so the error message is stable + p.errorf(imp.Path.Location, "unexpected keyword, must be one of %v", kwds) + return + } + + kwds = []string{keyword} // overwrite + } + for _, kw := range kwds { + p.s.s.AddKeyword(kw, allowedFutureKeywords[kw]) + } +} + +func (p *Parser) regoV1Import(imp *Import) { + if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) && !p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef) + return + } + + path := imp.Path.Value.(Ref) + + // v1 is only valid option + if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 { + p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef) + return + } + + if p.po.EffectiveRegoVersion() == RegoV1 { + // We're parsing for Rego v1, where the 'rego.v1' import is a no-op. + return + } + + if imp.Alias != "" { + p.errorf(imp.Path.Location, "`rego` imports cannot be aliased") + return + } + + // import all future keywords with the rego.v1 import + kwds := util.Keys(futureKeywordsV0) + + p.s.s.SetRegoV1Compatible() + for _, kw := range kwds { + p.s.s.AddKeyword(kw, futureKeywordsV0[kw]) + } +} + +func init() { + allFutureKeywords = map[string]tokens.Token{} + maps.Copy(allFutureKeywords, futureKeywords) + maps.Copy(allFutureKeywords, futureKeywordsV0) +} + +// enter increments the recursion depth counter and checks if it exceeds the maximum. +// Returns false if the maximum is exceeded, true otherwise. +// If p.maxRecursionDepth is 0 or negative, the check is effectively disabled. 
+func (p *Parser) enter() bool { + p.recursionDepth++ + if p.maxRecursionDepth > 0 && p.recursionDepth > p.maxRecursionDepth { + p.error(p.s.Loc(), ErrMaxParsingRecursionDepthExceeded.Error()) + p.recursionDepth-- + return false + } + return true +} + +// leave decrements the recursion depth counter. +func (p *Parser) leave() { + p.recursionDepth-- +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go new file mode 100644 index 0000000000..ab3de33a1f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go @@ -0,0 +1,808 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// This file contains extra functions for parsing Rego. +// Most of the parsing is handled by the code in parser.go, +// however, there are additional utilities that are +// helpful for dealing with Rego source inputs (e.g., REPL +// statements, source files, etc.) + +package ast + +import ( + "errors" + "fmt" + "slices" + "strings" + "unicode" + + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" +) + +// MustParseBody returns a parsed body. +// If an error occurs during parsing, panic. +func MustParseBody(input string) Body { + return MustParseBodyWithOpts(input, ParserOptions{}) +} + +// MustParseBodyWithOpts returns a parsed body. +// If an error occurs during parsing, panic. +func MustParseBodyWithOpts(input string, opts ParserOptions) Body { + parsed, err := ParseBodyWithOpts(input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseExpr returns a parsed expression. +// If an error occurs during parsing, panic. +func MustParseExpr(input string) *Expr { + parsed, err := ParseExpr(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseImports returns a slice of imports. +// If an error occurs during parsing, panic. 
+func MustParseImports(input string) []*Import { + parsed, err := ParseImports(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseModule returns a parsed module. +// If an error occurs during parsing, panic. +func MustParseModule(input string) *Module { + return MustParseModuleWithOpts(input, ParserOptions{}) +} + +// MustParseModuleWithOpts returns a parsed module. +// If an error occurs during parsing, panic. +func MustParseModuleWithOpts(input string, opts ParserOptions) *Module { + parsed, err := ParseModuleWithOpts("", input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParsePackage returns a Package. +// If an error occurs during parsing, panic. +func MustParsePackage(input string) *Package { + parsed, err := ParsePackage(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseStatements returns a slice of parsed statements. +// If an error occurs during parsing, panic. +func MustParseStatements(input string) []Statement { + parsed, _, err := ParseStatements("", input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseStatement returns exactly one statement. +// If an error occurs during parsing, panic. +func MustParseStatement(input string) Statement { + parsed, err := ParseStatement(input) + if err != nil { + panic(err) + } + return parsed +} + +func MustParseStatementWithOpts(input string, popts ParserOptions) Statement { + parsed, err := ParseStatementWithOpts(input, popts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRef returns a parsed reference. +// If an error occurs during parsing, panic. +func MustParseRef(input string) Ref { + parsed, err := ParseRef(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRule returns a parsed rule. +// If an error occurs during parsing, panic. 
+func MustParseRule(input string) *Rule { + parsed, err := ParseRule(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRuleWithOpts returns a parsed rule. +// If an error occurs during parsing, panic. +func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule { + parsed, err := ParseRuleWithOpts(input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseTerm returns a parsed term. +// If an error occurs during parsing, panic. +func MustParseTerm(input string) *Term { + parsed, err := ParseTerm(input) + if err != nil { + panic(err) + } + return parsed +} + +// ParseRuleFromBody returns a rule if the body can be interpreted as a rule +// definition. Otherwise, an error is returned. +func ParseRuleFromBody(module *Module, body Body) (*Rule, error) { + + if len(body) != 1 { + return nil, errors.New("multiple expressions cannot be used for rule head") + } + + return ParseRuleFromExpr(module, body[0]) +} + +// ParseRuleFromExpr returns a rule if the expression can be interpreted as a +// rule definition. +func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) { + + if len(expr.With) > 0 { + return nil, errors.New("expressions using with keyword cannot be used for rule head") + } + + if expr.Negated { + return nil, errors.New("negated expressions cannot be used for rule head") + } + + if _, ok := expr.Terms.(*SomeDecl); ok { + return nil, errors.New("'some' declarations cannot be used for rule head") + } + + if term, ok := expr.Terms.(*Term); ok { + switch v := term.Value.(type) { + case Ref: + if len(v) > 2 { // 2+ dots + return ParseCompleteDocRuleWithDotsFromTerm(module, term) + } + return ParsePartialSetDocRuleFromTerm(module, term) + default: + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(v)) + } + } + + if _, ok := expr.Terms.([]*Term); !ok { + // This is a defensive check in case other kinds of expression terms are + // introduced in the future. 
+ return nil, errors.New("expression cannot be used for rule head") + } + + if expr.IsEquality() { + return parseCompleteRuleFromEq(module, expr) + } else if expr.IsAssignment() { + rule, err := parseCompleteRuleFromEq(module, expr) + if err != nil { + return nil, err + } + rule.Head.Assign = true + return rule, nil + } + + if _, ok := BuiltinMap[expr.Operator().String()]; ok { + return nil, errors.New("rule name conflicts with built-in function") + } + + return ParseRuleFromCallExpr(module, expr.Terms.([]*Term)) +} + +func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) { + + // ensure the rule location is set to the expr location + // the helper functions called below try to set the location based + // on the terms they've been provided but that is not as accurate. + defer func() { + if rule != nil { + rule.Location = expr.Location + rule.Head.Location = expr.Location + } + }() + + lhs, rhs := expr.Operand(0), expr.Operand(1) + if lhs == nil || rhs == nil { + return nil, errors.New("assignment requires two operands") + } + + rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs) + if err == nil { + return rule, nil + } + + rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) + if err == nil { + return rule, nil + } + + return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) +} + +// ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can +// be interpreted as a complete document definition declared with the assignment +// operator. +func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + + rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) + if err != nil { + return nil, err + } + + rule.Head.Assign = true + + return rule, nil +} + +// ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be +// interpreted as a complete document definition. 
+func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + var head *Head + + if v, ok := lhs.Value.(Var); ok { + // Modify the code to add the location to the head ref + head = VarHead(v, lhs.Location, nil) + } else if r, ok := lhs.Value.(Ref); ok { // groundness ? + if _, ok := r[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", r) + } + head = RefHead(r) + if len(r) > 1 && !r[len(r)-1].IsGround() { + return nil, errors.New("ref not ground") + } + } else { + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(lhs.Value)) + } + head.Value = rhs + head.Location = lhs.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + return &Rule{ + Location: lhs.Location, + Head: head, + Body: body, + Module: module, + generatedBody: true, + }, nil +} + +func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) { + ref, ok := term.Value.(Ref) + if !ok { + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(term.Value)) + } + + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location)) + head.generatedValue = true + head.Location = term.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) + + return &Rule{ + Location: term.Location, + Head: head, + Body: body, + Module: module, + }, nil +} + +// ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be +// interpreted as a partial object document definition. 
+func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + ref, ok := lhs.Value.(Ref) + if !ok { + return nil, fmt.Errorf("%v cannot be used as rule name", ValueName(lhs.Value)) + } + + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref, rhs) + if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements + head.Name = ref[0].Value.(Var) + head.Key = ref[1] + } + head.Location = rhs.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + rule := &Rule{ + Location: rhs.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted +// as a partial set document definition. +func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) { + + ref, ok := term.Value.(Ref) + if !ok || len(ref) == 1 { + return nil, fmt.Errorf("%vs cannot be used for rule head", ValueName(term.Value)) + } + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref) + if len(ref) == 2 { + v, ok := ref[0].Value.(Var) + if !ok { + return nil, fmt.Errorf("%vs cannot be used for rule head", ValueName(term.Value)) + } + // Modify the code to add the location to the head ref + head = VarHead(v, ref[0].Location, nil) + head.Key = ref[1] + } + head.Location = term.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) + + rule := &Rule{ + Location: term.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a +// function definition (e.g., f(x) = y => f(x) = y { true }). 
+func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + + call, ok := lhs.Value.(Call) + if !ok { + return nil, errors.New("must be call") + } + + ref, ok := call[0].Value.(Ref) + if !ok { + return nil, fmt.Errorf("%vs cannot be used in function signature", ValueName(call[0].Value)) + } + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref, rhs) + head.Location = lhs.Location + head.Args = Args(call[1:]) + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + rule := &Rule{ + Location: lhs.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a +// function returning true or some value (e.g., f(x) => f(x) = true { true }). +func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) { + + if len(terms) <= 1 { + return nil, errors.New("rule argument list must take at least one argument") + } + + loc := terms[0].Location + ref := terms[0].Value.(Ref) + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + head := RefHead(ref, BooleanTerm(true).SetLocation(loc)) + head.Location = loc + head.Args = terms[1:] + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)) + + rule := &Rule{ + Location: loc, + Head: head, + Module: module, + Body: body, + } + return rule, nil +} + +// ParseImports returns a slice of Import objects. +func ParseImports(input string) ([]*Import, error) { + stmts, _, err := ParseStatements("", input) + if err != nil { + return nil, err + } + result := []*Import{} + for _, stmt := range stmts { + if imp, ok := stmt.(*Import); ok { + result = append(result, imp) + } else { + return nil, fmt.Errorf("expected import but got %T", stmt) + } + } + return result, nil +} + +// ParseModule returns a parsed Module object. 
+// For details on Module objects and their fields, see policy.go. +// Empty input will return nil, nil. +func ParseModule(filename, input string) (*Module, error) { + return ParseModuleWithOpts(filename, input, ParserOptions{}) +} + +// ParseModuleWithOpts returns a parsed Module object, and has an additional input ParserOptions +// For details on Module objects and their fields, see policy.go. +// Empty input will return nil, nil. +func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) { + stmts, comments, err := ParseStatementsWithOpts(filename, input, popts) + if err != nil { + return nil, err + } + return parseModule(filename, stmts, comments, popts.RegoVersion) +} + +// ParseBody returns exactly one body. +// If multiple bodies are parsed, an error is returned. +func ParseBody(input string) (Body, error) { + return ParseBodyWithOpts(input, ParserOptions{SkipRules: true}) +} + +// ParseBodyWithOpts returns exactly one body. It does _not_ set SkipRules: true on its own, +// but respects whatever ParserOptions it's been given. +func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) { + + stmts, _, err := ParseStatementsWithOpts("", input, popts) + if err != nil { + return nil, err + } + + result := Body{} + + for _, stmt := range stmts { + switch stmt := stmt.(type) { + case Body: + for i := range stmt { + result.Append(stmt[i]) + } + case *Comment: + // skip + default: + return nil, fmt.Errorf("expected body but got %T", stmt) + } + } + + return result, nil +} + +// ParseExpr returns exactly one expression. +// If multiple expressions are parsed, an error is returned. +func ParseExpr(input string) (*Expr, error) { + body, err := ParseBody(input) + if err != nil { + return nil, fmt.Errorf("failed to parse expression: %w", err) + } + if len(body) != 1 { + return nil, fmt.Errorf("expected exactly one expression but got: %v", body) + } + return body[0], nil +} + +// ParsePackage returns exactly one Package. 
+// If multiple statements are parsed, an error is returned. +func ParsePackage(input string) (*Package, error) { + stmt, err := ParseStatement(input) + if err != nil { + return nil, err + } + pkg, ok := stmt.(*Package) + if !ok { + return nil, fmt.Errorf("expected package but got %T", stmt) + } + return pkg, nil +} + +// ParseTerm returns exactly one term. +// If multiple terms are parsed, an error is returned. +func ParseTerm(input string) (*Term, error) { + body, err := ParseBody(input) + if err != nil { + return nil, fmt.Errorf("failed to parse term: %w", err) + } + if len(body) != 1 { + return nil, fmt.Errorf("expected exactly one term but got: %v", body) + } + term, ok := body[0].Terms.(*Term) + if !ok { + return nil, fmt.Errorf("expected term but got %v", body[0].Terms) + } + return term, nil +} + +// ParseRef returns exactly one reference. +func ParseRef(input string) (Ref, error) { + term, err := ParseTerm(input) + if err != nil { + return nil, fmt.Errorf("failed to parse ref: %w", err) + } + ref, ok := term.Value.(Ref) + if !ok { + return nil, fmt.Errorf("expected ref but got %v", term) + } + return ref, nil +} + +// ParseRuleWithOpts returns exactly one rule. +// If multiple rules are parsed, an error is returned. +func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) { + stmts, _, err := ParseStatementsWithOpts("", input, opts) + if err != nil { + return nil, err + } + if len(stmts) != 1 { + return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1]) + } + rule, ok := stmts[0].(*Rule) + if !ok { + return nil, fmt.Errorf("expected rule but got %T", stmts[0]) + } + return rule, nil +} + +// ParseRule returns exactly one rule. +// If multiple rules are parsed, an error is returned. +func ParseRule(input string) (*Rule, error) { + return ParseRuleWithOpts(input, ParserOptions{}) +} + +// ParseStatement returns exactly one statement. +// A statement might be a term, expression, rule, etc. 
Regardless, +// this function expects *exactly* one statement. If multiple +// statements are parsed, an error is returned. +func ParseStatement(input string) (Statement, error) { + stmts, _, err := ParseStatements("", input) + if err != nil { + return nil, err + } + if len(stmts) != 1 { + return nil, errors.New("expected exactly one statement") + } + return stmts[0], nil +} + +func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) { + stmts, _, err := ParseStatementsWithOpts("", input, popts) + if err != nil { + return nil, err + } + if len(stmts) != 1 { + return nil, errors.New("expected exactly one statement") + } + return stmts[0], nil +} + +// ParseStatements is deprecated. Use ParseStatementWithOpts instead. +func ParseStatements(filename, input string) ([]Statement, []*Comment, error) { + return ParseStatementsWithOpts(filename, input, ParserOptions{}) +} + +// ParseStatementsWithOpts returns a slice of parsed statements. This is the +// default return value from the parser. +func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) { + parser := NewParser(). + WithFilename(filename). + WithReader(strings.NewReader(input)). + WithProcessAnnotation(popts.ProcessAnnotation). + WithFutureKeywords(popts.FutureKeywords...). + WithAllFutureKeywords(popts.AllFutureKeywords). + WithCapabilities(popts.Capabilities). + WithSkipRules(popts.SkipRules). + WithRegoVersion(popts.RegoVersion). 
+ withUnreleasedKeywords(popts.unreleasedKeywords) + + stmts, comments, errs := parser.Parse() + if len(errs) > 0 { + return nil, nil, errs + } + + return stmts, comments, nil +} + +func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) { + if len(stmts) == 0 { + return nil, NewError(ParseErr, &Location{File: filename}, "empty module") + } + + var errs Errors + + pkg, ok := stmts[0].(*Package) + if !ok { + loc := stmts[0].Loc() + errs = append(errs, NewError(ParseErr, loc, "package expected")) + } + + mod := &Module{ + Package: pkg, + // The comments slice only holds comments that were not their own statements. + Comments: comments, + stmts: stmts, + } + + mod.regoVersion = regoCompatibilityMode + if regoCompatibilityMode == RegoUndefined { + mod.regoVersion = DefaultRegoVersion + } + + for i, stmt := range stmts[1:] { + switch stmt := stmt.(type) { + case *Import: + mod.Imports = append(mod.Imports, stmt) + if mod.regoVersion == RegoV0 && RegoV1CompatibleRef.Equal(stmt.Path.Value) { + mod.regoVersion = RegoV0CompatV1 + } + case *Rule: + setRuleModule(stmt, mod) + mod.Rules = append(mod.Rules, stmt) + case Body: + rule, err := ParseRuleFromBody(mod, stmt) + if err != nil { + errs = append(errs, NewError(ParseErr, stmt[0].Location, "%s", err.Error())) + continue + } + rule.generatedBody = true + mod.Rules = append(mod.Rules, rule) + + // NOTE(tsandall): the statement should now be interpreted as a + // rule so update the statement list. This is important for the + // logic below that associates annotations with statements. + stmts[i+1] = rule + case *Package: + errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package")) + case *Annotations: + mod.Annotations = append(mod.Annotations, stmt) + case *Comment: + // Ignore comments, they're handled above. + default: + panic("illegal value") // Indicates grammar is out-of-sync with code. 
+ } + } + + if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 { + for _, rule := range mod.Rules { + for r := rule; r != nil; r = r.Else { + errs = append(errs, CheckRegoV1(r)...) + } + } + } + + if len(errs) > 0 { + return nil, errs + } + + errs = append(errs, attachAnnotationsNodes(mod)...) + + if len(errs) > 0 { + return nil, errs + } + + attachRuleAnnotations(mod) + + return mod, nil +} + +func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool { + return slices.Contains(rule.Head.keywords, keyword) +} + +func newScopeAttachmentErr(a *Annotations, want string) *Error { + var have string + if a.node != nil { + have = fmt.Sprintf(" (have %v)", TypeName(a.node)) + } + return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have) +} + +func setRuleModule(rule *Rule, module *Module) { + rule.Module = module + if rule.Else != nil { + setRuleModule(rule.Else, module) + } +} + +// ParserErrorDetail holds additional details for parser errors. +type ParserErrorDetail struct { + Line string `json:"line"` + Idx int `json:"idx"` +} + +func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail { + + // Find first non-space character at or before offset position. + if offset >= len(bs) { + offset = len(bs) - 1 + } else if offset < 0 { + offset = 0 + } + + for offset > 0 && unicode.IsSpace(rune(bs[offset])) { + offset-- + } + + // Find beginning of line containing offset. + begin := offset + + for begin > 0 && !isNewLineChar(bs[begin]) { + begin-- + } + + if isNewLineChar(bs[begin]) { + begin++ + } + + // Find end of line containing offset. + end := offset + + for end < len(bs) && !isNewLineChar(bs[end]) { + end++ + } + + if begin > end { + begin = end + } + + // Extract line and compute index of offset byte in line. 
+ line := bs[begin:end] + index := offset - begin + + return &ParserErrorDetail{ + Line: string(line), + Idx: index, + } +} + +// Lines returns the pretty formatted line output for the error details. +func (d ParserErrorDetail) Lines() []string { + line := strings.TrimLeft(d.Line, "\t") // remove leading tabs + tabCount := len(d.Line) - len(line) + indent := max(d.Idx-tabCount, 0) + return []string{line, strings.Repeat(" ", indent) + "^"} +} + +func isNewLineChar(b byte) bool { + return b == '\r' || b == '\n' +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go b/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go new file mode 100644 index 0000000000..564ee255d1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/performance.go @@ -0,0 +1,99 @@ +// Copyright 2025 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. +package ast + +import ( + "encoding" + "strings" + "sync" +) + +var builtinNamesByNumParts = sync.OnceValue(func() map[int][]string { + m := map[int][]string{} + for name := range BuiltinMap { + parts := strings.Count(name, ".") + 1 + if parts > 1 { + m[parts] = append(m[parts], name) + } + } + return m +}) + +// BuiltinNameFromRef attempts to extract a known built-in function name from a ref, +// in the most efficient way possible. I.e. without allocating memory for a new string. +// If no built-in function name can be extracted, the second return value is false. 
+func BuiltinNameFromRef(ref Ref) (string, bool) { + reflen := len(ref) + if reflen == 0 { + return "", false + } + + _var, ok := ref[0].Value.(Var) + if !ok { + return "", false + } + + varName := string(_var) + if reflen == 1 { + if _, ok := BuiltinMap[varName]; ok { + return varName, true + } + return "", false + } + + totalLen := len(varName) + for _, term := range ref[1:] { + if _, ok = term.Value.(String); !ok { + return "", false + } + totalLen += 1 + len(term.Value.(String)) // account for dot + } + + matched, ok := builtinNamesByNumParts()[reflen] + if !ok { + return "", false + } + + for _, name := range matched { + // This check saves us a huge amount of work, as only very few built-in + // names will have the exact same length as the ref we are checking. + if len(name) != totalLen { + continue + } + // Example: `name` is "io.jwt.decode" (and so is ref) + // The first part is varName, which have already been established to be 'io': + // io, jwt.decode io == io + if curr, remaining, _ := strings.Cut(name, "."); curr == varName { + // Loop over the remaining (now known to be string) terms in the ref, e.g. "jwt" and "decode" + for _, term := range ref[1:] { + ts := string(term.Value.(String)) + // First iteration: jwt.decode != jwt, so we continue cutting + // Second iteration: remaining is "decode", and so is term + if remaining == ts { + return name, true + } + // Cutting remaining (e.g. jwt.decode), and we now get: + // jwt, decode, false || jwt != jwt + if curr, remaining, _ = strings.Cut(remaining, "."); remaining == "" || curr != ts { + break + } + } + } + } + + return "", false +} + +func AppendDelimeted[T encoding.TextAppender](buf []byte, appenders []T, delim string) ([]byte, error) { + for i, item := range appenders { + if i > 0 { + buf = append(buf, delim...) 
+ } + var err error + if buf, err = item.AppendText(buf); err != nil { + return nil, err + } + } + return buf, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go b/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go new file mode 100644 index 0000000000..632b5aa6d5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go @@ -0,0 +1,1894 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "bytes" + "encoding/json" + "fmt" + "slices" + "strings" + + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/util" +) + +// DefaultRootDocument is the default root document. +// +// All package directives inside source files are implicitly prefixed with the +// DefaultRootDocument value. +var DefaultRootDocument = VarTerm("data") + +// InputRootDocument names the document containing query arguments. +var InputRootDocument = VarTerm("input") + +// SchemaRootDocument names the document containing external data schemas. +var SchemaRootDocument = VarTerm("schema") + +// FunctionArgRootDocument names the document containing function arguments. +// It's only for internal usage, for referencing function arguments between +// the index and topdown. +var FunctionArgRootDocument = VarTerm("args") + +// FutureRootDocument names the document containing new, to-become-default, +// features. +var FutureRootDocument = VarTerm("future") + +// RegoRootDocument names the document containing new, to-become-default, +// features in a future versioned release. +var RegoRootDocument = VarTerm("rego") + +// RootDocumentNames contains the names of top-level documents that can be +// referred to in modules and queries. 
+// +// Note, the schema document is not currently implemented in the evaluator so it +// is not registered as a root document name (yet). +var RootDocumentNames = NewSet( + DefaultRootDocument, + InputRootDocument, +) + +// DefaultRootRef is a reference to the root of the default document. +// +// All refs to data in the policy engine's storage layer are prefixed with this ref. +var DefaultRootRef = Ref{DefaultRootDocument} + +// InputRootRef is a reference to the root of the input document. +// +// All refs to query arguments are prefixed with this ref. +var InputRootRef = Ref{InputRootDocument} + +// SchemaRootRef is a reference to the root of the schema document. +// +// All refs to schema documents are prefixed with this ref. Note, the schema +// document is not currently implemented in the evaluator so it is not +// registered as a root document ref (yet). +var SchemaRootRef = Ref{SchemaRootDocument} + +// RootDocumentRefs contains the prefixes of top-level documents that all +// non-local references start with. +var RootDocumentRefs = NewSet( + NewTerm(DefaultRootRef), + NewTerm(InputRootRef), +) + +// SystemDocumentKey is the name of the top-level key that identifies the system +// document. +const SystemDocumentKey = String("system") + +// ReservedVars is the set of names that refer to implicitly ground vars. +var ReservedVars = NewVarSet( + DefaultRootDocument.Value.(Var), + InputRootDocument.Value.(Var), +) + +// Wildcard represents the wildcard variable as defined in the language. +var ( + WildcardString = "_" + WildcardValue Value = Var(WildcardString) + Wildcard = &Term{Value: WildcardValue} +) + +// WildcardPrefix is the special character that all wildcard variables are +// prefixed with when the statement they are contained in is parsed. +const WildcardPrefix = "$" + +// Keywords contains strings that map to language keywords. 
+var Keywords = KeywordsForRegoVersion(DefaultRegoVersion) + +var KeywordsV0 = [...]string{ + "not", + "package", + "import", + "as", + "default", + "else", + "with", + "null", + "true", + "false", + "some", +} + +var KeywordsV1 = [...]string{ + "not", + "package", + "import", + "as", + "default", + "else", + "with", + "null", + "true", + "false", + "some", + "if", + "contains", + "in", + "every", +} + +func KeywordsForRegoVersion(v RegoVersion) []string { + switch v { + case RegoV0: + return KeywordsV0[:] + case RegoV1, RegoV0CompatV1: + return KeywordsV1[:] + } + return nil +} + +// IsKeyword returns true if s is a language keyword. +func IsKeyword(s string) bool { + return IsInKeywords(s, Keywords) +} + +func IsInKeywords(s string, keywords []string) bool { + return slices.Contains(keywords, s) +} + +// IsKeywordInRegoVersion returns true if s is a language keyword. +func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool { + switch regoVersion { + case RegoV0: + for _, x := range KeywordsV0 { + if x == s { + return true + } + } + case RegoV1, RegoV0CompatV1: + for _, x := range KeywordsV1 { + if x == s { + return true + } + } + } + + return false +} + +type ( + // Node represents a node in an AST. Nodes may be statements in a policy module + // or elements of an ad-hoc query, expression, etc. + Node interface { + fmt.Stringer + Loc() *Location + SetLoc(*Location) + } + + // Statement represents a single statement in a policy module. + Statement interface { + Node + } +) + +type ( + + // Module represents a collection of policies (defined by rules) + // within a namespace (defined by the package) and optional + // dependencies on external documents (defined by imports). 
+ Module struct { + Package *Package `json:"package"` + Imports []*Import `json:"imports,omitempty"` + Annotations []*Annotations `json:"annotations,omitempty"` + Rules []*Rule `json:"rules,omitempty"` + Comments []*Comment `json:"comments,omitempty"` + stmts []Statement + regoVersion RegoVersion + } + + // Comment contains the raw text from the comment in the definition. + Comment struct { + // TODO: these fields have inconsistent JSON keys with other structs in this package. + Text []byte + Location *Location + } + + // Package represents the namespace of the documents produced + // by rules inside the module. + Package struct { + Path Ref `json:"path"` + Location *Location `json:"location,omitempty"` + } + + // Import represents a dependency on a document outside of the policy + // namespace. Imports are optional. + Import struct { + Path *Term `json:"path"` + Alias Var `json:"alias,omitempty"` + Location *Location `json:"location,omitempty"` + } + + // Rule represents a rule as defined in the language. Rules define the + // content of documents that represent policy decisions. + Rule struct { + Default bool `json:"default,omitempty"` + Head *Head `json:"head"` + Body Body `json:"body"` + Else *Rule `json:"else,omitempty"` + Location *Location `json:"location,omitempty"` + Annotations []*Annotations `json:"annotations,omitempty"` + + // Module is a pointer to the module containing this rule. If the rule + // was NOT created while parsing/constructing a module, this should be + // left unset. The pointer is not included in any standard operations + // on the rule (e.g., printing, comparison, visiting, etc.) + Module *Module `json:"-"` + + generatedBody bool + } + + // Head represents the head of a rule. 
+ Head struct { + Name Var `json:"name,omitempty"` + Reference Ref `json:"ref,omitempty"` + Args Args `json:"args,omitempty"` + Key *Term `json:"key,omitempty"` + Value *Term `json:"value,omitempty"` + Assign bool `json:"assign,omitempty"` + Location *Location `json:"location,omitempty"` + + keywords []tokens.Token + generatedValue bool + } + + // Args represents zero or more arguments to a rule. + Args []*Term + + // Body represents one or more expressions contained inside a rule or user + // function. + Body []*Expr + + // Expr represents a single expression contained inside the body of a rule. + Expr struct { + With []*With `json:"with,omitempty"` + Terms any `json:"terms"` + Index int `json:"index"` + Generated bool `json:"generated,omitempty"` + Negated bool `json:"negated,omitempty"` + Location *Location `json:"location,omitempty"` + + generatedFrom *Expr + generates []*Expr + } + + // SomeDecl represents a variable declaration statement. The symbols are variables. + SomeDecl struct { + Symbols []*Term `json:"symbols"` + Location *Location `json:"location,omitempty"` + } + + Every struct { + Key *Term `json:"key"` + Value *Term `json:"value"` + Domain *Term `json:"domain"` + Body Body `json:"body"` + Location *Location `json:"location,omitempty"` + } + + // With represents a modifier on an expression. + With struct { + Target *Term `json:"target"` + Value *Term `json:"value"` + Location *Location `json:"location,omitempty"` + } +) + +// SetModuleRegoVersion sets the RegoVersion for the Module. +func SetModuleRegoVersion(mod *Module, v RegoVersion) { + mod.regoVersion = v +} + +// Compare returns an integer indicating whether mod is less than, equal to, +// or greater than other. 
+func (mod *Module) Compare(other *Module) int { + if mod == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := mod.Package.Compare(other.Package); cmp != 0 { + return cmp + } + if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 { + return cmp + } + if cmp := annotationsCompare(mod.Annotations, other.Annotations); cmp != 0 { + return cmp + } + return rulesCompare(mod.Rules, other.Rules) +} + +// Copy returns a deep copy of mod. +func (mod *Module) Copy() *Module { + cpy := *mod + cpy.Rules = make([]*Rule, len(mod.Rules)) + + nodes := make(map[Node]Node, len(mod.Rules)+len(mod.Imports)+1 /* package */) + + for i := range mod.Rules { + cpy.Rules[i] = mod.Rules[i].Copy() + cpy.Rules[i].Module = &cpy + nodes[mod.Rules[i]] = cpy.Rules[i] + } + + cpy.Imports = make([]*Import, len(mod.Imports)) + for i := range mod.Imports { + cpy.Imports[i] = mod.Imports[i].Copy() + nodes[mod.Imports[i]] = cpy.Imports[i] + } + + cpy.Package = mod.Package.Copy() + nodes[mod.Package] = cpy.Package + + cpy.Annotations = make([]*Annotations, len(mod.Annotations)) + for i, a := range mod.Annotations { + cpy.Annotations[i] = a.Copy(nodes[a.node]) + } + + cpy.Comments = make([]*Comment, len(mod.Comments)) + for i := range mod.Comments { + cpy.Comments[i] = mod.Comments[i].Copy() + } + + cpy.stmts = make([]Statement, len(mod.stmts)) + for i := range mod.stmts { + cpy.stmts[i] = nodes[mod.stmts[i]] + } + + return &cpy +} + +// Equal returns true if mod equals other. +func (mod *Module) Equal(other *Module) bool { + return mod.Compare(other) == 0 +} + +func (mod *Module) String() string { + buf, _ := mod.AppendText(make([]byte, 0, mod.StringLength())) + return util.ByteSliceToString(buf) +} + +// RuleSet returns a RuleSet containing named rules in the mod. 
+func (mod *Module) RuleSet(name Var) RuleSet { + rs := NewRuleSet() + for _, rule := range mod.Rules { + if rule.Head.Name.Equal(name) { + rs.Add(rule) + } + } + return rs +} + +// UnmarshalJSON parses bs and stores the result in mod. The rules in the module +// will have their module pointer set to mod. +func (mod *Module) UnmarshalJSON(bs []byte) error { + + // Declare a new type and use a type conversion to avoid recursively calling + // Module#UnmarshalJSON. + type module Module + + if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil { + return err + } + + WalkRules(mod, func(rule *Rule) bool { + rule.Module = mod + return false + }) + + return nil +} + +func (mod *Module) regoV1Compatible() bool { + return mod.regoVersion == RegoV1 || mod.regoVersion == RegoV0CompatV1 +} + +func (mod *Module) RegoVersion() RegoVersion { + return mod.regoVersion +} + +// SetRegoVersion sets the RegoVersion for the module. +// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences. +func (mod *Module) SetRegoVersion(v RegoVersion) { + mod.regoVersion = v +} + +// NewComment returns a new Comment object. +func NewComment(text []byte) *Comment { + return &Comment{ + Text: text, + } +} + +// Loc returns the location of the comment in the definition. +func (c *Comment) Loc() *Location { + if c == nil { + return nil + } + return c.Location +} + +// SetLoc sets the location on c. +func (c *Comment) SetLoc(loc *Location) { + c.Location = loc +} + +func (c *Comment) String() string { + buf, _ := c.AppendText(make([]byte, 0, c.StringLength())) + return util.ByteSliceToString(buf) +} + +// Copy returns a deep copy of c. +func (c *Comment) Copy() *Comment { + cpy := *c + cpy.Text = make([]byte, len(c.Text)) + copy(cpy.Text, c.Text) + return &cpy +} + +// Equal returns true if this comment equals the other comment. +// Unlike other equality checks on AST nodes, comment equality +// depends on location. 
+func (c *Comment) Equal(other *Comment) bool { + return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text) +} + +// Compare returns an integer indicating whether pkg is less than, equal to, +// or greater than other. +func (pkg *Package) Compare(other *Package) int { + return termSliceCompare(pkg.Path, other.Path) +} + +// Copy returns a deep copy of pkg. +func (pkg *Package) Copy() *Package { + cpy := *pkg + cpy.Path = pkg.Path.Copy() + return &cpy +} + +// Equal returns true if pkg is equal to other. +func (pkg *Package) Equal(other *Package) bool { + return pkg.Compare(other) == 0 +} + +// Loc returns the location of the Package in the definition. +func (pkg *Package) Loc() *Location { + if pkg == nil { + return nil + } + return pkg.Location +} + +// SetLoc sets the location on pkg. +func (pkg *Package) SetLoc(loc *Location) { + pkg.Location = loc +} + +func (pkg *Package) String() string { + buf, _ := pkg.AppendText(make([]byte, 0, pkg.StringLength())) + return util.ByteSliceToString(buf) +} + +func (pkg *Package) MarshalJSON() ([]byte, error) { + data := map[string]any{ + "path": pkg.Path, + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Package { + if pkg.Location != nil { + data["location"] = pkg.Location + } + } + + return json.Marshal(data) +} + +// IsValidImportPath returns an error indicating if the import path is invalid. +// If the import path is valid, err is nil. 
+func IsValidImportPath(v Value) (err error) { + switch v := v.(type) { + case Var: + if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) { + return fmt.Errorf("invalid path %v: path must begin with input or data", v) + } + case Ref: + if err := IsValidImportPath(v[0].Value); err != nil { + return fmt.Errorf("invalid path %v: path must begin with input or data", v) + } + for _, e := range v[1:] { + if _, ok := e.Value.(String); !ok { + return fmt.Errorf("invalid path %v: path elements must be strings", v) + } + } + default: + return fmt.Errorf("invalid path %v: path must be ref or var", v) + } + return nil +} + +// Compare returns an integer indicating whether imp is less than, equal to, +// or greater than other. +func (imp *Import) Compare(other *Import) int { + if imp == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := Compare(imp.Path, other.Path); cmp != 0 { + return cmp + } + + return VarCompare(imp.Alias, other.Alias) +} + +// Copy returns a deep copy of imp. +func (imp *Import) Copy() *Import { + cpy := *imp + cpy.Path = imp.Path.Copy() + return &cpy +} + +// Equal returns true if imp is equal to other. +func (imp *Import) Equal(other *Import) bool { + return imp.Compare(other) == 0 +} + +// Loc returns the location of the Import in the definition. +func (imp *Import) Loc() *Location { + if imp == nil { + return nil + } + return imp.Location +} + +// SetLoc sets the location on imp. +func (imp *Import) SetLoc(loc *Location) { + imp.Location = loc +} + +// Name returns the variable that is used to refer to the imported virtual +// document. This is the alias if defined otherwise the last element in the +// path. 
+func (imp *Import) Name() Var { + if imp.Alias != "" { + return imp.Alias + } + switch v := imp.Path.Value.(type) { + case Var: + return v + case Ref: + if len(v) == 1 { + return v[0].Value.(Var) + } + return Var(v[len(v)-1].Value.(String)) + } + panic("illegal import") +} + +func (imp *Import) String() string { + buf, _ := imp.AppendText(make([]byte, 0, imp.StringLength())) + return util.ByteSliceToString(buf) +} + +func (imp *Import) MarshalJSON() ([]byte, error) { + data := map[string]any{ + "path": imp.Path, + } + + if len(imp.Alias) != 0 { + data["alias"] = imp.Alias + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Import { + if imp.Location != nil { + data["location"] = imp.Location + } + } + + return json.Marshal(data) +} + +// Compare returns an integer indicating whether rule is less than, equal to, +// or greater than other. +func (rule *Rule) Compare(other *Rule) int { + if rule == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := rule.Head.Compare(other.Head); cmp != 0 { + return cmp + } + if rule.Default != other.Default { + if !rule.Default { + return -1 + } + return 1 + } + if cmp := rule.Body.Compare(other.Body); cmp != 0 { + return cmp + } + + if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 { + return cmp + } + + return rule.Else.Compare(other.Else) +} + +// Copy returns a deep copy of rule. +func (rule *Rule) Copy() *Rule { + cpy := *rule + cpy.Head = rule.Head.Copy() + cpy.Body = rule.Body.Copy() + + if len(cpy.Annotations) > 0 { + cpy.Annotations = make([]*Annotations, len(rule.Annotations)) + for i, a := range rule.Annotations { + cpy.Annotations[i] = a.Copy(&cpy) + } + } + + if cpy.Else != nil { + cpy.Else = rule.Else.Copy() + } + return &cpy +} + +// Equal returns true if rule is equal to other. +func (rule *Rule) Equal(other *Rule) bool { + return rule.Compare(other) == 0 +} + +// Loc returns the location of the Rule in the definition. 
+func (rule *Rule) Loc() *Location { + if rule == nil { + return nil + } + return rule.Location +} + +// SetLoc sets the location on rule. +func (rule *Rule) SetLoc(loc *Location) { + rule.Location = loc +} + +// Path returns a ref referring to the document produced by this rule. If rule +// is not contained in a module, this function panics. +// +// Deprecated: Poor handling of ref rules. Use `(*Rule).Ref()` instead. +func (rule *Rule) Path() Ref { + if rule.Module == nil { + panic("assertion failed") + } + return rule.Module.Package.Path.Extend(rule.Head.Ref().GroundPrefix()) +} + +// Ref returns a ref referring to the document produced by this rule. If rule +// is not contained in a module, this function panics. The returned ref may +// contain variables in the last position. +func (rule *Rule) Ref() Ref { + if rule.Module == nil { + panic("assertion failed") + } + return rule.Module.Package.Path.Extend(rule.Head.Ref()) +} + +func (rule *Rule) String() string { + opts := toStringOpts{} + if rule.Module != nil { + opts.regoVersion = rule.Module.RegoVersion() + } + buf, _ := rule.appendWithOpts(opts, make([]byte, 0, rule.stringLengthWithOpts(opts))) + return util.ByteSliceToString(buf) +} + +type toStringOpts struct { + regoVersion RegoVersion +} + +func (o toStringOpts) RegoVersion() RegoVersion { + if o.regoVersion == RegoUndefined { + return DefaultRegoVersion + } + return o.regoVersion +} + +func (rule *Rule) isFunction() bool { + return len(rule.Head.Args) > 0 +} + +// ruleJSON is used for JSON serialization of Rule to avoid map allocation overhead. +// Field order is alphabetical to match previous map-based output. 
+type ruleJSON struct { + Annotations []*Annotations `json:"annotations,omitempty"` + Body Body `json:"body"` + Default bool `json:"default,omitempty"` + Else *Rule `json:"else,omitempty"` + Head *Head `json:"head"` + Location *Location `json:"location,omitempty"` +} + +func (rule *Rule) MarshalJSON() ([]byte, error) { + data := ruleJSON{ + Head: rule.Head, + Body: rule.Body, + } + + if rule.Default { + data.Default = true + } + + if rule.Else != nil { + data.Else = rule.Else + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Rule { + data.Location = rule.Location + } + + if len(rule.Annotations) != 0 { + data.Annotations = rule.Annotations + } + + return json.Marshal(data) +} + +// NewHead returns a new Head object. If args are provided, the first will be +// used for the key and the second will be used for the value. +func NewHead(name Var, args ...*Term) *Head { + head := &Head{ + Name: name, // backcompat + Reference: []*Term{NewTerm(name)}, + } + if len(args) == 0 { + return head + } + head.Key = args[0] + if len(args) == 1 { + return head + } + head.Value = args[1] + if head.Key != nil && head.Value != nil { + head.Reference = head.Reference.Append(args[0]) + } + return head +} + +// VarHead creates a head object, initializes its Name and Location and returns the new head. +// NOTE: The JSON options argument is no longer used, and kept only for backwards compatibility. +func VarHead(name Var, location *Location, _ *astJSON.Options) *Head { + h := NewHead(name) + h.Reference[0].Location = location + return h +} + +// RefHead returns a new Head object with the passed Ref. If args are provided, +// the first will be used for the value. +func RefHead(ref Ref, args ...*Term) *Head { + head := &Head{} + head.SetRef(ref) + if len(ref) < 2 { + head.Name = ref[0].Value.(Var) + } + if len(args) >= 1 { + head.Value = args[0] + } + return head +} + +// DocKind represents the collection of document types that can be produced by rules. 
+type DocKind byte + +const ( + // CompleteDoc represents a document that is completely defined by the rule. + CompleteDoc = iota + + // PartialSetDoc represents a set document that is partially defined by the rule. + PartialSetDoc + + // PartialObjectDoc represents an object document that is partially defined by the rule. + PartialObjectDoc +) // TODO(sr): Deprecate? + +// DocKind returns the type of document produced by this rule. +func (head *Head) DocKind() DocKind { + if head.Key != nil { + if head.Value != nil { + return PartialObjectDoc + } + return PartialSetDoc + } else if head.HasDynamicRef() { + return PartialObjectDoc + } + return CompleteDoc +} + +type RuleKind byte + +const ( + SingleValue = iota + MultiValue +) + +// RuleKind returns the type of rule this is +func (head *Head) RuleKind() RuleKind { + // NOTE(sr): This is bit verbose, since the key is irrelevant for single vs + // multi value, but as good a spot as to assert the invariant. + switch { + case head.Value != nil: + return SingleValue + case head.Key != nil: + return MultiValue + default: + panic("unreachable") + } +} + +// Ref returns the Ref of the rule. If it doesn't have one, it's filled in +// via the Head's Name. +func (head *Head) Ref() Ref { + if len(head.Reference) > 0 { + return head.Reference + } + return Ref{&Term{Value: head.Name}} +} + +// SetRef can be used to set a rule head's Reference +func (head *Head) SetRef(r Ref) { + head.Reference = r +} + +// Compare returns an integer indicating whether head is less than, equal to, +// or greater than other. 
+func (head *Head) Compare(other *Head) int { + if head == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if head.Assign && !other.Assign { + return -1 + } else if !head.Assign && other.Assign { + return 1 + } + if cmp := Compare(head.Args, other.Args); cmp != 0 { + return cmp + } + if cmp := Compare(head.Reference, other.Reference); cmp != 0 { + return cmp + } + if cmp := VarCompare(head.Name, other.Name); cmp != 0 { + return cmp + } + if cmp := Compare(head.Key, other.Key); cmp != 0 { + return cmp + } + return Compare(head.Value, other.Value) +} + +// Copy returns a deep copy of head. +func (head *Head) Copy() *Head { + cpy := *head + cpy.Reference = head.Reference.Copy() + cpy.Args = head.Args.Copy() + cpy.Key = head.Key.Copy() + cpy.Value = head.Value.Copy() + cpy.keywords = nil + cpy.Assign = head.Assign + return &cpy +} + +// Equal returns true if this head equals other. +func (head *Head) Equal(other *Head) bool { + return head.Compare(other) == 0 +} + +func (head *Head) String() string { + return head.stringWithOpts(toStringOpts{}) +} + +func (head *Head) stringWithOpts(opts toStringOpts) string { + buf, _ := head.appendWithOpts(opts, make([]byte, 0, head.stringLengthWithOpts(opts))) + return util.ByteSliceToString(buf) +} + +func (head *Head) MarshalJSON() ([]byte, error) { + var loc *Location + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Head && head.Location != nil { + loc = head.Location + } + + // NOTE(sr): we do this to override the rendering of `head.Reference`. + // It's still what'll be used via the default means of encoding/json + // for unmarshaling a json object into a Head struct! + type h Head + return json.Marshal(struct { + h + Ref Ref `json:"ref"` + Location *Location `json:"location,omitempty"` + }{ + h: h(*head), + Ref: head.Ref(), + Location: loc, + }) +} + +// Vars returns a set of vars found in the head. 
+func (head *Head) Vars() VarSet { + vis := NewVarVisitor() + // TODO: improve test coverage for this. + if head.Args != nil { + vis.WalkArgs(head.Args) + } + if head.Key != nil { + vis.Walk(head.Key) + } + if head.Value != nil { + vis.Walk(head.Value) + } + if len(head.Reference) > 0 { + vis.WalkRef(head.Reference[1:]) + } + return vis.vars +} + +// Loc returns the Location of head. +func (head *Head) Loc() *Location { + if head == nil { + return nil + } + return head.Location +} + +// SetLoc sets the location on head. +func (head *Head) SetLoc(loc *Location) { + head.Location = loc +} + +func (head *Head) HasDynamicRef() bool { + pos := head.Reference.Dynamic() + return pos > 0 && (pos < len(head.Reference)) +} + +// Copy returns a deep copy of a. +func (a Args) Copy() Args { + cpy := make(Args, 0, len(a)) + for _, t := range a { + cpy = append(cpy, t.Copy()) + } + return cpy +} + +func (a Args) String() string { + buf, _ := a.AppendText(make([]byte, 0, a.StringLength())) + return util.ByteSliceToString(buf) +} + +// Loc returns the Location of a. +func (a Args) Loc() *Location { + if len(a) == 0 { + return nil + } + return a[0].Location +} + +// SetLoc sets the location on a. +func (a Args) SetLoc(loc *Location) { + if len(a) != 0 { + a[0].SetLocation(loc) + } +} + +// Vars returns a set of vars that appear in a. +func (a Args) Vars() VarSet { + vis := NewVarVisitor() + vis.WalkArgs(a) + return vis.vars +} + +// NewBody returns a new Body containing the given expressions. The indices of +// the immediate expressions will be reset. +func NewBody(exprs ...*Expr) Body { + for i, expr := range exprs { + expr.Index = i + } + return Body(exprs) +} + +// MarshalJSON returns JSON encoded bytes representing body. +func (body Body) MarshalJSON() ([]byte, error) { + // Serialize empty Body to empty array. This handles both the empty case and the + // nil case (whereas by default the result would be null if body was nil.) 
+ if len(body) == 0 { + return []byte(`[]`), nil + } + ret, err := json.Marshal([]*Expr(body)) + return ret, err +} + +// Append adds the expr to the body and updates the expr's index accordingly. +func (body *Body) Append(expr *Expr) { + n := len(*body) + expr.Index = n + *body = append(*body, expr) +} + +// Set sets the expr in the body at the specified position and updates the +// expr's index accordingly. +func (body Body) Set(expr *Expr, pos int) { + body[pos] = expr + expr.Index = pos +} + +// Compare returns an integer indicating whether body is less than, equal to, +// or greater than other. +// +// If body is a subset of other, it is considered less than (and vice versa). +func (body Body) Compare(other Body) int { + minLen := min(len(other), len(body)) + for i := range minLen { + if cmp := body[i].Compare(other[i]); cmp != 0 { + return cmp + } + } + if len(body) < len(other) { + return -1 + } + if len(other) < len(body) { + return 1 + } + return 0 +} + +// Copy returns a deep copy of body. +func (body Body) Copy() Body { + cpy := make(Body, len(body)) + for i := range body { + cpy[i] = body[i].Copy() + } + return cpy +} + +// Contains returns true if this body contains the given expression. +func (body Body) Contains(x *Expr) bool { + return slices.ContainsFunc(body, x.Equal) +} + +// Equal returns true if this Body is equal to the other Body. +func (body Body) Equal(other Body) bool { + return body.Compare(other) == 0 +} + +// Hash returns the hash code for the Body. +func (body Body) Hash() int { + s := 0 + for _, e := range body { + s += e.Hash() + } + return s +} + +// IsGround returns true if all of the expressions in the Body are ground. +func (body Body) IsGround() bool { + for _, e := range body { + if !e.IsGround() { + return false + } + } + return true +} + +// Loc returns the location of the Body in the definition. 
+func (body Body) Loc() *Location { + if len(body) == 0 { + return nil + } + return body[0].Location +} + +// SetLoc sets the location on body. +func (body Body) SetLoc(loc *Location) { + if len(body) != 0 { + body[0].SetLocation(loc) + } +} + +func (body Body) String() string { + buf, _ := body.AppendText(make([]byte, 0, body.StringLength())) + return util.ByteSliceToString(buf) +} + +func (body Body) AppendText(buf []byte) ([]byte, error) { + return AppendDelimeted(buf, body, "; ") +} + +// Vars returns a VarSet containing variables in body. The params can be set to +// control which vars are included. +func (body Body) Vars(params VarVisitorParams) VarSet { + vis := NewVarVisitor().WithParams(params) + vis.WalkBody(body) + return vis.Vars() +} + +// NewExpr returns a new Expr object. +func NewExpr(terms any) *Expr { + switch terms.(type) { + case *SomeDecl, *Every, *Term, []*Term: // ok + default: + panic("unreachable") + } + return &Expr{ + Negated: false, + Terms: terms, + Index: 0, + With: nil, + } +} + +// Complement returns a copy of this expression with the negation flag flipped. +func (expr *Expr) Complement() *Expr { + cpy := *expr + cpy.Negated = !cpy.Negated + return &cpy +} + +// ComplementNoWith returns a copy of this expression with the negation flag flipped +// and the with modifier removed. This is the same as calling .Complement().NoWith() +// but without making an intermediate copy. +func (expr *Expr) ComplementNoWith() *Expr { + cpy := *expr + cpy.Negated = !cpy.Negated + cpy.With = nil + return &cpy +} + +// Equal returns true if this Expr equals the other Expr. +func (expr *Expr) Equal(other *Expr) bool { + return expr.Compare(other) == 0 +} + +// Compare returns an integer indicating whether expr is less than, equal to, +// or greater than other. +// +// Expressions are compared as follows: +// +// 1. Declarations are always less than other expressions. +// 2. Preceding expression (by Index) is always less than the other expression. +// 3. 
Non-negated expressions are always less than negated expressions. +// 4. Single term expressions are always less than built-in expressions. +// +// Otherwise, the expression terms are compared normally. If both expressions +// have the same terms, the modifiers are compared. +func (expr *Expr) Compare(other *Expr) int { + + if expr == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + + o1 := expr.sortOrder() + o2 := other.sortOrder() + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } + + switch { + case expr.Index < other.Index: + return -1 + case expr.Index > other.Index: + return 1 + } + + switch { + case expr.Negated && !other.Negated: + return 1 + case !expr.Negated && other.Negated: + return -1 + } + + switch t := expr.Terms.(type) { + case *Term: + if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 { + return cmp + } + case []*Term: + if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 { + return cmp + } + case *SomeDecl: + if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 { + return cmp + } + case *Every: + if cmp := Compare(t, other.Terms.(*Every)); cmp != 0 { + return cmp + } + } + + return withSliceCompare(expr.With, other.With) +} + +func (expr *Expr) sortOrder() int { + switch expr.Terms.(type) { + case *SomeDecl: + return 0 + case *Term: + return 1 + case []*Term: + return 2 + case *Every: + return 3 + } + return -1 +} + +// CopyWithoutTerms returns a deep copy of expr without its Terms +func (expr *Expr) CopyWithoutTerms() *Expr { + cpy := *expr + + if expr.With != nil { + cpy.With = make([]*With, len(expr.With)) + for i := range expr.With { + cpy.With[i] = expr.With[i].Copy() + } + } + + return &cpy +} + +// Copy returns a deep copy of expr. 
+func (expr *Expr) Copy() *Expr { + + cpy := expr.CopyWithoutTerms() + + switch ts := expr.Terms.(type) { + case *SomeDecl: + cpy.Terms = ts.Copy() + case []*Term: + cpy.Terms = termSliceCopy(ts) + case *Term: + cpy.Terms = ts.Copy() + case *Every: + cpy.Terms = ts.Copy() + } + + return cpy +} + +// Hash returns the hash code of the Expr. +func (expr *Expr) Hash() int { + s := expr.Index + switch ts := expr.Terms.(type) { + case *SomeDecl: + s += ts.Hash() + case []*Term: + for _, t := range ts { + s += t.Value.Hash() + } + case *Term: + s += ts.Value.Hash() + } + if expr.Negated { + s++ + } + for _, w := range expr.With { + s += w.Hash() + } + return s +} + +// IncludeWith returns a copy of expr with the with modifier appended. +func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr { + cpy := *expr + cpy.With = append(cpy.With, &With{Target: target, Value: value}) + return &cpy +} + +// NoWith returns a copy of expr where the with modifier has been removed. +func (expr *Expr) NoWith() *Expr { + cpy := *expr + cpy.With = nil + return &cpy +} + +// IsEquality returns true if this is an equality expression. +func (expr *Expr) IsEquality() bool { + return isGlobalBuiltin(expr, Var(Equality.Name)) +} + +// IsAssignment returns true if this an assignment expression. +func (expr *Expr) IsAssignment() bool { + return isGlobalBuiltin(expr, Var(Assign.Name)) +} + +// IsCall returns true if this expression calls a function. +func (expr *Expr) IsCall() bool { + _, ok := expr.Terms.([]*Term) + return ok +} + +// IsEvery returns true if this expression is an 'every' expression. +func (expr *Expr) IsEvery() bool { + _, ok := expr.Terms.(*Every) + return ok +} + +// IsSome returns true if this expression is a 'some' expression. +func (expr *Expr) IsSome() bool { + _, ok := expr.Terms.(*SomeDecl) + return ok +} + +// Operator returns the name of the function or built-in this expression refers +// to. If this expression is not a function call, returns nil. 
+func (expr *Expr) Operator() Ref { + op := expr.OperatorTerm() + if op == nil { + return nil + } + return op.Value.(Ref) +} + +// OperatorTerm returns the name of the function or built-in this expression +// refers to. If this expression is not a function call, returns nil. +func (expr *Expr) OperatorTerm() *Term { + terms, ok := expr.Terms.([]*Term) + if !ok || len(terms) == 0 { + return nil + } + return terms[0] +} + +// Operand returns the term at the zero-based pos. If the expr does not include +// at least pos+1 terms, this function returns nil. +func (expr *Expr) Operand(pos int) *Term { + terms, ok := expr.Terms.([]*Term) + if !ok { + return nil + } + idx := pos + 1 + if idx < len(terms) { + return terms[idx] + } + return nil +} + +// Operands returns the built-in function operands. +func (expr *Expr) Operands() []*Term { + terms, ok := expr.Terms.([]*Term) + if !ok { + return nil + } + return terms[1:] +} + +// IsGround returns true if all of the expression terms are ground. +func (expr *Expr) IsGround() bool { + switch ts := expr.Terms.(type) { + case []*Term: + for _, t := range ts[1:] { + if !t.IsGround() { + return false + } + } + case *Term: + return ts.IsGround() + } + return true +} + +// SetOperator sets the expr's operator and returns the expr itself. If expr is +// not a call expr, this function will panic. +func (expr *Expr) SetOperator(term *Term) *Expr { + expr.Terms.([]*Term)[0] = term + return expr +} + +// SetLocation sets the expr's location and returns the expr itself. +func (expr *Expr) SetLocation(loc *Location) *Expr { + expr.Location = loc + return expr +} + +// Loc returns the Location of expr. +func (expr *Expr) Loc() *Location { + if expr == nil { + return nil + } + return expr.Location +} + +// SetLoc sets the location on expr. 
+func (expr *Expr) SetLoc(loc *Location) { + expr.SetLocation(loc) +} + +func (expr *Expr) String() string { + buf, _ := expr.AppendText(make([]byte, 0, expr.StringLength())) + return util.ByteSliceToString(buf) +} + +// exprJSON is used for JSON serialization of Expr to avoid map allocation overhead. +// Field order is alphabetical to match previous map-based output. +type exprJSON struct { + Generated bool `json:"generated,omitempty"` + Index int `json:"index"` + Location *Location `json:"location,omitempty"` + Negated bool `json:"negated,omitempty"` + Terms any `json:"terms"` + With []*With `json:"with,omitempty"` +} + +func (expr *Expr) MarshalJSON() ([]byte, error) { + data := exprJSON{ + Index: expr.Index, + Terms: expr.Terms, + } + + if len(expr.With) > 0 { + data.With = expr.With + } + + if expr.Generated { + data.Generated = true + } + + if expr.Negated { + data.Negated = true + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Expr { + data.Location = expr.Location + } + + return json.Marshal(data) +} + +// UnmarshalJSON parses the byte array and stores the result in expr. +func (expr *Expr) UnmarshalJSON(bs []byte) error { + v := map[string]any{} + if err := util.UnmarshalJSON(bs, &v); err != nil { + return err + } + return unmarshalExpr(expr, v) +} + +// Vars returns a VarSet containing variables in expr. The params can be set to +// control which vars are included. +func (expr *Expr) Vars(params VarVisitorParams) VarSet { + vis := NewVarVisitor().WithParams(params) + vis.Walk(expr) + return vis.Vars() +} + +// NewBuiltinExpr creates a new Expr object with the supplied terms. +// The builtin operator must be the first term. 
+func NewBuiltinExpr(terms ...*Term) *Expr { + return &Expr{Terms: terms} +} + +func (expr *Expr) CogeneratedExprs() []*Expr { + visited := map[*Expr]struct{}{} + visitCogeneratedExprs(expr, func(e *Expr) bool { + if expr.Equal(e) { + return true + } + if _, ok := visited[e]; ok { + return true + } + visited[e] = struct{}{} + return false + }) + + result := make([]*Expr, 0, len(visited)) + for e := range visited { + result = append(result, e) + } + return result +} + +func (expr *Expr) BaseCogeneratedExpr() *Expr { + if expr.generatedFrom == nil { + return expr + } + return expr.generatedFrom.BaseCogeneratedExpr() +} + +func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) { + if parent := expr.generatedFrom; parent != nil { + if stop := f(parent); !stop { + visitCogeneratedExprs(parent, f) + } + } + for _, child := range expr.generates { + if stop := f(child); !stop { + visitCogeneratedExprs(child, f) + } + } +} + +func (d *SomeDecl) String() string { + buf, _ := d.AppendText(make([]byte, 0, d.StringLength())) + return util.ByteSliceToString(buf) +} + +// SetLoc sets the Location on d. +func (d *SomeDecl) SetLoc(loc *Location) { + d.Location = loc +} + +// Loc returns the Location of d. +func (d *SomeDecl) Loc() *Location { + return d.Location +} + +// Copy returns a deep copy of d. +func (d *SomeDecl) Copy() *SomeDecl { + cpy := *d + cpy.Symbols = termSliceCopy(d.Symbols) + return &cpy +} + +// Compare returns an integer indicating whether d is less than, equal to, or +// greater than other. +func (d *SomeDecl) Compare(other *SomeDecl) int { + return termSliceCompare(d.Symbols, other.Symbols) +} + +// Hash returns a hash code of d. 
+func (d *SomeDecl) Hash() int { + return termSliceHash(d.Symbols) +} + +func (d *SomeDecl) MarshalJSON() ([]byte, error) { + data := map[string]any{ + "symbols": d.Symbols, + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.SomeDecl { + if d.Location != nil { + data["location"] = d.Location + } + } + + return json.Marshal(data) +} + +func (q *Every) String() string { + if q.Key != nil { + return fmt.Sprintf("every %s, %s in %s { %s }", + q.Key, + q.Value, + q.Domain, + q.Body) + } + return fmt.Sprintf("every %s in %s { %s }", + q.Value, + q.Domain, + q.Body) +} + +func (q *Every) Loc() *Location { + return q.Location +} + +func (q *Every) SetLoc(l *Location) { + q.Location = l +} + +// Copy returns a deep copy of d. +func (q *Every) Copy() *Every { + cpy := *q + cpy.Key = q.Key.Copy() + cpy.Value = q.Value.Copy() + cpy.Domain = q.Domain.Copy() + cpy.Body = q.Body.Copy() + return &cpy +} + +func (q *Every) Compare(other *Every) int { + for _, terms := range [][2]*Term{ + {q.Key, other.Key}, + {q.Value, other.Value}, + {q.Domain, other.Domain}, + } { + if d := Compare(terms[0], terms[1]); d != 0 { + return d + } + } + return q.Body.Compare(other.Body) +} + +// KeyValueVars returns the key and val arguments of an `every` +// expression, if they are non-nil and not wildcards. +func (q *Every) KeyValueVars() VarSet { + vis := NewVarVisitor() + if q.Key != nil { + vis.Walk(q.Key) + } + vis.Walk(q.Value) + return vis.vars +} + +func (q *Every) MarshalJSON() ([]byte, error) { + data := map[string]any{ + "key": q.Key, + "value": q.Value, + "domain": q.Domain, + "body": q.Body, + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Every { + if q.Location != nil { + data["location"] = q.Location + } + } + + return json.Marshal(data) +} + +func (w *With) String() string { + buf, _ := w.AppendText(make([]byte, 0, w.StringLength())) + return util.ByteSliceToString(buf) +} + +// Equal returns true if this With is equals the other With. 
+func (w *With) Equal(other *With) bool { + return Compare(w, other) == 0 +} + +// Compare returns an integer indicating whether w is less than, equal to, or +// greater than other. +func (w *With) Compare(other *With) int { + if w == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := Compare(w.Target, other.Target); cmp != 0 { + return cmp + } + return Compare(w.Value, other.Value) +} + +// Copy returns a deep copy of w. +func (w *With) Copy() *With { + cpy := *w + cpy.Value = w.Value.Copy() + cpy.Target = w.Target.Copy() + return &cpy +} + +// Hash returns the hash code of the With. +func (w With) Hash() int { + return w.Target.Hash() + w.Value.Hash() +} + +// SetLocation sets the location on w. +func (w *With) SetLocation(loc *Location) *With { + w.Location = loc + return w +} + +// Loc returns the Location of w. +func (w *With) Loc() *Location { + if w == nil { + return nil + } + return w.Location +} + +// SetLoc sets the location on w. +func (w *With) SetLoc(loc *Location) { + w.Location = loc +} + +// withJSON is used for JSON serialization of With to avoid map allocation overhead. +// Field order is alphabetical to match previous map-based output. +type withJSON struct { + Location *Location `json:"location,omitempty"` + Target *Term `json:"target"` + Value *Term `json:"value"` +} + +func (w *With) MarshalJSON() ([]byte, error) { + data := withJSON{ + Target: w.Target, + Value: w.Value, + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.With { + data.Location = w.Location + } + + return json.Marshal(data) +} + +// Copy returns a deep copy of the AST node x. If x is not an AST node, x is returned unmodified. 
+func Copy(x any) any { + switch x := x.(type) { + case *Module: + return x.Copy() + case *Package: + return x.Copy() + case *Import: + return x.Copy() + case *Rule: + return x.Copy() + case *Head: + return x.Copy() + case Args: + return x.Copy() + case Body: + return x.Copy() + case *Expr: + return x.Copy() + case *With: + return x.Copy() + case *SomeDecl: + return x.Copy() + case *Every: + return x.Copy() + case *Term: + return x.Copy() + case *ArrayComprehension: + return x.Copy() + case *SetComprehension: + return x.Copy() + case *ObjectComprehension: + return x.Copy() + case Set: + return x.Copy() + case *object: + return x.Copy() + case *Array: + return x.Copy() + case Ref: + return x.Copy() + case Call: + return x.Copy() + case *Comment: + return x.Copy() + } + return x +} + +// RuleSet represents a collection of rules that produce a virtual document. +type RuleSet []*Rule + +// NewRuleSet returns a new RuleSet containing the given rules. +func NewRuleSet(rules ...*Rule) RuleSet { + rs := make(RuleSet, 0, len(rules)) + for _, rule := range rules { + rs.Add(rule) + } + return rs +} + +// Add inserts the rule into rs. +func (rs *RuleSet) Add(rule *Rule) { + for _, exist := range *rs { + if exist.Equal(rule) { + return + } + } + *rs = append(*rs, rule) +} + +// Contains returns true if rs contains rule. +func (rs RuleSet) Contains(rule *Rule) bool { + for i := range rs { + if rs[i].Equal(rule) { + return true + } + } + return false +} + +// Diff returns a new RuleSet containing rules in rs that are not in other. +func (rs RuleSet) Diff(other RuleSet) RuleSet { + result := NewRuleSet() + for i := range rs { + if !other.Contains(rs[i]) { + result.Add(rs[i]) + } + } + return result +} + +// Equal returns true if rs equals other. +func (rs RuleSet) Equal(other RuleSet) bool { + return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0 +} + +// Merge returns a ruleset containing the union of rules from rs an other. 
+func (rs RuleSet) Merge(other RuleSet) RuleSet { + result := NewRuleSet() + for i := range rs { + result.Add(rs[i]) + } + for i := range other { + result.Add(other[i]) + } + return result +} + +func (rs RuleSet) String() string { + buf := make([]string, 0, len(rs)) + for _, rule := range rs { + buf = append(buf, rule.String()) + } + return "{" + strings.Join(buf, ", ") + "}" +} + +// Returns true if the equality or assignment expression referred to by expr +// has a valid number of arguments. +func validEqAssignArgCount(expr *Expr) bool { + return len(expr.Operands()) == 2 +} + +// this function checks if the expr refers to a non-namespaced (global) built-in +// function like eq, gt, plus, etc. +func isGlobalBuiltin(expr *Expr, name Var) bool { + terms, ok := expr.Terms.([]*Term) + if !ok { + return false + } + + // NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid + // allocation here. + ref, ok := terms[0].Value.(Ref) + if !ok || len(ref) != 1 { + return false + } + if head, ok := ref[0].Value.(Var); ok { + return head.Equal(name) + } + return false +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/policy_appenders.go b/vendor/github.com/open-policy-agent/opa/v1/ast/policy_appenders.go new file mode 100644 index 0000000000..1c9813aa97 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/policy_appenders.go @@ -0,0 +1,342 @@ +package ast + +import ( + "encoding" + "fmt" + + "github.com/open-policy-agent/opa/v1/util" +) + +func (m *Module) AppendText(buf []byte) ([]byte, error) { + if m == nil { + return append(buf, ""...), nil + } + + var err error + + // NOTE(anderseknert): this DOES allocate still, and while that's unfortunate, + // we'll be better off dealing with that when we have v2 JSON in the stdlib than + // doing manual JSON marshalling (and string length calculations) here. 
+ for _, annotations := range m.Annotations { + // rule annotations are attached to rules, so only check for package scoped ones here + if annotations.Scope == "package" || annotations.Scope == "subpackages" { + buf = append(buf, "# METADATA\n# "...) + buf = append(buf, annotations.String()...) + buf = append(buf, '\n') + } + } + + if buf, err = m.Package.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, '\n') + + if len(m.Imports) > 0 { + for _, imp := range m.Imports { + buf = append(buf, '\n') + if buf, err = imp.AppendText(buf); err != nil { + return nil, err + } + } + buf = append(buf, '\n') + } + + if len(m.Rules) > 0 { + for _, rule := range m.Rules { + buf = append(buf, '\n') + if buf, err = rule.appendWithOpts(toStringOpts{regoVersion: m.regoVersion}, buf); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +func (pkg *Package) AppendText(buf []byte) ([]byte, error) { + var err error + if pkg == nil { + return append(buf, ""...), nil + } + if len(pkg.Path) <= 1 { + buf = append(buf, "package "...), nil + } + + buf = append(buf, "package "...) + + path := pkg.Path[1:] // omit "data" + + return path.AppendText(buf) +} + +func (imp *Import) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, "import "...) + var err error + if buf, err = imp.Path.AppendText(buf); err != nil { + return nil, err + } + if imp.Alias != "" { + buf = append(buf, ' ', 'a', 's', ' ') + buf = append(buf, imp.Alias...) + } + return buf, nil +} + +func (r *Rule) AppendText(buf []byte) ([]byte, error) { + regoVersion := DefaultRegoVersion + if r.Module != nil { + regoVersion = r.Module.RegoVersion() + } + return r.appendWithOpts(toStringOpts{regoVersion: regoVersion}, buf) +} + +func (r *Rule) appendWithOpts(opts toStringOpts, buf []byte) ([]byte, error) { + // See note in [Module.AppendText] regarding annotations. + for _, annotations := range r.Annotations { + buf = append(buf, "# METADATA\n# "...) 
+ buf = append(buf, annotations.String()...) + buf = append(buf, '\n') + } + + if r.Default { + buf = append(buf, "default "...) + } + + var err error + if buf, err = r.Head.appendWithOpts(opts, buf); err != nil { + return nil, err + } + + if !r.Default { + switch opts.RegoVersion() { + case RegoV1, RegoV0CompatV1: + buf = append(buf, " if { "...) + default: + buf = append(buf, " { "...) + } + if buf, err = r.Body.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, " }"...) + } + if r.Else != nil { + if buf, err = r.Else.appendElse(opts, buf); err != nil { + return nil, err + } + } + + return buf, nil +} + +func (r *Rule) appendElse(opts toStringOpts, buf []byte) ([]byte, error) { + buf = append(buf, " else "...) + + var err error + if r.Head.Value != nil { + buf = append(buf, "= "...) + if buf, err = r.Head.Value.AppendText(buf); err != nil { + return nil, err + } + } + + if v := opts.RegoVersion(); v == RegoV1 || v == RegoV0CompatV1 { + buf = append(buf, " if { "...) + } else { + buf = append(buf, " { "...) + } + if buf, err = r.Body.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, " }"...) + + if r.Else != nil { + if buf, err = r.Else.appendElse(opts, buf); err != nil { + return nil, err + } + } + + return buf, nil +} + +func (h *Head) AppendText(buf []byte) ([]byte, error) { + return h.appendWithOpts(toStringOpts{}, buf) +} + +func (h *Head) appendWithOpts(opts toStringOpts, buf []byte) ([]byte, error) { + var err error + if h.Reference == nil { + buf = append(buf, h.Name...) 
+ } else { + if buf, err = h.Reference.AppendText(buf); err != nil { + return nil, err + } + } + + containsAdded := false + switch { + case len(h.Args) != 0: + if buf, err = h.Args.AppendText(buf); err != nil { + return nil, err + } + case len(h.Reference) == 1 && h.Key != nil: + switch opts.RegoVersion() { + case RegoV0: + buf = append(buf, '[') + if buf, err = h.Key.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ']') + default: + if buf, err = h.Key.AppendText(append(buf, " contains "...)); err != nil { + return nil, err + } + containsAdded = true + } + } + if h.Value != nil { + if h.Assign { + buf = append(buf, " := "...) + } else { + buf = append(buf, " = "...) + } + if buf, err = h.Value.AppendText(buf); err != nil { + return nil, err + } + } else if !containsAdded && h.Name == "" && h.Key != nil { + if buf, err = h.Key.AppendText(append(buf, " contains "...)); err != nil { + return nil, err + } + } + return buf, nil +} + +func (a Args) AppendText(buf []byte) ([]byte, error) { + var err error + buf = append(buf, '(') + if buf, err = AppendDelimeted(buf, a, ", "); err != nil { + return nil, err + } + return append(buf, ')'), nil +} + +func (expr *Expr) AppendText(buf []byte) ([]byte, error) { + if expr.Negated { + buf = append(buf, "not "...) 
+ } + + var err error + + switch t := expr.Terms.(type) { + case []*Term: + if expr.IsEquality() && validEqAssignArgCount(expr) { + if buf, err = t[1].AppendText(buf); err != nil { + return nil, err + } + buf = append(append(append(buf, ' '), Equality.Infix...), ' ') + if buf, err = t[2].AppendText(buf); err != nil { + return nil, err + } + } else if buf, err = Call(t).AppendText(buf); err != nil { + return nil, err + } + case encoding.TextAppender: + if buf, err = t.AppendText(buf); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported expr terms type: %T", expr.Terms) + } + + if len(expr.With) > 0 { + buf = append(buf, ' ') + } + + return AppendDelimeted(buf, expr.With, " ") +} + +func (w *With) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, "with "...) + var err error + if buf, err = w.Target.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, " as "...) + if buf, err = w.Value.AppendText(buf); err != nil { + return nil, err + } + return buf, nil +} + +func (w *Every) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, "every "...) + var err error + if w.Key != nil { + if buf, err = w.Key.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ", "...) + } + if buf, err = w.Value.AppendText(buf); err == nil { + buf = append(buf, " in "...) + if buf, err = w.Domain.AppendText(buf); err == nil { + buf = append(buf, " { "...) + if buf, err = w.Body.AppendText(buf); err == nil { + buf = append(buf, " }"...) + } + } + } + return buf, err +} + +func (d *SomeDecl) AppendText(buf []byte) ([]byte, error) { + var err error + buf = append(buf, "some "...) + if call, ok := d.Symbols[0].Value.(Call); ok { + if buf, err = call[1].AppendText(buf); err != nil { + return nil, err + } + if len(call) == 3 { + buf = append(buf, " in "...) + } else { + buf = append(buf, ", "...) 
+ } + if buf, err = call[2].AppendText(buf); err != nil { + return nil, err + } + if len(call) == 4 { + buf = append(buf, " in "...) + if buf, err = call[3].AppendText(buf); err != nil { + return nil, err + } + } + return buf, nil + } + + buf, err = AppendDelimeted(buf, d.Symbols, ", ") + + return buf, err +} + +func (c *Comment) AppendText(buf []byte) ([]byte, error) { + return append(append(buf, '#'), c.Text...), nil +} + +// RulePath returns the string representation of the rule's path, i.e. its package path followed by the rule head ref. +func RulePath(r *Rule) string { + if r == nil { + return "" + } + if r.Module == nil { + return "" + } + buf := make([]byte, 0, r.Module.Package.Path.StringLength()+r.Head.Ref().StringLength()+1) + buf, _ = r.Module.Package.Path.AppendText(buf) + buf = append(buf, '.') + buf, _ = r.Head.Ref().AppendText(buf) + + return util.ByteSliceToString(buf) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go b/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go new file mode 100644 index 0000000000..aa34f37471 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go @@ -0,0 +1,82 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "io" + "strings" +) + +// Pretty writes a pretty representation of the AST rooted at x to w. +// +// This is function is intended for debug purposes when inspecting ASTs. 
+func Pretty(w io.Writer, x any) { + pp := &prettyPrinter{ + depth: -1, + w: w, + } + NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x) +} + +type prettyPrinter struct { + depth int + w io.Writer +} + +func (pp *prettyPrinter) Before(x any) bool { + switch x.(type) { + case *Term: + default: + pp.depth++ + } + + switch x := x.(type) { + case *Term: + return false + case Args: + if len(x) == 0 { + return false + } + pp.writeType(x) + case *Expr: + extras := []string{} + if x.Negated { + extras = append(extras, "negated") + } + extras = append(extras, fmt.Sprintf("index=%d", x.Index)) + pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " ")) + case Null, Boolean, Number, String, Var: + pp.writeValue(x) + default: + pp.writeType(x) + } + return false +} + +func (pp *prettyPrinter) After(x any) { + switch x.(type) { + case *Term: + default: + pp.depth-- + } +} + +func (pp *prettyPrinter) writeValue(x any) { + pp.writeIndent(fmt.Sprint(x)) +} + +func (pp *prettyPrinter) writeType(x any) { + pp.writeIndent(TypeName(x)) +} + +func (pp *prettyPrinter) writeIndent(f string, a ...any) { + pad := strings.Repeat(" ", pp.depth) + pp.write(pad+f, a...) +} + +func (pp *prettyPrinter) write(f string, a ...any) { + fmt.Fprintf(pp.w, f+"\n", a...) 
+} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/rego_compiler.go b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_compiler.go new file mode 100644 index 0000000000..78d0efc59a --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_compiler.go @@ -0,0 +1,17 @@ +package ast + +import "context" + +type regoCompileCtx struct{} + +func WithCompiler(ctx context.Context, c *Compiler) context.Context { + return context.WithValue(ctx, regoCompileCtx{}, c) +} + +func CompilerFromContext(ctx context.Context) (*Compiler, bool) { + if ctx == nil { + return nil, false + } + v, ok := ctx.Value(regoCompileCtx{}).(*Compiler) + return v, ok +} diff --git a/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go similarity index 81% rename from vendor/github.com/open-policy-agent/opa/ast/rego_v1.go rename to vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go index b64dfce7be..db9e0f722c 100644 --- a/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go @@ -3,7 +3,7 @@ package ast import ( "fmt" - "github.com/open-policy-agent/opa/ast/internal/tokens" + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" ) func checkDuplicateImports(modules []*Module) (errors Errors) { @@ -23,17 +23,16 @@ func checkDuplicateImports(modules []*Module) (errors Errors) { return } -func checkRootDocumentOverrides(node interface{}) Errors { +func checkRootDocumentOverrides(node any) Errors { errors := Errors{} WalkRules(node, func(rule *Rule) bool { - var name string + name := rule.Head.Name if len(rule.Head.Reference) > 0 { - name = rule.Head.Reference[0].Value.(Var).String() - } else { - name = rule.Head.Name.String() + name = rule.Head.Reference[0].Value.(Var) } - if RootDocumentRefs.Contains(RefTerm(VarTerm(name))) { + + if ReservedVars.Contains(name) { errors = append(errors, NewError(CompileErr, rule.Location, "rules must not 
shadow %v (use a different rule name)", name)) } @@ -52,8 +51,8 @@ func checkRootDocumentOverrides(node interface{}) Errors { if expr.IsAssignment() { // assign() can be called directly, so we need to assert its given first operand exists before checking its name. if nameOp := expr.Operand(0); nameOp != nil { - name := nameOp.String() - if RootDocumentRefs.Contains(RefTerm(VarTerm(name))) { + name := Var(nameOp.String()) + if ReservedVars.Contains(name) { errors = append(errors, NewError(CompileErr, expr.Location, "variables must not shadow %v (use a different variable name)", name)) } } @@ -64,28 +63,26 @@ func checkRootDocumentOverrides(node interface{}) Errors { return errors } -func walkCalls(node interface{}, f func(interface{}) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - switch x := x.(type) { +func walkCalls(node any, f func(any) bool) { + vis := NewGenericVisitor(func(x any) bool { + switch y := x.(type) { case Call: return f(x) case *Expr: - if x.IsCall() { + if y.IsCall() { return f(x) } case *Head: // GenericVisitor doesn't walk the rule head ref - walkCalls(x.Reference, f) + walkCalls(y.Reference, f) } return false - }} + }) vis.Walk(node) } -func checkDeprecatedBuiltins(deprecatedBuiltinsMap map[string]struct{}, node interface{}) Errors { - errs := make(Errors, 0) - - walkCalls(node, func(x interface{}) bool { +func checkDeprecatedBuiltins(deprecatedBuiltinsMap map[string]struct{}, node any) (errs Errors) { + walkCalls(node, func(x any) bool { var operator string var loc *Location @@ -113,7 +110,7 @@ func checkDeprecatedBuiltins(deprecatedBuiltinsMap map[string]struct{}, node int return errs } -func checkDeprecatedBuiltinsForCurrentVersion(node interface{}) Errors { +func checkDeprecatedBuiltinsForCurrentVersion(node any) Errors { deprecatedBuiltins := make(map[string]struct{}) capabilities := CapabilitiesForThisVersion() for _, bi := range capabilities.Builtins { @@ -150,11 +147,11 @@ func NewRegoCheckOptions() RegoCheckOptions { // 
CheckRegoV1 checks the given module or rule for errors that are specific to Rego v1. // Passing something other than an *ast.Rule or *ast.Module is considered a programming error, and will cause a panic. -func CheckRegoV1(x interface{}) Errors { +func CheckRegoV1(x any) Errors { return CheckRegoV1WithOptions(x, NewRegoCheckOptions()) } -func CheckRegoV1WithOptions(x interface{}, opts RegoCheckOptions) Errors { +func CheckRegoV1WithOptions(x any, opts RegoCheckOptions) Errors { switch x := x.(type) { case *Module: return checkRegoV1Module(x, opts) @@ -191,8 +188,8 @@ func checkRegoV1Rule(rule *Rule, opts RegoCheckOptions) Errors { var errs Errors - if opts.NoKeywordsAsRuleNames && IsKeywordInRegoVersion(rule.Head.Name.String(), RegoV1) { - errs = append(errs, NewError(ParseErr, rule.Location, fmt.Sprintf("%s keyword cannot be used for rule name", rule.Head.Name.String()))) + if opts.NoKeywordsAsRuleNames && len(rule.Head.Reference) < 2 && IsKeywordInRegoVersion(rule.Head.Name.String(), RegoV1) { + errs = append(errs, NewError(ParseErr, rule.Location, "%s keyword cannot be used for rule name", rule.Head.Name.String())) } if opts.RequireRuleBodyOrValue && rule.generatedBody && rule.Head.generatedValue { errs = append(errs, NewError(ParseErr, rule.Location, "%s must have value assignment and/or body declaration", t)) diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go b/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go new file mode 100644 index 0000000000..3f9e2001d5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go @@ -0,0 +1,54 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// SchemaSet holds a map from a path to a schema. 
+type SchemaSet struct { + m *util.HasherMap[Ref, any] +} + +// NewSchemaSet returns an empty SchemaSet. +func NewSchemaSet() *SchemaSet { + return &SchemaSet{ + m: util.NewHasherMap[Ref, any](RefEqual), + } +} + +// Put inserts a raw schema into the set. +func (ss *SchemaSet) Put(path Ref, raw any) { + ss.m.Put(path, raw) +} + +// Get returns the raw schema identified by the path. +func (ss *SchemaSet) Get(path Ref) any { + if ss != nil { + if x, ok := ss.m.Get(path); ok { + return x + } + } + return nil +} + +func loadSchema(raw any, allowNet []string) (types.Type, error) { + + jsonSchema, err := compileSchema(raw, allowNet) + if err != nil { + return nil, err + } + + tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema) + if err != nil { + return nil, fmt.Errorf("type checking: %w", err) + } + + return tpe, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/slices.go b/vendor/github.com/open-policy-agent/opa/v1/ast/slices.go new file mode 100644 index 0000000000..5921ec0ca1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/slices.go @@ -0,0 +1,15 @@ +// Copyright 2026 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +// CountFunc counts the number of items in a slice S that satisfy predicate function f. 
+func CountFunc[T any, S ~[]T](items S, f func(T) bool) (n int) { + for i := range items { + if f(items[i]) { + n++ + } + } + return n +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/string_length.go b/vendor/github.com/open-policy-agent/opa/v1/ast/string_length.go new file mode 100644 index 0000000000..fe53227d24 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/string_length.go @@ -0,0 +1,351 @@ +package ast + +import ( + "fmt" + "unicode/utf8" + + "github.com/open-policy-agent/opa/v1/util" +) + +// StringLengther is an interface for types that can report their string length without +// actually constructing the string. This is useful for pre-allocating buffers, like those +// used in AppendText, strings.Builder, bytes.Buffer, etc. +type StringLengther interface { + StringLength() int +} + +// TermSliceStringLength returns the total string length of the given terms, as reported +// by the [StringLengther.StringLength] method implementation of each term's [Value]. The +// delimLen value will be added between each term's length to account for a delimiter, or +// no delimiter if delimLen is 0. +// Implementation note: this function is optimized for inlining, and just meets the threshold +// for that. Don't change without making sure that's still the case. 
+func TermSliceStringLength(terms []*Term, delimLen int) (n int) { + for i := range terms { + n += terms[i].StringLength() + delimLen + } + return max(n-delimLen, 0) +} + +func (t *Term) StringLength() int { + if sl, ok := t.Value.(StringLengther); ok { + return sl.StringLength() + } + + panic("expected all ast.Value types to implement StringLenghter interface, got: " + ValueName(t.Value)) +} + +func (s String) StringLength() int { + n := 2 // surrounding quotes + bs := util.StringToByteSlice(s) + for i := 0; i < len(bs); { + r, size := utf8.DecodeRune(bs[i:]) + switch r { + case '\\', '"': + n += 2 // escaped backslash or quote + case '\b', '\f', '\n', '\r', '\t': + n += 2 // escaped control characters + default: + if r < 0x20 { + n += 6 // unicode escape for other control characters + } else { + n += size // normal rune + } + } + i += size + } + return n +} + +func (n Number) StringLength() int { + return len(n) +} + +func (b Boolean) StringLength() int { + if b { + return 4 + } + return 5 +} + +func (Null) StringLength() int { + return 4 +} + +func (s *set) StringLength() int { + if s.Len() == 0 { + return 5 // set() + } + // surrounding {} + ", " for every element - 1 + return TermSliceStringLength(s.Slice(), 2) + 2 +} + +func (a *Array) StringLength() int { + if a.Len() == 0 { + return 2 // [] + } + // surrounding brackets + ", " for every element - 1 + return TermSliceStringLength(a.elems, 2) + 2 +} + +func (o *object) StringLength() (n int) { + if o.Len() == 0 { + return 2 // {} + } + // ": " for every item + ", " for every item - 1 + o.Foreach(func(key, value *Term) { + n += key.StringLength() + 4 + value.StringLength() // ": " and ", " + }) + return n // surrounding {} but also minus last ", " +} + +func (l *lazyObj) StringLength() int { + return l.force().(*object).StringLength() +} + +func (ts *TemplateString) StringLength() (n int) { + for _, p := range ts.Parts { + switch x := p.(type) { + case *Expr: + n += 2 + x.StringLength() // for {} + case *Term: 
+ if s, ok := x.Value.(String); ok { + n += len(s) + countUnescapedLeftCurly(string(s)) + } else { + n += x.StringLength() + } + default: + n += 9 // + } + } + return n + 3 // $"" or $`` +} + +func (c Call) StringLength() int { + return c[0].StringLength() + 2 + TermSliceStringLength(c[1:], 2) +} + +func (r Ref) StringLength() (n int) { + rlen := len(r) + if rlen == 0 { + return 0 + } + + if s, ok := r[0].Value.(String); ok { + n = len(s) // first term should never be quoted + } else { + n = r[0].StringLength() + } + + if rlen == 1 { + return n + } + + for _, p := range r[1:] { + switch v := p.Value.(type) { + case String: + str := string(v) + if IsVarCompatibleString(str) && !IsKeyword(str) { + n += 1 + len(str) // dot + name + } else { + n += 2 + p.StringLength() // brackets + } + default: + n += 2 + p.StringLength() // brackets + } + } + return n +} + +func (v Var) StringLength() int { + if v.IsWildcard() { + return 1 + } + return len(v) +} + +func (s *SetComprehension) StringLength() int { + return s.Term.StringLength() + s.Body.StringLength() + 5 // {} and " | " +} + +func (a *ArrayComprehension) StringLength() int { + return a.Term.StringLength() + a.Body.StringLength() + 5 // [] and " | " +} + +func (o *ObjectComprehension) StringLength() (n int) { + n += o.Key.StringLength() + n += o.Value.StringLength() + n += o.Body.StringLength() + return n + 7 // "{}"", " | ", and ": " +} + +func (m *Module) StringLength() (n int) { + if m.Package != nil { + n += m.Package.StringLength() + 2 // newlines + } + + if len(m.Imports) > 0 { + for _, imp := range m.Imports { + n += imp.StringLength() + 1 // newline + } + } + + if len(m.Rules) > 0 { + for _, rule := range m.Rules { + n += rule.stringLengthWithOpts(toStringOpts{regoVersion: m.regoVersion}) + 1 // newline + } + } + + return n +} + +func (p *Package) StringLength() int { + if p == nil { + return 21 // + } + if len(p.Path) <= 1 { + return 25 + p.Path.StringLength() // // package + } + + return 8 + 
p.Path[1:].StringLength() // "package ..." +} + +func (i *Import) StringLength() (n int) { + n = 7 + i.Path.StringLength() // "import " and path + if i.Alias != "" { + n += 4 + i.Alias.StringLength() // " as " and alias + } + return n +} + +func (r *Rule) StringLength() int { + return r.stringLengthWithOpts(toStringOpts{}) +} + +func (r *Rule) stringLengthWithOpts(opts toStringOpts) int { + n := 0 + if r.Default { + n += 8 // "default " + } + n += r.Head.stringLengthWithOpts(opts) + if !r.Default { + switch opts.RegoVersion() { + case RegoV1, RegoV0CompatV1: + n += 6 // " if { " + default: + n += 3 // " { " + } + n += r.Body.StringLength() + 2 // body and closing " }" + } + if r.Else != nil { + n += r.Else.stringLengthWithOpts(opts) + } + return n +} + +func (h *Head) StringLength() int { + return h.stringLengthWithOpts(toStringOpts{}) +} + +func (h *Head) stringLengthWithOpts(opts toStringOpts) int { + n := h.Reference.StringLength() + containsAdded := false + switch { + case len(h.Args) != 0: + n += h.Args.StringLength() + case len(h.Reference) == 1 && h.Key != nil: + switch opts.RegoVersion() { + case RegoV0: + n += 2 + h.Key.StringLength() // for [] + default: + n += 10 + h.Key.StringLength() // " contains " + containsAdded = true + } + } + if h.Value != nil { + if h.Assign { + n += 4 // " := " + } else { + n += 3 // " = " + } + n += h.Value.StringLength() + } else if !containsAdded && h.Name == "" && h.Key != nil { + n += 10 + h.Key.StringLength() // " contains " + } + return n +} + +func (a Args) StringLength() (n int) { + n = 2 // () + for _, t := range a { + n += t.StringLength() + 2 // ", " + } + return n - 2 // minus last ", " +} + +func (b Body) StringLength() (n int) { + for _, expr := range b { + n += expr.StringLength() + 2 // "; " + } + return max(n-2, 0) // minus last "; " (if `n` isn't 0) +} + +func (e *Expr) StringLength() (n int) { + if e.Negated { + n += 4 // "not " + } + switch terms := e.Terms.(type) { + case []*Term: + if e.IsEquality() && 
validEqAssignArgCount(e) { + n += terms[1].StringLength() + len(Equality.Infix) + terms[2].StringLength() + 2 // spaces around = + } else { + n += Call(terms).StringLength() + } + case StringLengther: + n += terms.StringLength() + default: + panic(fmt.Sprintf("string length estimation not implemented for type: %T", e.Terms)) + } + + for _, w := range e.With { + n += w.StringLength() + 1 // space before with + } + + return n +} + +func (w *With) StringLength() int { + return w.Target.StringLength() + w.Value.StringLength() + 9 // "with " and " as " +} + +func (e *Every) StringLength() int { + n := 6 // "every " + if e.Key != nil { + n += e.Key.StringLength() + 2 // ", " + } + n += e.Value.StringLength() + 4 // " in " + n += e.Domain.StringLength() + 3 // " { " + n += e.Body.StringLength() + 2 // " }" + return n +} + +func (s *SomeDecl) StringLength() int { + n := 5 // "some " + if call, ok := s.Symbols[0].Value.(Call); ok { + n += 4 // " in " + n += call[1].StringLength() + if len(call) == 4 { + n += 2 // ", " + } + n += call[2].StringLength() + if len(call) == 4 { + n += call[3].StringLength() + } + return n + } + return n + TermSliceStringLength(s.Symbols, 2) +} + +func (c *Comment) StringLength() int { + return 1 + len(c.Text) // '#' + text +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go b/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go new file mode 100644 index 0000000000..72ec03f8cc --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go @@ -0,0 +1,56 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "reflect" + "strings" +) + +// TypeName returns a human readable name for the AST element type. 
+func TypeName(x any) string { + if _, ok := x.(*lazyObj); ok { + return "object" + } + return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name()) +} + +// ValueName returns a human readable name for the AST Value type. +// This is preferrable over calling TypeName when the argument is known to be +// a Value, as this doesn't require reflection (= heap allocations). +func ValueName(x Value) string { + switch x.(type) { + case String: + return "string" + case Boolean: + return "boolean" + case Number: + return "number" + case Null: + return "null" + case Var: + return "var" + case Object: + return "object" + case Set: + return "set" + case Ref: + return "ref" + case Call: + return "call" + case *Array: + return "array" + case *ArrayComprehension: + return "arraycomprehension" + case *ObjectComprehension: + return "objectcomprehension" + case *SetComprehension: + return "setcomprehension" + case *TemplateString: + return "templatestring" + } + + return TypeName(x) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go b/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go new file mode 100644 index 0000000000..500bb073cf --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/syncpools.go @@ -0,0 +1,38 @@ +package ast + +import ( + "bytes" + "sync" + + "github.com/open-policy-agent/opa/v1/util" +) + +var ( + TermPtrPool = util.NewSyncPool[Term]() + BytesReaderPool = util.NewSyncPool[bytes.Reader]() + IndexResultPool = util.NewSyncPool[IndexResult]() + + // Needs custom pool because of custom Put logic. 
+ varVisitorPool = &vvPool{ + pool: sync.Pool{ + New: func() any { + return NewVarVisitor() + }, + }, + } +) + +type vvPool struct { + pool sync.Pool +} + +func (p *vvPool) Get() *VarVisitor { + return p.pool.Get().(*VarVisitor) +} + +func (p *vvPool) Put(vv *VarVisitor) { + if vv != nil { + vv.Clear() + p.pool.Put(vv) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/term.go b/vendor/github.com/open-policy-agent/opa/v1/ast/term.go new file mode 100644 index 0000000000..436b22eb2a --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/term.go @@ -0,0 +1,3283 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net/url" + "slices" + "strconv" + "strings" + "sync" + "unicode" + + "github.com/cespare/xxhash/v2" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/util" +) + +// maxBindingsEstimate is the cap for binding count estimates in comprehensions. +// This value aligns with maxLinearScan in topdown/bindings.go. +const maxBindingsEstimate = 16 + +// EstimateBodyBindingCount returns an estimate of the number of bindings needed +// for evaluating a comprehension body. It uses the body length as a heuristic, +// capped at maxBindingsEstimate. +func EstimateBodyBindingCount(body Body) (estimate int) { + return min(len(body), maxBindingsEstimate) +} + +var ( + NullValue Value = Null{} + + errFindNotFound = errors.New("find: not found") +) + +// Location records a position in source code. +type Location = location.Location + +// NewLocation returns a new Location object. 
+func NewLocation(text []byte, file string, row int, col int) *Location { + return location.NewLocation(text, file, row, col) +} + +// Value declares the common interface for all Term values. Every kind of Term value +// in the language is represented as a type that implements this interface: +// +// - Null, Boolean, Number, String +// - Object, Array, Set +// - Variables, References +// - Array, Set, and Object Comprehensions +// - Calls +// - Template Strings +type Value interface { + Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively. + Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found. + Hash() int // Returns hash code of the value. + IsGround() bool // IsGround returns true if this value is not a variable or contains no variables. + String() string // String returns a human readable string representation of the value. + + StringLengther // All Values must be able to report their string length during optimization. +} + +// InterfaceToValue converts a native Go value x to a Value. 
+func InterfaceToValue(x any) (Value, error) { + switch x := x.(type) { + case Value: + return x, nil + case nil: + return NullValue, nil + case bool: + return InternedValue(x), nil + case json.Number: + if interned := InternedIntNumberTermFromString(string(x)); interned != nil { + return interned.Value, nil + } + return Number(x), nil + case int: + return InternedValueOr(x, newIntNumberValue), nil + case int64: + return InternedValueOr(x, newInt64NumberValue), nil + case uint64: + return InternedValueOr(x, newUint64NumberValue), nil + case float64: + return floatNumber(x), nil + case string: + return String(x), nil + case []any: + r := util.NewPtrSlice[Term](len(x)) + for i, e := range x { + e, err := InterfaceToValue(e) + if err != nil { + return nil, err + } + r[i].Value = e + } + return NewArray(r...), nil + case []string: + r := util.NewPtrSlice[Term](len(x)) + for i, e := range x { + r[i].Value = String(e) + } + return NewArray(r...), nil + case map[string]any: + kvs := util.NewPtrSlice[Term](len(x) * 2) + idx := 0 + for k, v := range x { + kvs[idx].Value = String(k) + v, err := InterfaceToValue(v) + if err != nil { + return nil, err + } + kvs[idx+1].Value = v + idx += 2 + } + tuples := make([][2]*Term, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + tuples[i/2] = *(*[2]*Term)(kvs[i : i+2]) + } + return NewObject(tuples...), nil + case map[string]string: + r := newobject(len(x)) + for k, v := range x { + r.Insert(StringTerm(k), StringTerm(v)) + } + return r, nil + default: + ptr := util.Reference(x) + if err := util.RoundTrip(ptr); err != nil { + return nil, fmt.Errorf("ast: interface conversion: %w", err) + } + return InterfaceToValue(*ptr) + } +} + +// ValueFromReader returns an AST value from a JSON serialized value in the reader. 
+func ValueFromReader(r io.Reader) (Value, error) { + var x any + if err := util.NewJSONDecoder(r).Decode(&x); err != nil { + return nil, err + } + return InterfaceToValue(x) +} + +// As converts v into a Go native type referred to by x. +func As(v Value, x any) error { + return util.NewJSONDecoder(strings.NewReader(v.String())).Decode(x) +} + +// Resolver defines the interface for resolving references to native Go values. +type Resolver interface { + Resolve(Ref) (any, error) +} + +// ValueResolver defines the interface for resolving references to AST values. +type ValueResolver interface { + Resolve(Ref) (Value, error) +} + +// UnknownValueErr indicates a ValueResolver was unable to resolve a reference +// because the reference refers to an unknown value. +type UnknownValueErr struct{} + +func (UnknownValueErr) Error() string { + return "unknown value" +} + +// IsUnknownValueErr returns true if the err is an UnknownValueErr. +func IsUnknownValueErr(err error) bool { + _, ok := err.(UnknownValueErr) + return ok +} + +type illegalResolver struct{} + +func (illegalResolver) Resolve(ref Ref) (any, error) { + return nil, fmt.Errorf("illegal value: %v", ref) +} + +// ValueToInterface returns the Go representation of an AST value. The AST +// value should not contain any values that require evaluation (e.g., vars, +// comprehensions, etc.) 
func ValueToInterface(v Value, resolver Resolver) (any, error) {
	return valueToInterface(v, resolver, JSONOpt{})
}

// valueToInterface is the internal workhorse for ValueToInterface/JSONWithOpt.
// References are delegated to resolver; opt controls set sorting and lazy-map
// copying.
func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (any, error) {
	switch v := v.(type) {
	case Null:
		return nil, nil
	case Boolean:
		return bool(v), nil
	case Number:
		return json.Number(v), nil
	case String:
		return string(v), nil
	case *Array:
		buf := []any{}
		for i := range v.Len() {
			x1, err := valueToInterface(v.Elem(i).Value, resolver, opt)
			if err != nil {
				return nil, err
			}
			buf = append(buf, x1)
		}
		return buf, nil
	case *object:
		buf := make(map[string]any, v.Len())
		err := v.Iter(func(k, v *Term) error {
			ki, err := valueToInterface(k.Value, resolver, opt)
			if err != nil {
				return err
			}
			// JSON object keys must be strings: non-string keys are encoded
			// to their JSON text (trailing newline trimmed).
			var str string
			var ok bool
			if str, ok = ki.(string); !ok {
				var buf bytes.Buffer
				if err := json.NewEncoder(&buf).Encode(ki); err != nil {
					return err
				}
				str = strings.TrimSpace(buf.String())
			}
			vi, err := valueToInterface(v.Value, resolver, opt)
			if err != nil {
				return err
			}
			buf[str] = vi
			return nil
		})
		if err != nil {
			return nil, err
		}
		return buf, nil
	case *lazyObj:
		// Only force (deep-convert) the lazy object when callers asked for an
		// isolated copy; otherwise hand back the shared native map.
		if opt.CopyMaps {
			return valueToInterface(v.force(), resolver, opt)
		}
		return v.native, nil
	case Set:
		buf := []any{}
		iter := func(x *Term) error {
			x1, err := valueToInterface(x.Value, resolver, opt)
			if err != nil {
				return err
			}
			buf = append(buf, x1)
			return nil
		}
		var err error
		if opt.SortSets {
			err = v.Sorted().Iter(iter)
		} else {
			err = v.Iter(iter)
		}
		if err != nil {
			return nil, err
		}
		return buf, nil
	case Ref:
		return resolver.Resolve(v)
	default:
		return nil, fmt.Errorf("%v requires evaluation", TypeName(v))
	}
}

// JSON returns the JSON representation of v. The value must not contain any
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
func JSON(v Value) (any, error) {
	return JSONWithOpt(v, JSONOpt{})
}

// JSONOpt defines parameters for AST to JSON conversion.
type JSONOpt struct {
	SortSets bool // sort sets before serializing (this makes conversion more expensive)
	CopyMaps bool // enforces copying of map[string]any read from the store
}

// JSONWithOpt returns the JSON representation of v. The value must not contain any
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
// Any reference encountered is rejected via illegalResolver.
func JSONWithOpt(v Value, opt JSONOpt) (any, error) {
	return valueToInterface(v, illegalResolver{}, opt)
}

// MustJSON returns the JSON representation of v. The value must not contain any
// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) If
// the conversion fails, this function will panic. This function is mostly for
// test purposes.
func MustJSON(v Value) any {
	r, err := JSON(v)
	if err != nil {
		panic(err)
	}
	return r
}

// MustInterfaceToValue converts a native Go value x to a Value. If the
// conversion fails, this function will panic. This function is mostly for test
// purposes.
func MustInterfaceToValue(x any) Value {
	v, err := InterfaceToValue(x)
	if err != nil {
		panic(err)
	}
	return v
}

// Term is an argument to a function.
type Term struct {
	Value    Value     `json:"value"`              // the value of the Term as represented in Go
	Location *Location `json:"location,omitempty"` // the location of the Term in the source
}

// NewTerm returns a new Term object with no Location set.
func NewTerm(v Value) *Term {
	return &Term{
		Value: v,
	}
}

// SetLocation updates the term's Location and returns the term itself.
func (term *Term) SetLocation(loc *Location) *Term {
	term.Location = loc
	return term
}

// Loc returns the Location of term. Safe to call on a nil term.
func (term *Term) Loc() *Location {
	if term == nil {
		return nil
	}
	return term.Location
}

// SetLoc sets the location on term.
func (term *Term) SetLoc(loc *Location) {
	term.SetLocation(loc)
}

// Copy returns a deep copy of term. Safe to call on a nil term.
func (term *Term) Copy() *Term {
	if term == nil {
		return nil
	}

	cpy := *term

	// Scalars and Vars are immutable and shared as-is; composite values are
	// deep-copied. Value kinds not listed here keep the shallow copy from
	// the struct assignment above.
	switch v := term.Value.(type) {
	case Null, Boolean, Number, String, Var:
		cpy.Value = v
	case Ref:
		cpy.Value = v.Copy()
	case *Array:
		cpy.Value = v.Copy()
	case Set:
		cpy.Value = v.Copy()
	case *object:
		cpy.Value = v.Copy()
	case *ArrayComprehension:
		cpy.Value = v.Copy()
	case *ObjectComprehension:
		cpy.Value = v.Copy()
	case *SetComprehension:
		cpy.Value = v.Copy()
	case *TemplateString:
		cpy.Value = v.Copy()
	case Call:
		cpy.Value = v.Copy()
	}

	return &cpy
}

// Equal returns true if this term equals the other term. Equality is
// defined for each kind of term, and does not compare the Location.
func (term *Term) Equal(other *Term) bool {
	if term == nil && other != nil {
		return false
	}
	if term != nil && other == nil {
		return false
	}
	// Covers both pointer-identical terms and the nil/nil case.
	if term == other {
		return true
	}

	return ValueEqual(term.Value, other.Value)
}

// Get returns a value referred to by name from the term.
func (term *Term) Get(name *Term) *Term {
	switch v := term.Value.(type) {
	case *object:
		return v.Get(name)
	case *Array:
		return v.Get(name)
	case interface {
		Get(*Term) *Term
	}:
		// Any other value exposing a Get method (e.g. lazy objects).
		return v.Get(name)
	case Set:
		// Sets map members to themselves.
		if v.Contains(name) {
			return name
		}
	}
	return nil
}

// Hash returns the hash code of the Term's Value. Its Location
// is ignored.
func (term *Term) Hash() int {
	return term.Value.Hash()
}

// IsGround returns true if this term's Value is ground.
func (term *Term) IsGround() bool {
	return term.Value.IsGround()
}

// termJSON is used to serialize Term to JSON without map allocation.
type termJSON struct {
	Location *Location `json:"location,omitempty"`
	Type     string    `json:"type"`
	Value    Value     `json:"value"`
}

// MarshalJSON returns the JSON encoding of the term.
//
// Specialized marshalling logic is required to include a type hint for Value.
func (term *Term) MarshalJSON() ([]byte, error) {
	d := termJSON{
		Type:  ValueName(term.Value),
		Value: term.Value,
	}
	// Location is only emitted when the package-level JSON options ask for it.
	jsonOptions := astJSON.GetOptions().MarshalOptions
	if jsonOptions.IncludeLocation.Term {
		d.Location = term.Location
	}
	return json.Marshal(d)
}

func (term *Term) String() string {
	return term.Value.String()
}

// UnmarshalJSON parses the byte array and stores the result in term.
// Specialized unmarshalling is required to handle Value and Location.
func (term *Term) UnmarshalJSON(bs []byte) error {
	v := map[string]any{}
	if err := util.UnmarshalJSON(bs, &v); err != nil {
		return err
	}
	// Reconstruct the typed Value from the type hint written by MarshalJSON.
	val, err := unmarshalValue(v)
	if err != nil {
		return err
	}
	term.Value = val

	if loc, ok := v["location"].(map[string]any); ok {
		term.Location = &Location{}
		err := unmarshalLocation(term.Location, loc)
		if err != nil {
			return err
		}
	}
	return nil
}

// Vars returns a VarSet with variables contained in this term.
func (term *Term) Vars() VarSet {
	vis := NewVarVisitor()
	vis.Walk(term)
	return vis.vars
}

// IsConstant returns true if the AST value is constant.
// Note that this is only a shallow check as we currently don't have a real
// notion of constant "vars" in the AST implementation. Meaning that while we could
// derive that a reference to a constant value is also constant, we currently don't.
func IsConstant(v Value) bool {
	// Fast paths: scalars are constant; vars, refs, comprehensions and calls
	// are not — no traversal needed for either.
	switch v.(type) {
	case Null, Boolean, Number, String:
		return true
	case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call:
		return false
	}

	// Composite value: walk it looking for any non-constant node.
	found := false
	vis := GenericVisitor{
		func(x any) bool {
			switch x.(type) {
			case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call:
				found = true
				return true // stop descending once found
			}
			return false
		},
	}
	vis.Walk(v)
	return !found
}

// IsComprehension returns true if the supplied value is a comprehension.
func IsComprehension(x Value) bool {
	switch x.(type) {
	case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
		return true
	}
	return false
}

// ContainsRefs returns true if the Value v contains refs.
func ContainsRefs(v any) bool {
	found := false
	WalkRefs(v, func(Ref) bool {
		found = true
		return found // short-circuit the walk on first hit
	})
	return found
}

// ContainsComprehensions returns true if the Value v contains comprehensions.
func ContainsComprehensions(v any) bool {
	found := false
	WalkClosures(v, func(x any) bool {
		switch x.(type) {
		case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
			found = true
			return found
		}
		return found
	})
	return found
}

// ContainsClosures returns true if the Value v contains closures.
// Unlike ContainsComprehensions, this also matches *Every expressions.
func ContainsClosures(v any) bool {
	found := false
	WalkClosures(v, func(x any) bool {
		switch x.(type) {
		case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every:
			found = true
			return found
		}
		return found
	})
	return found
}

// IsScalar returns true if the AST value is a scalar.
func IsScalar(v Value) bool {
	switch v.(type) {
	case String, Number, Boolean, Null:
		return true
	}
	return false
}

// Null represents the null value defined by JSON.
type Null struct{}

// NullTerm creates a new Term with a Null value.
func NullTerm() *Term {
	return &Term{Value: NullValue}
}

// Equal returns true if the other term Value is also Null.
func (Null) Equal(other Value) bool {
	switch other.(type) {
	case Null:
		return true
	default:
		return false
	}
}

// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to,
// or greater than other. Null sorts before every other value kind.
func (Null) Compare(other Value) int {
	if _, ok := other.(Null); ok {
		return 0
	}
	return -1
}

// Find returns the current value or a not found error.
func (Null) Find(path Ref) (Value, error) {
	if len(path) == 0 {
		return NullValue, nil
	}
	return nil, errFindNotFound
}

// Hash returns the hash code for the Value.
func (Null) Hash() int {
	return 0
}

// IsGround always returns true.
func (Null) IsGround() bool {
	return true
}

func (Null) String() string {
	return "null"
}

// Boolean represents a boolean value defined by JSON.
type Boolean bool

// BooleanTerm creates a new Term with a Boolean value.
// Always returns one of the two interned boolean values.
func BooleanTerm(b bool) *Term {
	return &Term{Value: internedBooleanValue(b)}
}

// Equal returns true if the other Value is a Boolean and is equal.
func (bol Boolean) Equal(other Value) bool {
	switch other := other.(type) {
	case Boolean:
		return bol == other
	default:
		return false
	}
}

// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to,
// or greater than other. false sorts before true; booleans sort after null and
// before all other value kinds.
func (bol Boolean) Compare(other Value) int {
	switch other := other.(type) {
	case Boolean:
		if bol == other {
			return 0
		}
		if !bol {
			return -1
		}
		return 1
	case Null:
		return 1
	}

	return -1
}

// Find returns the current value or a not found error.
func (bol Boolean) Find(path Ref) (Value, error) {
	if len(path) == 0 {
		// Interned value: avoids allocating a new Boolean on each call.
		return InternedTerm(bool(bol)).Value, nil
	}
	return nil, errFindNotFound
}

// Hash returns the hash code for the Value.
func (bol Boolean) Hash() int {
	if bol {
		return 1
	}
	return 0
}

// IsGround always returns true.
func (Boolean) IsGround() bool {
	return true
}

func (bol Boolean) String() string {
	return strconv.FormatBool(bool(bol))
}

// Number represents a numeric value as defined by JSON.
// The underlying representation is the JSON text of the number.
type Number json.Number

// NumberTerm creates a new Term with a Number value.
func NumberTerm(n json.Number) *Term {
	return &Term{Value: Number(n)}
}

// IntNumberTerm creates a new Term with an integer Number value.
// For values between -1 and 512, returns a cached Term to reduce allocations.
func IntNumberTerm(i int) *Term {
	return internedIntNumberTerm(i)
}

// UIntNumberTerm creates a new Term with an unsigned integer Number value.
func UIntNumberTerm(u uint64) *Term {
	return &Term{Value: newUint64NumberValue(u)}
}

// FloatNumberTerm creates a new Term with a floating point Number value.
// The value is formatted with the shortest representation that round-trips
// ('g', precision -1).
func FloatNumberTerm(f float64) *Term {
	s := strconv.FormatFloat(f, 'g', -1, 64)
	return &Term{Value: Number(s)}
}

// Equal returns true if the other Value is a Number and is equal.
// Numeric equality, not textual: e.g. "1" and "1.0" compare equal.
func (num Number) Equal(other Value) bool {
	if other, ok := other.(Number); ok {
		return NumberCompare(num, other) == 0
	}
	return false
}

// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to,
// or greater than other.
func (num Number) Compare(other Value) int {
	// Optimize for the common case, as calling Compare allocates on heap.
	if otherNum, yes := other.(Number); yes {
		return NumberCompare(num, otherNum)
	}

	return Compare(num, other)
}

// Find returns the current value or a not found error.
func (num Number) Find(path Ref) (Value, error) {
	if len(path) == 0 {
		return num, nil
	}
	return nil, errFindNotFound
}

// Hash returns the hash code for the Value.
func (num Number) Hash() int {
	// Short texts (up to 3 chars, i.e. -99..999) parse as ints directly, so
	// the integer itself can serve as the hash without further work.
	if len(num) < 4 {
		if i, err := strconv.Atoi(string(num)); err == nil {
			return i
		}
	}
	// NOTE(review): floats that compare equal must hash equal, hence the
	// truncation to int before falling back to hashing the raw text.
	if f, ok := num.Float64(); ok {
		return int(f)
	}
	return int(xxhash.Sum64String(string(num)))
}

// Int returns the int representation of num if possible.
func (num Number) Int() (int, bool) {
	i64, ok := num.Int64()
	return int(i64), ok
}

// Int64 returns the int64 representation of num if possible.
func (num Number) Int64() (int64, bool) {
	i, err := json.Number(num).Int64()
	if err != nil {
		return 0, false
	}
	return i, true
}

// Float64 returns the float64 representation of num if possible.
func (num Number) Float64() (float64, bool) {
	f, err := json.Number(num).Float64()
	if err != nil {
		return 0, false
	}
	return f, true
}

// IsGround always returns true.
func (Number) IsGround() bool {
	return true
}

// MarshalJSON returns JSON encoded bytes representing num.
func (num Number) MarshalJSON() ([]byte, error) {
	return json.Marshal(json.Number(num))
}

func (num Number) String() string {
	return string(num)
}

// newIntNumberValue builds a Number from an int without interning.
func newIntNumberValue(i int) Value {
	return Number(strconv.Itoa(i))
}

// newInt64NumberValue builds a Number from an int64 without interning.
func newInt64NumberValue(i int64) Value {
	return Number(strconv.FormatInt(i, 10))
}

// newUint64NumberValue builds a Number from a uint64 without interning.
func newUint64NumberValue(u uint64) Value {
	return Number(strconv.FormatUint(u, 10))
}

// floatNumber formats f with the shortest round-trippable representation.
func floatNumber(f float64) Number {
	return Number(strconv.FormatFloat(f, 'g', -1, 64))
}

// String represents a string value as defined by JSON.
type String string

// StringTerm creates a new Term with a String value.
func StringTerm(s string) *Term {
	return &Term{Value: String(s)}
}

// Equal returns true if the other Value is a String and is equal.
func (str String) Equal(other Value) bool {
	switch other := other.(type) {
	case String:
		return str == other
	default:
		return false
	}
}

// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to,
// or greater than other.
func (str String) Compare(other Value) int {
	// Optimize for the common case of one string being compared to another by
	// using a direct comparison of values. This avoids the allocation performed
	// when calling Compare and its any argument conversion.
	if otherStr, ok := other.(String); ok {
		if str == otherStr {
			return 0
		}
		if str < otherStr {
			return -1
		}
		return 1
	}

	return Compare(str, other)
}

// Find returns the current value or a not found error.
func (str String) Find(path Ref) (Value, error) {
	if len(path) == 0 {
		return str, nil
	}
	return nil, errFindNotFound
}

// IsGround always returns true.
func (String) IsGround() bool {
	return true
}

func (str String) String() string {
	return strconv.Quote(string(str))
}

// Hash returns the hash code for the Value.
func (str String) Hash() int {
	return int(xxhash.Sum64String(string(str)))
}

// TemplateString is a string with interpolated expressions. Parts holds the
// interleaved literal (*Term) and interpolated (*Expr) segments.
type TemplateString struct {
	Parts     []Node `json:"parts"`
	MultiLine bool   `json:"multi_line"`
}

// Copy returns a deep copy of ts. Parts are copied per their concrete type;
// any part that is neither *Expr nor *Term is left nil in the copy.
func (ts *TemplateString) Copy() *TemplateString {
	cpy := &TemplateString{MultiLine: ts.MultiLine, Parts: make([]Node, len(ts.Parts))}
	for i, p := range ts.Parts {
		switch v := p.(type) {
		case *Expr:
			cpy.Parts[i] = v.Copy()
		case *Term:
			cpy.Parts[i] = v.Copy()
		}
	}
	return cpy
}

// Equal returns true if other is a TemplateString with the same multi-line
// flag and pairwise-equal parts.
func (ts *TemplateString) Equal(other Value) bool {
	if o, ok := other.(*TemplateString); ok && ts.MultiLine == o.MultiLine && len(ts.Parts) == len(o.Parts) {
		for i, p := range ts.Parts {
			switch v := p.(type) {
			case *Expr:
				if ope, ok := o.Parts[i].(*Expr); !ok || !v.Equal(ope) {
					return false
				}
			case *Term:
				if opt, ok := o.Parts[i].(*Term); !ok || !v.Equal(opt) {
					return false
				}
			default:
				return false
			}
		}
		return true
	}
	return false
}

// Compare compares ts to other: single-line sorts before multi-line, then by
// number of parts, then part-wise.
func (ts *TemplateString) Compare(other Value) int {
	if ots, ok := other.(*TemplateString); ok {
		if ts.MultiLine != ots.MultiLine {
			if !ts.MultiLine {
				return -1
			}
			return 1
		}

		if len(ts.Parts) != len(ots.Parts) {
			return len(ts.Parts) - len(ots.Parts)
		}

		for i := range ts.Parts {
			if cmp := Compare(ts.Parts[i], ots.Parts[i]); cmp != 0 {
				return cmp
			}
		}

		return 0
	}
	return Compare(ts, other)
}

// Find returns the current value or a not found error.
func (ts *TemplateString) Find(path Ref) (Value, error) {
	if len(path) == 0 {
		return ts, nil
	}
	return nil, errFindNotFound
}

// Hash returns the hash code for the Value, summed over all parts.
func (ts *TemplateString) Hash() int {
	hash := 0
	for _, p := range ts.Parts {
		switch x := p.(type) {
		case *Expr:
			hash += x.Hash()
		case *Term:
			hash += x.Value.Hash()
		default:
			// Programmer error: parts are always *Expr or *Term.
			panic(fmt.Sprintf("invalid template part type %T", p))
		}
	}
	return hash
}

// IsGround always returns false: interpolated parts require evaluation.
func (*TemplateString) IsGround() bool {
	return false
}

func (ts *TemplateString) String() string {
	buf, _ := ts.AppendText(make([]byte, 0, ts.StringLength()))
	return util.ByteSliceToString(buf)
}

// TemplateStringTerm creates a new Term with a TemplateString value.
func TemplateStringTerm(multiLine bool, parts ...Node) *Term {
	return &Term{Value: &TemplateString{MultiLine: multiLine, Parts: parts}}
}

// EscapeTemplateStringStringPart escapes unescaped left curly braces in s - i.e "{" becomes "\{".
// The internal representation of string terms within a template string does **NOT**
// treat '{' as special, but expects code dealing with template strings to escape them when
// required, such as when serializing the complete template string. Code that programmatically
// constructs template strings should not pre-escape left curly braces in string term parts.
//
// // TODO(anders): a future optimization would be to combine this with the other escaping done
// // for strings (e.g. escaping quotes, backslashes, and JSON control characters) in a single operation
// // to avoid multiple passes and allocations over the same string. That's currently done by
// // strconv.Quote, so we would need to re-implement that logic in code of our own.
// // NOTE(anders): I would love to come up with a better name for this component than
// // "TemplateStringStringPart"..
+func EscapeTemplateStringStringPart(s string) string { + numUnescaped := countUnescapedLeftCurly(s) + if numUnescaped == 0 { + return s + } + + return util.ByteSliceToString(AppendEscapedTemplateStringStringPart(make([]byte, 0, len(s)+numUnescaped), s)) +} + +func AppendEscapedTemplateStringStringPart(buf []byte, s string) []byte { + if s[0] == '{' { + buf = append(buf, '\\', s[0]) + } else { + buf = append(buf, s[0]) + } + + for i := 1; i < len(s); i++ { + if s[i] == '{' && s[i-1] != '\\' { + buf = append(buf, '\\', s[i]) + } else { + buf = append(buf, s[i]) + } + } + + return buf +} + +func countUnescapedLeftCurly(s string) (n int) { + // Note(anders): while not the functions I'd intuitively reach for to solve this, + // they are hands down the fastest option here, as they're done in assembly, which + // performs about an order of magnitude better than a manual loop in Go. + if n = strings.Count(s, "{"); n > 0 { + n -= strings.Count(s, `\{`) + } + return n +} + +// Var represents a variable as defined by the language. +type Var string + +// VarTerm creates a new Term with a Variable value. +func VarTerm(v string) *Term { + return &Term{Value: InternedVarValue(v)} +} + +// Equal returns true if the other Value is a Variable and has the same value +// (name). +func (v Var) Equal(other Value) bool { + switch other := other.(type) { + case Var: + return v == other + default: + return false + } +} + +// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (v Var) Compare(other Value) int { + if otherVar, ok := other.(Var); ok { + return strings.Compare(string(v), string(otherVar)) + } + return Compare(v, other) +} + +// Find returns the current value or a not found error. +func (v Var) Find(path Ref) (Value, error) { + if len(path) == 0 { + return v, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. 
func (v Var) Hash() int {
	return int(xxhash.Sum64String(string(v)))
}

// IsGround always returns false.
func (Var) IsGround() bool {
	return false
}

// IsWildcard returns true if this is a wildcard variable.
func (v Var) IsWildcard() bool {
	return strings.HasPrefix(string(v), WildcardPrefix)
}

// IsGenerated returns true if this variable was generated during compilation.
func (v Var) IsGenerated() bool {
	return strings.HasPrefix(string(v), "__local")
}

func (v Var) String() string {
	// Special case for wildcard so that string representation is parseable. The
	// parser mangles wildcard variables to make their names unique and uses an
	// illegal variable name character (WildcardPrefix) to avoid conflicts. When
	// we serialize the variable here, we need to make sure it's parseable.
	if v.IsWildcard() {
		return WildcardString
	}
	return string(v)
}

// Ref represents a reference as defined by the language.
type Ref []*Term

// EmptyRef returns a new, empty reference.
func EmptyRef() Ref {
	return Ref([]*Term{})
}

// PtrRef returns a new reference against the head for the pointer
// s. Path components in the pointer are unescaped.
func PtrRef(head *Term, s string) (Ref, error) {
	s = strings.Trim(s, "/")
	if s == "" {
		return Ref{head}, nil
	}
	parts := strings.Split(s, "/")
	// Defensive bound: component counts at/above MaxInt32 are rejected.
	if maxLen := math.MaxInt32; len(parts) >= maxLen {
		return nil, fmt.Errorf("path too long: %s, %d > %d (max)", s, len(parts), maxLen)
	}
	ref := make(Ref, uint(len(parts))+1)
	ref[0] = head
	for i := range parts {
		var err error
		parts[i], err = url.PathUnescape(parts[i])
		if err != nil {
			return nil, err
		}
		ref[i+1] = StringTerm(parts[i])
	}
	return ref, nil
}

// RefTerm creates a new Term with a Ref value.
func RefTerm(r ...*Term) *Term {
	return &Term{Value: Ref(r)}
}

// Append returns a copy of ref with the term appended to the end.
func (ref Ref) Append(term *Term) Ref {
	n := len(ref)
	dst := make(Ref, n+1)
	copy(dst, ref)
	dst[n] = term
	return dst
}

// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref),
// existing elements are shifted to the right. If pos > len(ref)+1 this
// function panics.
func (ref Ref) Insert(x *Term, pos int) Ref {
	switch {
	case pos == len(ref):
		return ref.Append(x)
	case pos > len(ref)+1:
		panic("illegal index")
	}
	cpy := make(Ref, len(ref)+1)
	copy(cpy, ref[:pos])
	cpy[pos] = x
	copy(cpy[pos+1:], ref[pos:])
	return cpy
}

// Extend returns a copy of ref with the terms from other appended. The head of
// other will be converted to a string.
func (ref Ref) Extend(other Ref) Ref {
	offset := len(ref)
	dst := make(Ref, offset+len(other))
	copy(dst, ref)

	// The head of other is a Var in its own ref; as a non-head element of the
	// combined ref it must become a String key.
	head := other[0].Copy()
	head.Value = String(head.Value.(Var))

	dst[offset] = head
	copy(dst[offset+1:], other[1:])
	return dst
}

// Concat returns a ref with the terms appended.
func (ref Ref) Concat(terms []*Term) Ref {
	if len(terms) == 0 {
		return ref
	}
	cpy := make(Ref, len(ref)+len(terms))
	copy(cpy, ref)
	copy(cpy[len(ref):], terms)
	return cpy
}

// Dynamic returns the offset of the first non-constant operand of ref,
// or -1 if the ref is entirely constant. A Call head makes the whole
// ref dynamic (offset 0).
func (ref Ref) Dynamic() int {
	switch ref[0].Value.(type) {
	case Call:
		return 0
	}
	// The head (index 0) is skipped: it is a Var by construction.
	for i := 1; i < len(ref); i++ {
		if !IsConstant(ref[i].Value) {
			return i
		}
	}
	return -1
}

// Copy returns a deep copy of ref.
func (ref Ref) Copy() Ref {
	return termSliceCopy(ref)
}

// CopyNonGround returns a new ref with deep copies of the non-ground parts and shallow
// copies of the ground parts. This is a *much* cheaper operation than Copy for operations
// that only intend to modify (e.g. plug) the non-ground parts. The head element of the ref
// is always shallow copied.
func (ref Ref) CopyNonGround() Ref {
	cpy := make(Ref, len(ref))
	cpy[0] = ref[0]

	for i := 1; i < len(ref); i++ {
		if ref[i].Value.IsGround() {
			cpy[i] = ref[i]
		} else {
			cpy[i] = ref[i].Copy()
		}
	}

	return cpy
}

// Equal returns true if ref is equal to other.
func (ref Ref) Equal(other Value) bool {
	switch o := other.(type) {
	case Ref:
		if len(ref) == len(o) {
			for i := range ref {
				if !ref[i].Equal(o[i]) {
					return false
				}
			}

			return true
		}
	}

	return false
}

// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to,
// or greater than other.
func (ref Ref) Compare(other Value) int {
	if o, ok := other.(Ref); ok {
		return termSliceCompare(ref, o)
	}

	return Compare(ref, other)
}

// Find returns the current value or a "not found" error.
func (ref Ref) Find(path Ref) (Value, error) {
	if len(path) == 0 {
		return ref, nil
	}
	return nil, errFindNotFound
}

// Hash returns the hash code for the Value.
func (ref Ref) Hash() int {
	return termSliceHash(ref)
}

// HasPrefix returns true if the other ref is a prefix of this ref.
func (ref Ref) HasPrefix(other Ref) bool {
	if len(other) > len(ref) {
		return false
	}
	for i := range other {
		if !ref[i].Equal(other[i]) {
			return false
		}
	}
	return true
}

// ConstantPrefix returns the constant portion of the ref starting from the head.
func (ref Ref) ConstantPrefix() Ref {
	i := ref.Dynamic()
	if i < 0 {
		// Fully constant: the whole ref is the prefix.
		return ref
	}
	return ref[:i]
}

// StringPrefix returns the string portion of the ref starting from the head.
func (ref Ref) StringPrefix() Ref {
	for i := 1; i < len(ref); i++ {
		switch ref[i].Value.(type) {
		case String: // pass
		default: // cut off
			return ref[:i]
		}
	}

	return ref
}

// GroundPrefix returns the ground portion of the ref starting from the head. By
// definition, the head of the reference is always ground.
func (ref Ref) GroundPrefix() Ref {
	for i := range ref {
		// Index 0 (the head) is ground by definition and never checked.
		if i > 0 && !ref[i].IsGround() {
			return ref[:i]
		}
	}

	return ref
}

// DynamicSuffix returns the dynamic portion of the ref.
// If the ref is not dynamic, nil is returned.
func (ref Ref) DynamicSuffix() Ref {
	i := ref.Dynamic()
	if i < 0 {
		return nil
	}
	return ref[i:]
}

// IsGround returns true if all of the parts of the Ref are ground.
// The head is ground by definition, so only the tail is inspected.
func (ref Ref) IsGround() bool {
	if len(ref) < 2 {
		return true
	}
	return termSliceIsGround(ref[1:])
}

// IsNested returns true if this ref contains other Refs.
func (ref Ref) IsNested() bool {
	for _, x := range ref {
		if _, ok := x.Value.(Ref); ok {
			return true
		}
	}
	return false
}

// Ptr returns a slash-separated path string for this ref. If the ref
// contains non-string terms this function returns an error. Path
// components are escaped.
func (ref Ref) Ptr() (string, error) {
	buf := &strings.Builder{}
	tail := ref[1:]

	// Pre-compute the unescaped length so the builder allocates once. The
	// escaped output may still be longer, but this avoids most regrowth.
	l := max(len(tail)-1, 0) // number of '/' to add
	for i := range tail {
		str, ok := tail[i].Value.(String)
		if !ok {
			return "", errors.New("invalid path value type")
		}
		l += len(str)
	}
	buf.Grow(l)

	for i := range tail {
		if i > 0 {
			buf.WriteByte('/')
		}
		str := string(tail[i].Value.(String))
		// Sadly, the url package does not expose an appender for this.
		buf.WriteString(url.PathEscape(str))
	}
	return buf.String(), nil
}

// IsVarCompatibleString returns true if s is a valid variable name. String s is a valid variable
// name if it starts with a letter (a-z or A-Z) or underscore (_) and is followed by
// letters (a-z or A-Z), digits (0-9), and underscores.
+func IsVarCompatibleString(s string) bool { + l := len(s) + if l == 0 { + return false + } + // not exactly easy on the eyes, but often orders of magnitude faster + // than using a compiled regex (see benchmarks in term_bench_test.go) + is_letter := func(c byte) bool { + return (c > 96 && c < 123) || (c > 64 && c < 91) + } + is_digit := func(c byte) bool { + return c > 47 && c < 58 + } + + // first character must be a letter or underscore + c := s[0] + if !(is_letter(c) || c == 95) { + return false + } + + // remaining characters must be letters, digits, or underscores + for i := 1; i < l; i++ { + if c = s[i]; !(is_letter(c) || is_digit(c) || c == 95) { + return false + } + } + + return true +} + +func (ref Ref) String() string { + l := len(ref) + // First check for zero-alloc options, as making the buffer for AppendText + // always costs an allocation. + if l == 0 { + return "" + } + if l == 1 { + if s, ok := ref[0].Value.(String); ok { + // Ref head should normally be a Var, but if for some reason + // it's a string, don't quote it. + return string(s) + } + return ref[0].Value.String() + } + if name, ok := BuiltinNameFromRef(ref); ok { + return name + } + + buf, _ := ref.AppendText(make([]byte, 0, ref.StringLength())) + return util.ByteSliceToString(buf) +} + +// OutputVars returns a VarSet containing variables that would be bound by evaluating +// this expression in isolation. +func (ref Ref) OutputVars() VarSet { + vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) + vis.WalkRef(ref) + return vis.Vars() +} + +func (ref Ref) toArray() *Array { + terms := make([]*Term, 0, len(ref)) + for _, term := range ref { + if _, ok := term.Value.(String); ok { + terms = append(terms, term) + } else { + terms = append(terms, InternedTerm(term.Value.String())) + } + } + return NewArray(terms...) +} + +// QueryIterator defines the interface for querying AST documents with references. 
type QueryIterator func(map[Var]Value, Value) error

// ArrayTerm creates a new Term with an Array value.
func ArrayTerm(a ...*Term) *Term {
	return NewTerm(NewArray(a...))
}

// NewArray creates an Array with the terms provided. The array will
// use the provided term slice.
func NewArray(a ...*Term) *Array {
	// Element hashes are computed once up front and cached alongside the
	// elements; rehash folds them into the aggregate array hash.
	hs := make([]int, len(a))
	for i, e := range a {
		hs[i] = e.Value.Hash()
	}
	arr := &Array{elems: a, hashs: hs, ground: termSliceIsGround(a)}
	arr.rehash()
	return arr
}

// NewArrayWithCapacity returns a new empty Array with the given capacity pre-allocated.
func NewArrayWithCapacity(capacity int) *Array {
	return &Array{
		elems:  make([]*Term, 0, capacity),
		hashs:  make([]int, 0, capacity),
		ground: true,
	}
}

// Array represents an array as defined by the language. Arrays are similar to the
// same types as defined by JSON with the exception that they can contain Vars
// and References.
type Array struct {
	elems  []*Term
	hashs  []int // element hashes
	hash   int
	ground bool
}

// Copy returns a deep copy of arr.
func (arr *Array) Copy() *Array {
	return &Array{
		elems:  termSliceCopy(arr.elems),
		hashs:  slices.Clone(arr.hashs),
		hash:   arr.hash,
		ground: arr.ground,
	}
}

// Equal returns true if arr is equal to other.
func (arr *Array) Equal(other Value) bool {
	// Pointer identity short-circuits the element-wise comparison.
	if arr == other {
		return true
	}

	if other, ok := other.(*Array); ok && len(arr.elems) == len(other.elems) {
		for i := range arr.elems {
			if !arr.elems[i].Equal(other.elems[i]) {
				return false
			}
		}
		return true
	}

	return false
}

// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to,
// or greater than other.
+func (arr *Array) Compare(other Value) int { + if b, ok := other.(*Array); ok { + return termSliceCompare(arr.elems, b.elems) + } + + sortA := sortOrder(arr) + sortB := sortOrder(other) + + if sortA < sortB { + return -1 + } else if sortB < sortA { + return 1 + } + + return Compare(arr, other) +} + +// Find returns the value at the index or an out-of-range error. +func (arr *Array) Find(path Ref) (Value, error) { + if len(path) == 0 { + return arr, nil + } + num, ok := path[0].Value.(Number) + if !ok { + return nil, errFindNotFound + } + i, ok := num.Int() + if !ok || i < 0 || i >= arr.Len() { + return nil, errFindNotFound + } + + term := arr.Elem(i) + // Using Find on scalar values costs an allocation (type -> Value conversion) + // and since we already have the Value here, we can avoid that. + if len(path) == 1 && IsScalar(term.Value) { + return term.Value, nil + } + + return term.Value.Find(path[1:]) +} + +// Get returns the element at pos or nil if not possible. +func (arr *Array) Get(pos *Term) *Term { + num, ok := pos.Value.(Number) + if !ok { + return nil + } + + if i, ok := num.Int(); ok && i >= 0 && i < len(arr.elems) { + return arr.elems[i] + } + + return nil +} + +// Sorted returns a new Array that contains the sorted elements of arr. +func (arr *Array) Sorted() *Array { + cpy := make([]*Term, len(arr.elems)) + for i := range cpy { + cpy[i] = arr.elems[i] + } + + slices.SortFunc(cpy, TermValueCompare) + + a := NewArray(cpy...) + a.hashs = arr.hashs + return a +} + +// Hash returns the hash code for the Value. +func (arr *Array) Hash() int { + return arr.hash +} + +// IsGround returns true if all of the Array elements are ground. +func (arr *Array) IsGround() bool { + return arr.ground +} + +// MarshalJSON returns JSON encoded bytes representing arr. 
+func (arr *Array) MarshalJSON() ([]byte, error) { + if len(arr.elems) == 0 { + return []byte(`[]`), nil + } + return json.Marshal(arr.elems) +} + +func (arr *Array) String() string { + buf, _ := arr.AppendText(make([]byte, 0, arr.StringLength())) + return util.ByteSliceToString(buf) +} + +// Len returns the number of elements in the array. +func (arr *Array) Len() int { + return len(arr.elems) +} + +// Elem returns the element i of arr. +func (arr *Array) Elem(i int) *Term { + return arr.elems[i] +} + +// Set sets the element i of arr. +func (arr *Array) Set(i int, v *Term) { + arr.set(i, v) +} + +// rehash updates the cached hash of arr. +func (arr *Array) rehash() { + arr.hash = 0 + for _, h := range arr.hashs { + arr.hash += h + } +} + +// set sets the element i of arr. +func (arr *Array) set(i int, v *Term) { + arr.ground = arr.ground && v.IsGround() + arr.elems[i] = v + arr.hashs[i] = v.Value.Hash() + arr.rehash() +} + +// Slice returns a slice of arr starting from i index to j. -1 +// indicates the end of the array. The returned value array is not a +// copy and any modifications to either of arrays may be reflected to +// the other. +func (arr *Array) Slice(i, j int) *Array { + var elems []*Term + var hashs []int + if j == -1 { + elems = arr.elems[i:] + hashs = arr.hashs[i:] + } else { + elems = arr.elems[i:j] + hashs = arr.hashs[i:j] + } + // If arr is ground, the slice is, too. + // If it's not, the slice could still be. + gr := arr.ground || termSliceIsGround(elems) + + s := &Array{elems: elems, hashs: hashs, ground: gr} + s.rehash() + return s +} + +// Iter calls f on each element in arr. If f returns an error, +// iteration stops and the return value is the error. +func (arr *Array) Iter(f func(*Term) error) error { + for i := range arr.elems { + if err := f(arr.elems[i]); err != nil { + return err + } + } + return nil +} + +// Until calls f on each element in arr. If f returns true, iteration stops. 
+func (arr *Array) Until(f func(*Term) bool) bool { + return slices.ContainsFunc(arr.elems, f) +} + +// Foreach calls f on each element in arr. +func (arr *Array) Foreach(f func(*Term)) { + for _, term := range arr.elems { + f(term) + } +} + +// Append appends a term to arr, returning the appended array. +func (arr *Array) Append(v *Term) *Array { + cpy := *arr + cpy.elems = append(arr.elems, v) + cpy.hashs = append(arr.hashs, v.Value.Hash()) + cpy.hash = arr.hash + v.Value.Hash() + cpy.ground = arr.ground && v.IsGround() + return &cpy +} + +// Set represents a set as defined by the language. +type Set interface { + Value + Len() int + Copy() Set + Diff(Set) Set + Intersect(Set) Set + Union(Set) Set + Add(*Term) + Iter(func(*Term) error) error + Until(func(*Term) bool) bool + Foreach(func(*Term)) + Contains(*Term) bool + Map(func(*Term) (*Term, error)) (Set, error) + Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error) + Sorted() *Array + Slice() []*Term +} + +// NewSet returns a new Set containing t. +func NewSet(t ...*Term) Set { + s := newset(len(t)) + for _, term := range t { + s.insert(term, false) + } + return s +} + +// NewSetWithCapacity returns a new empty Set with the given capacity pre-allocated. +func NewSetWithCapacity(capacity int) Set { + return newset(capacity) +} + +func newset(n int) *set { + var keys []*Term + if n > 0 { + keys = make([]*Term, 0, n) + } + return &set{ + elems: make(map[int]*Term, n), + keys: keys, + hash: 0, + ground: true, + sortGuard: sync.Once{}, + } +} + +// SetTerm returns a new Term representing a set containing terms t. +func SetTerm(t ...*Term) *Term { + set := NewSet(t...) + return &Term{ + Value: set, + } +} + +type set struct { + elems map[int]*Term + keys []*Term + hash int + ground bool + // Prevents race condition around sorting. + // We can avoid (the allocation cost of) using a pointer here as all + // methods of `set` use a pointer receiver, and the `sync.Once` value + // is never copied. 
+ sortGuard sync.Once +} + +// Copy returns a deep copy of s. +func (s *set) Copy() Set { + cpy := &set{ + hash: s.hash, + ground: s.ground, + sortGuard: sync.Once{}, + elems: make(map[int]*Term, len(s.elems)), + keys: make([]*Term, 0, len(s.keys)), + } + + for hash := range s.elems { + cpy.elems[hash] = s.elems[hash].Copy() + cpy.keys = append(cpy.keys, cpy.elems[hash]) + } + + return cpy +} + +// IsGround returns true if all terms in s are ground. +func (s *set) IsGround() bool { + return s.ground +} + +// Hash returns a hash code for s. +func (s *set) Hash() int { + return s.hash +} + +func (s *set) String() string { + buf, _ := s.AppendText(make([]byte, 0, s.StringLength())) + return util.ByteSliceToString(buf) +} + +func (s *set) sortedKeys() []*Term { + s.sortGuard.Do(func() { + slices.SortFunc(s.keys, TermValueCompare) + }) + return s.keys +} + +// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (s *set) Compare(other Value) int { + o1 := sortOrder(s) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o1 > o2 { + return 1 + } + t := other.(*set) + return termSliceCompare(s.sortedKeys(), t.sortedKeys()) +} + +// Find returns the set or dereferences the element itself. +func (s *set) Find(path Ref) (Value, error) { + if len(path) == 0 { + return s, nil + } + if !s.Contains(path[0]) { + return nil, errFindNotFound + } + return path[0].Value.Find(path[1:]) +} + +// Diff returns elements in s that are not in other. +func (s *set) Diff(other Set) Set { + if s.Compare(other) == 0 { + return NewSet() + } + + result := newset(len(s.keys)) + for _, term := range s.keys { + if !other.Contains(term) { + result.insert(term, false) + } + } + + return result +} + +// Intersect returns the set containing elements in both s and other. 
+func (s *set) Intersect(other Set) Set { + o := other.(*set) + n, m := s.Len(), o.Len() + ss := s + so := o + if m < n { + ss = o + so = s + n = m + } + + result := newset(n) + for _, term := range ss.keys { + if so.Contains(term) { + result.insert(term, false) + } + } + + return result +} + +// Union returns the set containing all elements of s and other. +func (s *set) Union(other Set) Set { + o := other.(*set) + // Pre-allocate with max size - avoids over-allocation for overlapping sets + // while only requiring one potential grow for disjoint sets. + r := newset(max(len(s.keys), len(o.keys))) + for _, term := range s.keys { + r.insert(term, false) + } + for _, term := range o.keys { + r.insert(term, false) + } + return r +} + +// Add updates s to include t. +func (s *set) Add(t *Term) { + s.insert(t, true) +} + +// Iter calls f on each element in s. If f returns an error, iteration stops +// and the return value is the error. +func (s *set) Iter(f func(*Term) error) error { + for _, term := range s.sortedKeys() { + if err := f(term); err != nil { + return err + } + } + return nil +} + +// Until calls f on each element in s. If f returns true, iteration stops. +func (s *set) Until(f func(*Term) bool) bool { + return slices.ContainsFunc(s.sortedKeys(), f) +} + +// Foreach calls f on each element in s. +func (s *set) Foreach(f func(*Term)) { + for _, term := range s.sortedKeys() { + f(term) + } +} + +// Map returns a new Set obtained by applying f to each value in s. +func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) { + mapped := make([]*Term, 0, len(s.keys)) + for _, x := range s.sortedKeys() { + term, err := f(x) + if err != nil { + return nil, err + } + mapped = append(mapped, term) + } + return NewSet(mapped...), nil +} + +// Reduce returns a Term produced by applying f to each value in s. The first +// argument to f is the reduced value (starting with i) and the second argument +// to f is the element in s. 
+func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) { + err := s.Iter(func(x *Term) error { + var err error + i, err = f(i, x) + if err != nil { + return err + } + return nil + }) + return i, err +} + +// Contains returns true if t is in s. +func (s *set) Contains(t *Term) bool { + return s.get(t) != nil +} + +// Len returns the number of elements in the set. +func (s *set) Len() int { + return len(s.keys) +} + +// MarshalJSON returns JSON encoded bytes representing s. +func (s *set) MarshalJSON() ([]byte, error) { + if s.keys == nil { + return []byte(`[]`), nil + } + return json.Marshal(s.sortedKeys()) +} + +// Sorted returns an Array that contains the sorted elements of s. +func (s *set) Sorted() *Array { + cpy := make([]*Term, len(s.keys)) + copy(cpy, s.sortedKeys()) + return NewArray(cpy...) +} + +// Slice returns a slice of terms contained in the set. +func (s *set) Slice() []*Term { + return s.sortedKeys() +} + +// NOTE(philipc): We assume a many-readers, single-writer model here. +// This method should NOT be used concurrently, or else we risk data races. +func (s *set) insert(x *Term, resetSortGuard bool) { + hash := x.Hash() + insertHash := hash + + for curr, ok := s.elems[insertHash]; ok; { + if KeyHashEqual(curr.Value, x.Value) { + return + } + + insertHash++ + curr, ok = s.elems[insertHash] + } + + s.elems[insertHash] = x + // O(1) insertion, but we'll have to re-sort the keys later. + s.keys = append(s.keys, x) + + if resetSortGuard { + // Reset the sync.Once instance. + // See https://github.com/golang/go/issues/25955 for why we do it this way. + // Note that this will always be the case when external code calls insert via + // Add, or otherwise. Internal code may however benefit from not having to + // re-create this pointer when it's known not to be needed. 
+ s.sortGuard = sync.Once{} + } + + s.hash += hash + s.ground = s.ground && x.IsGround() +} + +func (s *set) get(x *Term) *Term { + if len(s.elems) == 0 { + return nil + } + + hash := x.Hash() + + for curr, ok := s.elems[hash]; ok; { + // Pointer equality check first + if curr == x { + return curr + } + if KeyHashEqual(curr.Value, x.Value) { + return curr + } + + hash++ + curr, ok = s.elems[hash] + } + return nil +} + +// Object represents an object as defined by the language. +type Object interface { + Value + Len() int + Get(*Term) *Term + Copy() Object + Insert(*Term, *Term) + Iter(func(*Term, *Term) error) error + Until(func(*Term, *Term) bool) bool + Foreach(func(*Term, *Term)) + Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error) + Diff(other Object) Object + Intersect(other Object) [][3]*Term + Merge(other Object) (Object, bool) + MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) + Filter(filter Object) (Object, error) + Keys() []*Term + KeysIterator() ObjectKeysIterator + get(k *Term) *objectElem // To prevent external implementations +} + +// NewObject creates a new Object with t. +func NewObject(t ...[2]*Term) Object { + obj := newobject(len(t)) + for i := range t { + obj.insert(t[i][0], t[i][1], false) + } + return obj +} + +// NewObjectWithCapacity returns a new empty Object with the given capacity pre-allocated. +func NewObjectWithCapacity(capacity int) Object { + return newobject(capacity) +} + +// ObjectTerm creates a new Term with an Object value. 
+func ObjectTerm(o ...[2]*Term) *Term { + return &Term{Value: NewObject(o...)} +} + +func LazyObject(blob map[string]any) Object { + return &lazyObj{native: blob, cache: map[string]Value{}} +} + +type lazyObj struct { + strict Object + cache map[string]Value + native map[string]any +} + +func (l *lazyObj) force() Object { + if l.strict == nil { + l.strict = MustInterfaceToValue(l.native).(Object) + // NOTE(jf): a possible performance improvement here would be to check how many + // entries have been realized to AST in the cache, and if some threshold compared to the + // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache. + l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory. + } + return l.strict +} + +func (l *lazyObj) Compare(other Value) int { + o1 := sortOrder(l) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } + return l.force().Compare(other) +} + +func (l *lazyObj) Copy() Object { + return l +} + +func (l *lazyObj) Diff(other Object) Object { + return l.force().Diff(other) +} + +func (l *lazyObj) Intersect(other Object) [][3]*Term { + return l.force().Intersect(other) +} + +func (l *lazyObj) Iter(f func(*Term, *Term) error) error { + return l.force().Iter(f) +} + +func (l *lazyObj) Until(f func(*Term, *Term) bool) bool { + // NOTE(sr): there could be benefits in not forcing here -- if we abort because + // `f` returns true, we could save us from converting the rest of the object. 
+ return l.force().Until(f) +} + +func (l *lazyObj) Foreach(f func(*Term, *Term)) { + l.force().Foreach(f) +} + +func (l *lazyObj) Filter(filter Object) (Object, error) { + return l.force().Filter(filter) +} + +func (l *lazyObj) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { + return l.force().Map(f) +} + +func (l *lazyObj) MarshalJSON() ([]byte, error) { + return l.force().(*object).MarshalJSON() +} + +func (l *lazyObj) Merge(other Object) (Object, bool) { + return l.force().Merge(other) +} + +func (l *lazyObj) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { + return l.force().MergeWith(other, conflictResolver) +} + +func (l *lazyObj) Len() int { + return len(l.native) +} + +func (l *lazyObj) String() string { + return l.force().String() +} + +// get is merely there to implement the Object interface -- `get` there serves the +// purpose of prohibiting external implementations. It's never called for lazyObj. +func (*lazyObj) get(*Term) *objectElem { + return nil +} + +func (l *lazyObj) Get(k *Term) *Term { + if l.strict != nil { + return l.strict.Get(k) + } + if s, ok := k.Value.(String); ok { + if v, ok := l.cache[string(s)]; ok { + return NewTerm(v) + } + + if val, ok := l.native[string(s)]; ok { + var converted Value + switch val := val.(type) { + case map[string]any: + converted = LazyObject(val) + default: + converted = MustInterfaceToValue(val) + } + l.cache[string(s)] = converted + return NewTerm(converted) + } + } + return nil +} + +func (l *lazyObj) Insert(k, v *Term) { + l.force().Insert(k, v) +} + +func (*lazyObj) IsGround() bool { + return true +} + +func (l *lazyObj) Hash() int { + return l.force().Hash() +} + +func (l *lazyObj) Keys() []*Term { + if l.strict != nil { + return l.strict.Keys() + } + ret := make([]*Term, 0, len(l.native)) + for k := range l.native { + ret = append(ret, StringTerm(k)) + } + slices.SortFunc(ret, TermValueCompare) + + return ret +} + +func (l *lazyObj) 
KeysIterator() ObjectKeysIterator { + return &lazyObjKeysIterator{keys: l.Keys()} +} + +type lazyObjKeysIterator struct { + current int + keys []*Term +} + +func (ki *lazyObjKeysIterator) Next() (*Term, bool) { + if ki.current == len(ki.keys) { + return nil, false + } + ki.current++ + return ki.keys[ki.current-1], true +} + +func (l *lazyObj) Find(path Ref) (Value, error) { + if l.strict != nil { + return l.strict.Find(path) + } + if len(path) == 0 { + return l, nil + } + if p0, ok := path[0].Value.(String); ok { + if v, ok := l.cache[string(p0)]; ok { + return v.Find(path[1:]) + } + + if v, ok := l.native[string(p0)]; ok { + var converted Value + switch v := v.(type) { + case map[string]any: + converted = LazyObject(v) + default: + converted = MustInterfaceToValue(v) + } + l.cache[string(p0)] = converted + return converted.Find(path[1:]) + } + } + return nil, errFindNotFound +} + +type object struct { + elems map[int]*objectElem + keys []*objectElem + ground int // number of key and value grounds. Counting is required to support insert's key-value replace. + hash int + sortGuard sync.Once // Prevents race condition around sorting. +} + +func newobject(n int) *object { + var keys []*objectElem + if n > 0 { + keys = make([]*objectElem, 0, n) + } + return &object{ + elems: make(map[int]*objectElem, n), + keys: keys, + sortGuard: sync.Once{}, + } +} + +type objectElem struct { + key *Term + value *Term + next *objectElem +} + +// Item is a helper for constructing an tuple containing two Terms +// representing a key/value pair in an Object. +func Item(key, value *Term) [2]*Term { + return [2]*Term{key, value} +} + +func (obj *object) sortedKeys() []*objectElem { + obj.sortGuard.Do(func() { + slices.SortFunc(obj.keys, func(a, b *objectElem) int { + return a.key.Value.Compare(b.key.Value) + }) + }) + return obj.keys +} + +// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. 
+func (obj *object) Compare(other Value) int { + if x, ok := other.(*lazyObj); ok { + other = x.force() + } + o1 := sortOrder(obj) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } + a := obj + b := other.(*object) + // Ensure that keys are in canonical sorted order before use! + akeys := a.sortedKeys() + bkeys := b.sortedKeys() + minLen := len(akeys) + if len(b.keys) < len(akeys) { + minLen = len(bkeys) + } + for i := range minLen { + keysCmp := Compare(akeys[i].key, bkeys[i].key) + if keysCmp < 0 { + return -1 + } + if keysCmp > 0 { + return 1 + } + valA := akeys[i].value + valB := bkeys[i].value + valCmp := Compare(valA, valB) + if valCmp != 0 { + return valCmp + } + } + if len(akeys) < len(bkeys) { + return -1 + } + if len(bkeys) < len(akeys) { + return 1 + } + return 0 +} + +// Find returns the value at the key or undefined. +func (obj *object) Find(path Ref) (Value, error) { + if len(path) == 0 { + return obj, nil + } + term := obj.Get(path[0]) + if term == nil { + return nil, errFindNotFound + } + // Using Find on scalar values costs an allocation (type -> Value conversion) + // and since we already have the Value here, we can avoid that. + if len(path) == 1 && IsScalar(term.Value) { + return term.Value, nil + } + + return term.Value.Find(path[1:]) +} + +func (obj *object) Insert(k, v *Term) { + obj.insert(k, v, true) +} + +// Get returns the value of k in obj if k exists, otherwise nil. +func (obj *object) Get(k *Term) *Term { + if len(obj.elems) == 0 { + return nil + } + + hash := k.Hash() + for curr := obj.elems[hash]; curr != nil; curr = curr.next { + // Pointer equality check always fastest, and not too unlikely with interning. 
+ if curr.key == k { + return curr.value + } + + if KeyHashEqual(curr.key.Value, k.Value) { + return curr.value + } + } + return nil +} + +func KeyHashEqual(x, y Value) bool { + switch x := x.(type) { + case Null, Boolean, String, Var: + return x == y + case Number: + if y, ok := y.(Number); ok { + return x.Equal(y) + } + } + + return Compare(x, y) == 0 +} + +// Hash returns the hash code for the Value. +func (obj *object) Hash() int { + return obj.hash +} + +// IsGround returns true if all of the Object key/value pairs are ground. +func (obj *object) IsGround() bool { + return obj.ground == 2*len(obj.keys) +} + +// Copy returns a deep copy of obj. +func (obj *object) Copy() Object { + cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) { + return k.Copy(), v.Copy(), nil + }) + cpy.(*object).hash = obj.hash + return cpy +} + +// Diff returns a new Object that contains only the key/value pairs that exist in obj. +func (obj *object) Diff(other Object) Object { + r := newobject(obj.Len()) + for _, node := range obj.sortedKeys() { + if other.Get(node.key) == nil { + r.insert(node.key, node.value, false) + } + } + return r +} + +// Intersect returns a slice of term triplets that represent the intersection of keys +// between obj and other. For each intersecting key, the values from obj and other are included +// as the last two terms in the triplet (respectively). +func (obj *object) Intersect(other Object) [][3]*Term { + r := [][3]*Term{} + obj.Foreach(func(k, v *Term) { + if v2 := other.Get(k); v2 != nil { + r = append(r, [3]*Term{k, v, v2}) + } + }) + return r +} + +// Iter calls the function f for each key-value pair in the object. If f +// returns an error, iteration stops and the error is returned. +func (obj *object) Iter(f func(*Term, *Term) error) error { + for _, node := range obj.sortedKeys() { + if err := f(node.key, node.value); err != nil { + return err + } + } + return nil +} + +// Until calls f for each key-value pair in the object. 
If f returns +// true, iteration stops and Until returns true. Otherwise, return +// false. +func (obj *object) Until(f func(*Term, *Term) bool) bool { + for _, node := range obj.sortedKeys() { + if f(node.key, node.value) { + return true + } + } + return false +} + +// Foreach calls f for each key-value pair in the object. +func (obj *object) Foreach(f func(*Term, *Term)) { + for _, node := range obj.sortedKeys() { + f(node.key, node.value) + } +} + +// Map returns a new Object constructed by mapping each element in the object +// using the function f. If f returns an error, the error is returned by Map. +// If f return a nil key, the element is skipped. +func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { + cpy := newobject(obj.Len()) + for _, node := range obj.sortedKeys() { + k, v, err := f(node.key, node.value) + if err != nil { + return nil, err + } + if k != nil { + cpy.insert(k, v, false) + } + } + return cpy, nil +} + +// Keys returns the keys of obj. +func (obj *object) Keys() []*Term { + keys := make([]*Term, len(obj.keys)) + + for i, elem := range obj.sortedKeys() { + keys[i] = elem.key + } + + return keys +} + +// Returns an iterator over the obj's keys. +func (obj *object) KeysIterator() ObjectKeysIterator { + return newobjectKeysIterator(obj) +} + +// MarshalJSON returns JSON encoded bytes representing obj. +func (obj *object) MarshalJSON() ([]byte, error) { + sl := make([][2]*Term, obj.Len()) + for i, node := range obj.sortedKeys() { + sl[i] = Item(node.key, node.value) + } + return json.Marshal(sl) +} + +// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are +// overlapping keys between obj and other, the values of associated with the keys are merged. Only +// objects can be merged with other objects. If the values cannot be merged, the second turn value +// will be false. 
+func (obj *object) Merge(other Object) (Object, bool) { + return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) { + obj1, ok1 := v1.Value.(Object) + obj2, ok2 := v2.Value.(Object) + if !ok1 || !ok2 { + return nil, true + } + obj3, ok := obj1.Merge(obj2) + if !ok { + return nil, true + } + return NewTerm(obj3), false + }) +} + +// MergeWith returns a new Object containing the merged keys of obj and other. +// If there are overlapping keys between obj and other, the conflictResolver +// is called. The conflictResolver can return a merged value and a boolean +// indicating if the merge has failed and should stop. +func (obj *object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { + // Might overallocate assuming no conflicts is the common case, + // but that's typically faster than iterating over each object twice. + result := newobject(obj.Len() + other.Len()) + stop := obj.Until(func(k, v *Term) bool { + v2 := other.Get(k) + // The key didn't exist in other, keep the original value + if v2 == nil { + result.insert(k, v, false) + return false + } + + // The key exists in both, resolve the conflict if possible + merged, stop := conflictResolver(v, v2) + if !stop { + result.insert(k, merged, false) + } + return stop + }) + + if stop { + return nil, false + } + + // Copy in any values from other for keys that don't exist in obj + other.Foreach(func(k, v *Term) { + if v2 := obj.Get(k); v2 == nil { + result.insert(k, v, false) + } + }) + return result, true +} + +// Filter returns a new object from values in obj where the keys are +// found in filter. Array indices for values can be specified as +// number strings. +func (obj *object) Filter(filter Object) (Object, error) { + filtered, err := filterObject(obj, filter) + if err != nil { + return nil, err + } + return filtered.(Object), nil +} + +// Len returns the number of elements in the object. 
+func (obj *object) Len() int { + return len(obj.keys) +} + +func (obj *object) String() string { + buf, _ := obj.AppendText(make([]byte, 0, obj.StringLength())) + return util.ByteSliceToString(buf) +} + +func (*object) get(*Term) *objectElem { + return nil +} + +// NOTE(philipc): We assume a many-readers, single-writer model here. +// This method should NOT be used concurrently, or else we risk data races. +func (obj *object) insert(k, v *Term, resetSortGuard bool) { + hash := k.Hash() + head := obj.elems[hash] + + for curr := head; curr != nil; curr = curr.next { + if KeyHashEqual(curr.key.Value, k.Value) { + if curr.value.IsGround() { + obj.ground-- + } + if v.IsGround() { + obj.ground++ + } + + // Update hash based on the new value + curr.value = v + obj.elems[hash] = curr + obj.hash = 0 + for ehash := range obj.elems { + obj.hash += ehash + obj.elems[ehash].value.Hash() + } + + return + } + } + + obj.elems[hash] = &objectElem{key: k, value: v, next: head} + // O(1) insertion, but we'll have to re-sort the keys later. + obj.keys = append(obj.keys, obj.elems[hash]) + + if resetSortGuard { + // Reset the sync.Once instance. + // See https://github.com/golang/go/issues/25955 for why we do it this way. + // Note that this will always be the case when external code calls insert via + // Add, or otherwise. Internal code may however benefit from not having to + // re-create this when it's known not to be needed. 
+ obj.sortGuard = sync.Once{} + } + + obj.hash += hash + v.Hash() + + if k.IsGround() { + obj.ground++ + } + if v.IsGround() { + obj.ground++ + } +} + +func filterObject(o Value, filter Value) (Value, error) { + if (Null{}).Equal(filter) { + return o, nil + } + + filteredObj, ok := filter.(*object) + if !ok { + return nil, fmt.Errorf("invalid filter value %q, expected an object", filter) + } + + switch v := o.(type) { + case String, Number, Boolean, Null: + return o, nil + case *Array: + values := make([]*Term, 0, v.Len()) + for i := range v.Len() { + subFilter := filteredObj.Get(InternedIntegerString(i)) + if subFilter != nil { + filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value) + if err != nil { + return nil, err + } + values = append(values, NewTerm(filteredValue)) + } + } + return NewArray(values...), nil + case Set: + terms := make([]*Term, 0, v.Len()) + for _, t := range v.Slice() { + if filteredObj.Get(t) != nil { + filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value) + if err != nil { + return nil, err + } + terms = append(terms, NewTerm(filteredValue)) + } + } + return NewSet(terms...), nil + case *object: + values := NewObject() + + iterObj := v + other := filteredObj + if v.Len() < filteredObj.Len() { + iterObj = filteredObj + other = v + } + + err := iterObj.Iter(func(key *Term, _ *Term) error { + if other.Get(key) != nil { + filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value) + if err != nil { + return err + } + values.Insert(key, NewTerm(filteredValue)) + } + return nil + }) + return values, err + default: + return nil, fmt.Errorf("invalid object value type %q", v) + } +} + +// NOTE(philipc): The only way to get an ObjectKeyIterator should be +// from an Object. This ensures that the iterator can have implementation- +// specific details internally, with no contracts except to the very +// limited interface. 
+type ObjectKeysIterator interface { + Next() (*Term, bool) +} + +type objectKeysIterator struct { + obj *object + numKeys int + index int +} + +func newobjectKeysIterator(o *object) ObjectKeysIterator { + return &objectKeysIterator{ + obj: o, + numKeys: o.Len(), + index: 0, + } +} + +func (oki *objectKeysIterator) Next() (*Term, bool) { + if oki.index == oki.numKeys || oki.numKeys == 0 { + return nil, false + } + oki.index++ + return oki.obj.sortedKeys()[oki.index-1].key, true +} + +// ArrayComprehension represents an array comprehension as defined in the language. +type ArrayComprehension struct { + Term *Term `json:"term"` + Body Body `json:"body"` +} + +// ArrayComprehensionTerm creates a new Term with an ArrayComprehension value. +func ArrayComprehensionTerm(term *Term, body Body) *Term { + return &Term{ + Value: &ArrayComprehension{ + Term: term, + Body: body, + }, + } +} + +// Copy returns a deep copy of ac. +func (ac *ArrayComprehension) Copy() *ArrayComprehension { + cpy := *ac + cpy.Body = ac.Body.Copy() + cpy.Term = ac.Term.Copy() + return &cpy +} + +// Equal returns true if ac is equal to other. +func (ac *ArrayComprehension) Equal(other Value) bool { + return Compare(ac, other) == 0 +} + +// Compare compares ac to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (ac *ArrayComprehension) Compare(other Value) int { + return Compare(ac, other) +} + +// Find returns the current value or a not found error. +func (ac *ArrayComprehension) Find(path Ref) (Value, error) { + if len(path) == 0 { + return ac, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code of the Value. +func (ac *ArrayComprehension) Hash() int { + return ac.Term.Hash() + ac.Body.Hash() +} + +// IsGround returns true if the Term and Body are ground. 
+func (ac *ArrayComprehension) IsGround() bool { + return ac.Term.IsGround() && ac.Body.IsGround() +} + +func (ac *ArrayComprehension) String() string { + buf, _ := ac.AppendText(make([]byte, 0, ac.StringLength())) + return util.ByteSliceToString(buf) +} + +// ObjectComprehension represents an object comprehension as defined in the language. +type ObjectComprehension struct { + Key *Term `json:"key"` + Value *Term `json:"value"` + Body Body `json:"body"` +} + +// ObjectComprehensionTerm creates a new Term with an ObjectComprehension value. +func ObjectComprehensionTerm(key, value *Term, body Body) *Term { + return &Term{ + Value: &ObjectComprehension{ + Key: key, + Value: value, + Body: body, + }, + } +} + +// Copy returns a deep copy of oc. +func (oc *ObjectComprehension) Copy() *ObjectComprehension { + cpy := *oc + cpy.Body = oc.Body.Copy() + cpy.Key = oc.Key.Copy() + cpy.Value = oc.Value.Copy() + return &cpy +} + +// Equal returns true if oc is equal to other. +func (oc *ObjectComprehension) Equal(other Value) bool { + return Compare(oc, other) == 0 +} + +// Compare compares oc to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (oc *ObjectComprehension) Compare(other Value) int { + return Compare(oc, other) +} + +// Find returns the current value or a not found error. +func (oc *ObjectComprehension) Find(path Ref) (Value, error) { + if len(path) == 0 { + return oc, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code of the Value. +func (oc *ObjectComprehension) Hash() int { + return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash() +} + +// IsGround returns true if the Key, Value and Body are ground. 
+func (oc *ObjectComprehension) IsGround() bool { + return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround() +} + +func (oc *ObjectComprehension) String() string { + buf, _ := oc.AppendText(make([]byte, 0, oc.StringLength())) + return util.ByteSliceToString(buf) +} + +// SetComprehension represents a set comprehension as defined in the language. +type SetComprehension struct { + Term *Term `json:"term"` + Body Body `json:"body"` +} + +// SetComprehensionTerm creates a new Term with an SetComprehension value. +func SetComprehensionTerm(term *Term, body Body) *Term { + return &Term{ + Value: &SetComprehension{ + Term: term, + Body: body, + }, + } +} + +// Copy returns a deep copy of sc. +func (sc *SetComprehension) Copy() *SetComprehension { + cpy := *sc + cpy.Body = sc.Body.Copy() + cpy.Term = sc.Term.Copy() + return &cpy +} + +// Equal returns true if sc is equal to other. +func (sc *SetComprehension) Equal(other Value) bool { + return Compare(sc, other) == 0 +} + +// Compare compares sc to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (sc *SetComprehension) Compare(other Value) int { + return Compare(sc, other) +} + +// Find returns the current value or a not found error. +func (sc *SetComprehension) Find(path Ref) (Value, error) { + if len(path) == 0 { + return sc, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code of the Value. +func (sc *SetComprehension) Hash() int { + return sc.Term.Hash() + sc.Body.Hash() +} + +// IsGround returns true if the Term and Body are ground. +func (sc *SetComprehension) IsGround() bool { + return sc.Term.IsGround() && sc.Body.IsGround() +} + +func (sc *SetComprehension) String() string { + buf, _ := sc.AppendText(make([]byte, 0, sc.StringLength())) + return util.ByteSliceToString(buf) +} + +// Call represents as function call in the language. +type Call []*Term + +// CallTerm returns a new Term with a Call value defined by terms. 
The first +// term is the operator and the rest are operands. +func CallTerm(terms ...*Term) *Term { + return NewTerm(Call(terms)) +} + +// Copy returns a deep copy of c. +func (c Call) Copy() Call { + return termSliceCopy(c) +} + +// Compare compares c to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (c Call) Compare(other Value) int { + return Compare(c, other) +} + +// Find returns the current value or a not found error. +func (Call) Find(Ref) (Value, error) { + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (c Call) Hash() int { + return termSliceHash(c) +} + +// IsGround returns true if the Value is ground. +func (c Call) IsGround() bool { + return termSliceIsGround(c) +} + +// MakeExpr returns a new Expr from this call. +func (c Call) MakeExpr(output *Term) *Expr { + terms := []*Term(c) + return NewExpr(append(terms, output)) +} + +func (c Call) Operator() Ref { + if len(c) == 0 { + return nil + } + + return c[0].Value.(Ref) +} + +func (c Call) Operands() []*Term { + if len(c) < 1 { + return nil + } + + return c[1:] +} + +func (c Call) String() string { + buf, _ := c.AppendText(make([]byte, 0, c.StringLength())) + return util.ByteSliceToString(buf) +} + +func termSliceCopy(a []*Term) []*Term { + cpy := make([]*Term, len(a)) + for i := range a { + cpy[i] = a[i].Copy() + } + return cpy +} + +func termSliceEqual(a, b []*Term) bool { + if len(a) == len(b) { + for i := range a { + if !a[i].Equal(b[i]) { + return false + } + } + return true + } + return false +} + +func termSliceHash(a []*Term) int { + var hash int + for _, v := range a { + hash += v.Value.Hash() + } + return hash +} + +func termSliceIsGround(a []*Term) bool { + for _, v := range a { + if !v.IsGround() { + return false + } + } + return true +} + +// Detect when String() need to use expensive JSON‐escaped form +func isControlOrBackslash(r rune) bool { + return r == '\\' || unicode.IsControl(r) +} + +// NOTE(tsandall): 
The unmarshalling errors in these functions are not +// helpful for callers because they do not identify the source of the +// unmarshalling error. Because OPA doesn't accept JSON describing ASTs +// from callers, this is acceptable (for now). If that changes in the future, +// the error messages should be revisited. The current approach focuses +// on the happy path and treats all errors the same. If better error +// reporting is needed, the error paths will need to be fleshed out. + +func unmarshalBody(b []any) (Body, error) { + buf := Body{} + for _, e := range b { + if m, ok := e.(map[string]any); ok { + expr := &Expr{} + if err := unmarshalExpr(expr, m); err == nil { + buf = append(buf, expr) + continue + } + } + goto unmarshal_error + } + return buf, nil +unmarshal_error: + return nil, errors.New("ast: unable to unmarshal body") +} + +func unmarshalExpr(expr *Expr, v map[string]any) error { + if x, ok := v["negated"]; ok { + if b, ok := x.(bool); ok { + expr.Negated = b + } else { + return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"]) + } + } + if generatedRaw, ok := v["generated"]; ok { + if b, ok := generatedRaw.(bool); ok { + expr.Generated = b + } else { + return fmt.Errorf("ast: unable to unmarshal generated field with type: %T (expected true or false)", v["generated"]) + } + } + + if err := unmarshalExprIndex(expr, v); err != nil { + return err + } + switch ts := v["terms"].(type) { + case map[string]any: + t, err := unmarshalTerm(ts) + if err != nil { + return err + } + expr.Terms = t + case []any: + terms, err := unmarshalTermSlice(ts) + if err != nil { + return err + } + expr.Terms = terms + default: + return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"]) + } + if x, ok := v["with"]; ok { + if sl, ok := x.([]any); ok { + ws := make([]*With, len(sl)) + for i := range sl { + var err error 
+ ws[i], err = unmarshalWith(sl[i]) + if err != nil { + return err + } + } + expr.With = ws + } + } + if loc, ok := v["location"].(map[string]any); ok { + expr.Location = &Location{} + if err := unmarshalLocation(expr.Location, loc); err != nil { + return err + } + } + return nil +} + +func unmarshalLocation(loc *Location, v map[string]any) error { + if x, ok := v["file"]; ok { + if s, ok := x.(string); ok { + loc.File = s + } else { + return fmt.Errorf("ast: unable to unmarshal file field with type: %T (expected string)", v["file"]) + } + } + if x, ok := v["row"]; ok { + if n, ok := x.(json.Number); ok { + i64, err := n.Int64() + if err != nil { + return err + } + loc.Row = int(i64) + } else { + return fmt.Errorf("ast: unable to unmarshal row field with type: %T (expected number)", v["row"]) + } + } + if x, ok := v["col"]; ok { + if n, ok := x.(json.Number); ok { + i64, err := n.Int64() + if err != nil { + return err + } + loc.Col = int(i64) + } else { + return fmt.Errorf("ast: unable to unmarshal col field with type: %T (expected number)", v["col"]) + } + } + + return nil +} + +func unmarshalExprIndex(expr *Expr, v map[string]any) error { + if x, ok := v["index"]; ok { + if n, ok := x.(json.Number); ok { + i, err := n.Int64() + if err == nil { + expr.Index = int(i) + return nil + } + } + } + return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"]) +} + +func unmarshalTerm(m map[string]any) (*Term, error) { + var term Term + + v, err := unmarshalValue(m) + if err != nil { + return nil, err + } + term.Value = v + + if loc, ok := m["location"].(map[string]any); ok { + term.Location = &Location{} + if err := unmarshalLocation(term.Location, loc); err != nil { + return nil, err + } + } + + return &term, nil +} + +func unmarshalTermSlice(s []any) ([]*Term, error) { + buf := []*Term{} + for _, x := range s { + if m, ok := x.(map[string]any); ok { + t, err := unmarshalTerm(m) + if err == nil { + buf = append(buf, t) + 
continue + } + return nil, err + } + return nil, errors.New("ast: unable to unmarshal term") + } + return buf, nil +} + +func unmarshalTermSliceValue(d map[string]any) ([]*Term, error) { + if s, ok := d["value"].([]any); ok { + return unmarshalTermSlice(s) + } + return nil, errors.New(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`) +} + +func unmarshalWith(i any) (*With, error) { + if m, ok := i.(map[string]any); ok { + tgt, _ := m["target"].(map[string]any) + target, err := unmarshalTerm(tgt) + if err == nil { + val, _ := m["value"].(map[string]any) + value, err := unmarshalTerm(val) + if err == nil { + return &With{ + Target: target, + Value: value, + }, nil + } + return nil, err + } + return nil, err + } + return nil, errors.New(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`) +} + +func unmarshalValue(d map[string]any) (Value, error) { + v := d["value"] + switch d["type"] { + case "null": + return NullValue, nil + case "boolean": + if b, ok := v.(bool); ok { + return Boolean(b), nil + } + case "number": + if n, ok := v.(json.Number); ok { + return Number(n), nil + } + case "string": + if s, ok := v.(string); ok { + return String(s), nil + } + case "var": + if s, ok := v.(string); ok { + return Var(s), nil + } + case "ref": + if s, err := unmarshalTermSliceValue(d); err == nil { + return Ref(s), nil + } + case "array": + if s, err := unmarshalTermSliceValue(d); err == nil { + return NewArray(s...), nil + } + case "set": + if s, err := unmarshalTermSliceValue(d); err == nil { + return NewSet(s...), nil + } + case "object": + if s, ok := v.([]any); ok { + buf := NewObject() + for _, x := range s { + if i, ok := x.([]any); ok && len(i) == 2 { + p, err := unmarshalTermSlice(i) + if err == nil { + buf.Insert(p[0], p[1]) + continue + } + } + goto unmarshal_error + } + return buf, nil + } + case "arraycomprehension", "setcomprehension": + if m, ok := 
v.(map[string]any); ok { + t, ok := m["term"].(map[string]any) + if !ok { + goto unmarshal_error + } + + term, err := unmarshalTerm(t) + if err != nil { + goto unmarshal_error + } + + b, ok := m["body"].([]any) + if !ok { + goto unmarshal_error + } + + body, err := unmarshalBody(b) + if err != nil { + goto unmarshal_error + } + + if d["type"] == "arraycomprehension" { + return &ArrayComprehension{Term: term, Body: body}, nil + } + return &SetComprehension{Term: term, Body: body}, nil + } + case "objectcomprehension": + if m, ok := v.(map[string]any); ok { + k, ok := m["key"].(map[string]any) + if !ok { + goto unmarshal_error + } + + key, err := unmarshalTerm(k) + if err != nil { + goto unmarshal_error + } + + v, ok := m["value"].(map[string]any) + if !ok { + goto unmarshal_error + } + + value, err := unmarshalTerm(v) + if err != nil { + goto unmarshal_error + } + + b, ok := m["body"].([]any) + if !ok { + goto unmarshal_error + } + + body, err := unmarshalBody(b) + if err != nil { + goto unmarshal_error + } + + return &ObjectComprehension{Key: key, Value: value, Body: body}, nil + } + case "call": + if s, err := unmarshalTermSliceValue(d); err == nil { + return Call(s), nil + } + } +unmarshal_error: + return nil, errors.New("ast: unable to unmarshal term") +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/term_appenders.go b/vendor/github.com/open-policy-agent/opa/v1/ast/term_appenders.go new file mode 100644 index 0000000000..93c0803910 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/term_appenders.go @@ -0,0 +1,266 @@ +package ast + +import ( + "encoding" + "strconv" + "strings" + + "github.com/open-policy-agent/opa/v1/util" +) + +// AppendText appends the text representation of term (i.e. as printed in policy) to +// buf and returns the extended buffer. 
+func (term *Term) AppendText(buf []byte) ([]byte, error) { + if app, ok := term.Value.(encoding.TextAppender); ok { + return app.AppendText(buf) + } + + return append(buf, term.Value.String()...), nil +} + +func (v Var) AppendText(buf []byte) ([]byte, error) { + if v.IsWildcard() { + return append(buf, WildcardString...), nil + } + return append(buf, v...), nil +} + +func (b Boolean) AppendText(buf []byte) ([]byte, error) { + if b { + return append(buf, "true"...), nil + } + return append(buf, "false"...), nil +} + +func (Null) AppendText(buf []byte) ([]byte, error) { + return append(buf, "null"...), nil +} + +func (str String) AppendText(buf []byte) ([]byte, error) { + return strconv.AppendQuote(buf, string(str)), nil +} + +func (str String) appendNoQuote(buf []byte) []byte { + // Append using strconv.AppendQuote for proper escaping, but trim off + // the leading and trailing quotes afterwards. + oldLen := len(buf) + buf = strconv.AppendQuote(buf, string(str)) + newLen := len(buf) + quoted := buf[oldLen:newLen] + + return append(buf[:oldLen], quoted[1:len(quoted)-1]...) +} + +func (num Number) AppendText(buf []byte) ([]byte, error) { + return append(buf, num...), nil +} + +func (arr *Array) AppendText(buf []byte) ([]byte, error) { + buf, err := AppendDelimeted(append(buf, '['), arr.elems, ", ") + if err != nil { + return nil, err + } + return append(buf, ']'), nil +} + +func (obj *object) AppendText(buf []byte) ([]byte, error) { + olen := obj.Len() + if olen == 0 { + return append(buf, "{}"...), nil + } + + buf = append(buf, '{') + + var err error + + // first key-value pair + keys := obj.sortedKeys() + for i := range keys { + if buf, err = keys[i].key.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ": "...) + if buf, err = keys[i].value.AppendText(buf); err != nil { + return nil, err + } + if i < olen-1 { + buf = append(buf, ", "...) 
+ } + } + + return append(buf, '}'), nil +} + +func (obj *lazyObj) AppendText(buf []byte) ([]byte, error) { + return append(buf, obj.force().String()...), nil +} + +func (s *set) AppendText(buf []byte) ([]byte, error) { + slen := s.Len() + if slen == 0 { + return append(buf, "set()"...), nil + } + + var err error + + buf = append(buf, '{') + if buf, err = AppendDelimeted(buf, s.sortedKeys(), ", "); err != nil { + return nil, err + } + + return append(buf, '}'), nil +} + +func (c Call) AppendText(buf []byte) ([]byte, error) { + if len(c) == 0 { + return buf, nil + } + + var err error + + if buf, err = c[0].AppendText(buf); err != nil { + return nil, err + } + + if buf, err = AppendDelimeted(append(buf, '('), c[1:], ", "); err != nil { + return nil, err + } + return append(buf, ')'), nil +} + +func (ts *TemplateString) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, "$\""...) + for _, p := range ts.Parts { + switch x := p.(type) { + case *Expr: + buf = append(buf, '{') + var err error + if buf, err = x.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, '}') + case *Term: + if str, ok := x.Value.(String); ok { + // TODO(anders): this is a bit of a mess, but as explained by the comment on + // [EscapeTemplateStringStringPart], required as long as we rely on strconv for escaping, which adds + // quotes around the string that we don't want here, and trying to "unappend" them is not nice at all.. + s := string(str) + ulc := countUnescapedLeftCurly(s) + sl := str.StringLength() + ulc - 2 // no surrounding quotes + + if sl == len(s) { // no escaping needed + buf = append(buf, s...) + } else { // some escaping needed + if sl == len(s)+ulc { // only unescaped { + buf = AppendEscapedTemplateStringStringPart(buf, string(str)) + } else { // full escaping needed. this is expensive but luckily rare + tmp := str.appendNoQuote(make([]byte, 0, sl)) + ets := EscapeTemplateStringStringPart(util.ByteSliceToString(tmp)) + buf = append(buf, ets...) 
+ } + } + } else { + var err error + if buf, err = x.AppendText(buf); err != nil { + return nil, err + } + } + default: + buf = append(buf, ""...) + } + } + return append(buf, '"'), nil +} + +func (r Ref) AppendText(buf []byte) ([]byte, error) { + reflen := len(r) + if reflen == 0 { + return buf, nil + } + if reflen == 1 { + if s, ok := r[0].Value.(String); ok { + // While a ref head is typically a Var, a lone String term should not be quoted + return append(buf, s...), nil + } + return r[0].AppendText(buf) + } + if name, ok := BuiltinNameFromRef(r); ok { + return append(buf, name...), nil + } + + var err error + if s, ok := r[0].Value.(String); ok { + buf = append(buf, s...) + } else if buf, err = r[0].AppendText(buf); err != nil { + return nil, err + } + + for _, p := range r[1:] { + switch v := p.Value.(type) { + case String: + str := string(v) + if IsVarCompatibleString(str) && !IsKeyword(str) { + buf = append(append(buf, '.'), str...) + } else { + buf = append(buf, '[') + // Determine whether we need the full JSON-escaped form + if strings.ContainsFunc(str, isControlOrBackslash) { + if buf, err = v.AppendText(buf); err != nil { + return nil, err + } + } else { + buf = append(append(append(buf, '"'), str...), '"') + } + buf = append(buf, ']') + } + default: + buf = append(buf, '[') + if buf, err = p.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ']') + } + } + + return buf, nil +} + +func (sc *SetComprehension) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, '{') + var err error + if buf, err = sc.Term.AppendText(buf); err != nil { + return nil, err + } + if buf, err = sc.Body.AppendText(append(buf, " | "...)); err != nil { + return nil, err + } + return append(buf, '}'), nil +} + +func (ac *ArrayComprehension) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, '[') + var err error + if buf, err = ac.Term.AppendText(buf); err != nil { + return nil, err + } + if buf, err = ac.Body.AppendText(append(buf, " | "...)); 
err != nil { + return nil, err + } + return append(buf, ']'), nil +} + +func (oc *ObjectComprehension) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, '{') + var err error + if buf, err = oc.Key.AppendText(buf); err != nil { + return nil, err + } + buf = append(buf, ": "...) + if buf, err = oc.Value.AppendText(buf); err != nil { + return nil, err + } + if buf, err = oc.Body.AppendText(append(buf, " | "...)); err != nil { + return nil, err + } + return append(buf, '}'), nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go b/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go new file mode 100644 index 0000000000..a71bc0a77c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go @@ -0,0 +1,444 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" +) + +// Transformer defines the interface for transforming AST elements. If the +// transformer returns nil and does not indicate an error, the AST element will +// be set to nil and no transformations will be applied to children of the +// element. +type Transformer interface { + Transform(any) (any, error) +} + +// Transform iterates the AST and calls the Transform function on the +// Transformer t for x before recursing. 
+func Transform(t Transformer, x any) (any, error) { + if term, ok := x.(*Term); ok { + return Transform(t, term.Value) + } + + y, err := t.Transform(x) + if err != nil { + return x, err + } + + if y == nil { + return nil, nil + } + + var ok bool + switch y := y.(type) { + case *Module: + p, err := Transform(t, y.Package) + if err != nil { + return nil, err + } + if y.Package, ok = p.(*Package); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p) + } + for i := range y.Imports { + imp, err := Transform(t, y.Imports[i]) + if err != nil { + return nil, err + } + if y.Imports[i], ok = imp.(*Import); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp) + } + } + for i := range y.Rules { + rule, err := Transform(t, y.Rules[i]) + if err != nil { + return nil, err + } + if y.Rules[i], ok = rule.(*Rule); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule) + } + } + for i := range y.Annotations { + a, err := Transform(t, y.Annotations[i]) + if err != nil { + return nil, err + } + if y.Annotations[i], ok = a.(*Annotations); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a) + } + } + for i := range y.Comments { + comment, err := Transform(t, y.Comments[i]) + if err != nil { + return nil, err + } + if y.Comments[i], ok = comment.(*Comment); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment) + } + } + return y, nil + case *Package: + ref, err := Transform(t, y.Path) + if err != nil { + return nil, err + } + if y.Path, ok = ref.(Ref); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref) + } + return y, nil + case *Import: + y.Path, err = transformTerm(t, y.Path) + if err != nil { + return nil, err + } + if y.Alias, err = transformVar(t, y.Alias); err != nil { + return nil, err + } + return y, nil + case *Rule: + if y.Head, err = transformHead(t, y.Head); err != nil { + return nil, err + } + if y.Body, err = 
transformBody(t, y.Body); err != nil { + return nil, err + } + if y.Else != nil { + rule, err := Transform(t, y.Else) + if err != nil { + return nil, err + } + if y.Else, ok = rule.(*Rule); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule) + } + } + return y, nil + case *Head: + if y.Reference, err = transformRef(t, y.Reference); err != nil { + return nil, err + } + if y.Name, err = transformVar(t, y.Name); err != nil { + return nil, err + } + if y.Args, err = transformArgs(t, y.Args); err != nil { + return nil, err + } + if y.Key != nil { + if y.Key, err = transformTerm(t, y.Key); err != nil { + return nil, err + } + } + if y.Value != nil { + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + } + return y, nil + case Args: + for i := range y { + if y[i], err = transformTerm(t, y[i]); err != nil { + return nil, err + } + } + return y, nil + case Body: + for i, e := range y { + e, err := Transform(t, e) + if err != nil { + return nil, err + } + if y[i], ok = e.(*Expr); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e) + } + } + return y, nil + case *Expr: + switch ts := y.Terms.(type) { + case *SomeDecl: + decl, err := Transform(t, ts) + if err != nil { + return nil, err + } + if y.Terms, ok = decl.(*SomeDecl); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y, decl) + } + return y, nil + case []*Term: + for i := range ts { + if ts[i], err = transformTerm(t, ts[i]); err != nil { + return nil, err + } + } + case *Term: + if y.Terms, err = transformTerm(t, ts); err != nil { + return nil, err + } + case *Every: + if ts.Key != nil { + ts.Key, err = transformTerm(t, ts.Key) + if err != nil { + return nil, err + } + } + ts.Value, err = transformTerm(t, ts.Value) + if err != nil { + return nil, err + } + ts.Domain, err = transformTerm(t, ts.Domain) + if err != nil { + return nil, err + } + ts.Body, err = transformBody(t, ts.Body) + if err != nil { + return nil, err + } + y.Terms = 
ts + } + for i, w := range y.With { + w, err := Transform(t, w) + if err != nil { + return nil, err + } + if y.With[i], ok = w.(*With); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w) + } + } + return y, nil + case *With: + if y.Target, err = transformTerm(t, y.Target); err != nil { + return nil, err + } + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + return y, nil + case Ref: + for i, term := range y { + if y[i], err = transformTerm(t, term); err != nil { + return nil, err + } + } + return y, nil + case *object: + return y.Map(func(k, v *Term) (*Term, *Term, error) { + k, err := transformTerm(t, k) + if err != nil { + return nil, nil, err + } + v, err = transformTerm(t, v) + if err != nil { + return nil, nil, err + } + return k, v, nil + }) + case *Array: + for i := range y.Len() { + v, err := transformTerm(t, y.Elem(i)) + if err != nil { + return nil, err + } + y.set(i, v) + } + return y, nil + case Set: + y, err = y.Map(func(term *Term) (*Term, error) { + return transformTerm(t, term) + }) + if err != nil { + return nil, err + } + return y, nil + case *ArrayComprehension: + if y.Term, err = transformTerm(t, y.Term); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case *ObjectComprehension: + if y.Key, err = transformTerm(t, y.Key); err != nil { + return nil, err + } + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case *SetComprehension: + if y.Term, err = transformTerm(t, y.Term); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case Call: + for i := range y { + if y[i], err = transformTerm(t, y[i]); err != nil { + return nil, err + } + } + return y, nil + case *TemplateString: + for i := range y.Parts { 
+ if expr, ok := y.Parts[i].(*Expr); ok { + transformed, err := Transform(t, expr) + if err != nil { + return nil, err + } + if y.Parts[i], ok = transformed.(*Expr); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", expr, transformed) + } + } + } + return y, nil + default: + return y, nil + } +} + +// TransformRefs calls the function f on all references under x. +func TransformRefs(x any, f func(Ref) (Value, error)) (any, error) { + t := NewGenericTransformer(func(x any) (any, error) { + if r, ok := x.(Ref); ok { + return f(r) + } + return x, nil + }) + return Transform(t, x) +} + +// TransformVars calls the function f on all vars under x. +func TransformVars(x any, f func(Var) (Value, error)) (any, error) { + t := NewGenericTransformer(func(x any) (any, error) { + if v, ok := x.(Var); ok { + return f(v) + } + return x, nil + }) + return Transform(t, x) +} + +// TransformComprehensions calls the function f on all comprehensions under x. +func TransformComprehensions(x any, f func(any) (Value, error)) (any, error) { + t := NewGenericTransformer(func(x any) (any, error) { + switch x := x.(type) { + case *ArrayComprehension: + return f(x) + case *SetComprehension: + return f(x) + case *ObjectComprehension: + return f(x) + } + return x, nil + }) + return Transform(t, x) +} + +// GenericTransformer implements the Transformer interface to provide a utility +// to transform AST nodes using a closure. +type GenericTransformer struct { + f func(any) (any, error) +} + +// NewGenericTransformer returns a new GenericTransformer that will transform +// AST nodes using the function f. +func NewGenericTransformer(f func(x any) (any, error)) *GenericTransformer { + return &GenericTransformer{ + f: f, + } +} + +// Transform calls the function f on the GenericTransformer. 
+func (t *GenericTransformer) Transform(x any) (any, error) { + return t.f(x) +} + +func transformHead(t Transformer, head *Head) (*Head, error) { + y, err := Transform(t, head) + if err != nil { + return nil, err + } + h, ok := y.(*Head) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", head, y) + } + return h, nil +} + +func transformArgs(t Transformer, args Args) (Args, error) { + y, err := Transform(t, args) + if err != nil { + return nil, err + } + a, ok := y.(Args) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", args, y) + } + return a, nil +} + +func transformBody(t Transformer, body Body) (Body, error) { + y, err := Transform(t, body) + if err != nil { + return nil, err + } + r, ok := y.(Body) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", body, y) + } + return r, nil +} + +func transformTerm(t Transformer, term *Term) (*Term, error) { + v, err := transformValue(t, term.Value) + if err != nil { + return nil, err + } + return &Term{Value: v, Location: term.Location}, nil +} + +func transformValue(t Transformer, v Value) (Value, error) { + v1, err := Transform(t, v) + if err != nil { + return nil, err + } + r, ok := v1.(Value) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", v, v1) + } + return r, nil +} + +func transformVar(t Transformer, v Var) (Var, error) { + tv, err := t.Transform(v) + if err != nil { + return "", err + } + + if tv == nil { + return "", nil + } + + r, ok := tv.(Var) + if !ok { + return "", fmt.Errorf("illegal transform: %T != %T", v, tv) + } + return r, nil +} + +func transformRef(t Transformer, r Ref) (Ref, error) { + r1, err := Transform(t, r) + if err != nil { + return nil, err + } + r2, ok := r1.(Ref) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", r, r2) + } + return r2, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go b/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go new file mode 100644 index 
0000000000..67b89a2a93 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go @@ -0,0 +1,239 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +func isRefSafe(ref Ref, safe VarSet) bool { + switch head := ref[0].Value.(type) { + case Var: + return safe.Contains(head) + case Call: + return isCallSafe(head, safe) + default: + vis := varVisitorPool.Get().WithParams(SafetyCheckVisitorParams) + vis.Walk(ref[0]) + isSafe := vis.Vars().DiffCount(safe) == 0 + varVisitorPool.Put(vis) + return isSafe + } +} + +func isCallSafe(call Call, safe VarSet) bool { + vis := varVisitorPool.Get().WithParams(SafetyCheckVisitorParams) + vis.Walk(call) + isSafe := vis.Vars().DiffCount(safe) == 0 + varVisitorPool.Put(vis) + + return isSafe +} + +// Unify returns a set of variables that will be unified when the equality expression defined by +// terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already +// unified. 
+func Unify(safe VarSet, a *Term, b *Term) VarSet { + u := &unifier{ + safe: safe, + unified: VarSet{}, + unknown: map[Var]VarSet{}, + } + u.unify(a, b) + return u.unified +} + +type unifier struct { + safe VarSet + unified VarSet + unknown map[Var]VarSet +} + +func (u *unifier) isSafe(x Var) bool { + return u.safe.Contains(x) || u.unified.Contains(x) +} + +func (u *unifier) unify(a *Term, b *Term) { + + switch a := a.Value.(type) { + + case Var: + switch b := b.Value.(type) { + case Var: + if u.isSafe(b) { + u.markSafe(a) + } else if u.isSafe(a) { + u.markSafe(b) + } else { + u.markUnknown(a, b) + u.markUnknown(b, a) + } + case *Array, Object: + u.unifyAll(a, b) + case Ref: + if isRefSafe(b, u.safe) { + u.markSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markSafe(a) + } + default: + u.markSafe(a) + } + + case Ref: + if isRefSafe(a, u.safe) { + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array, Object: + u.markAllSafe(b) + } + } + + case Call: + if isCallSafe(a, u.safe) { + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array, Object: + u.markAllSafe(b) + } + } + + case *ArrayComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array: + u.markAllSafe(b) + } + case *ObjectComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *object: + u.markAllSafe(b) + } + case *SetComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + } + + case *Array: + switch b := b.Value.(type) { + case Var: + u.unifyAll(b, a) + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + u.markAllSafe(a) + case Ref: + if isRefSafe(b, u.safe) { + u.markAllSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markAllSafe(a) + } + case *Array: + if a.Len() == b.Len() { + for i := range a.Len() { + u.unify(a.Elem(i), b.Elem(i)) + } + } + } + + case *object: + switch b := b.Value.(type) { + case Var: + u.unifyAll(b, a) + case Ref: + if isRefSafe(b, u.safe) { 
+ u.markAllSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markAllSafe(a) + } + case *object: + if a.Len() == b.Len() { + _ = a.Iter(func(k, v *Term) error { + if v2 := b.Get(k); v2 != nil { + u.unify(v, v2) + } + return nil + }) // impossible to return error + } + } + + default: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + } + } +} + +func (u *unifier) markAllSafe(x Value) { + vis := varVisitorPool.Get().WithParams(VarVisitorParams{ + SkipRefHead: true, + SkipObjectKeys: true, + SkipClosures: true, + }) + vis.Walk(x) + for v := range vis.Vars() { + u.markSafe(v) + } + varVisitorPool.Put(vis) +} + +func (u *unifier) markSafe(x Var) { + u.unified.Add(x) + + // Add dependencies of 'x' to safe set + vs := u.unknown[x] + delete(u.unknown, x) + for v := range vs { + u.markSafe(v) + } + + // Add dependants of 'x' to safe set if they have no more + // dependencies. + for v, deps := range u.unknown { + if deps.Contains(x) { + delete(deps, x) + if len(deps) == 0 { + u.markSafe(v) + } + } + } +} + +func (u *unifier) markUnknown(a, b Var) { + if _, ok := u.unknown[a]; !ok { + u.unknown[a] = NewVarSet(b) + } else { + u.unknown[a].Add(b) + } +} + +func (u *unifier) unifyAll(a Var, b Value) { + if u.isSafe(a) { + u.markAllSafe(b) + } else { + vis := varVisitorPool.Get().WithParams(VarVisitorParams{ + SkipRefHead: true, + SkipObjectKeys: true, + SkipClosures: true, + }) + vis.Walk(b) + unsafe := vis.Vars().Diff(u.safe).Diff(u.unified) + if len(unsafe) == 0 { + u.markSafe(a) + } else { + for v := range unsafe { + u.markUnknown(a, v) + } + } + varVisitorPool.Put(vis) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go b/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go new file mode 100644 index 0000000000..55bbea80d0 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go @@ -0,0 +1,131 @@ +// Copyright 2016 The OPA Authors. All rights reserved. 
+// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "slices" + + "github.com/open-policy-agent/opa/v1/util" +) + +// VarSet represents a set of variables. +type VarSet map[Var]struct{ *Location } + +// NewVarSet returns a new VarSet containing the specified variables. +func NewVarSet(vs ...Var) VarSet { + s := make(VarSet, len(vs)) + for _, v := range vs { + s.Add(v) + } + return s +} + +// NewVarSet returns a new VarSet containing the specified variables. +func NewVarSetOfSize(size int) VarSet { + return make(VarSet, size) +} + +// Add updates the set to include the variable "v". +func (s VarSet) Add(v Var) { + if _, ok := s[v]; !ok { + s[v] = struct{ *Location }{} + } +} + +func (s VarSet) AddLocation(v Var, l *Location) { + if entry, ok := s[v]; ok { + entry.Location = l + s[v] = entry + } +} + +// Contains returns true if the set contains the variable "v". +func (s VarSet) Contains(v Var) bool { + _, ok := s[v] + return ok +} + +// Copy returns a shallow copy of the VarSet. +func (s VarSet) Copy() VarSet { + cpy := NewVarSetOfSize(len(s)) + for v := range s { + cpy.Add(v) + } + return cpy +} + +// Diff returns a VarSet containing variables in s that are not in vs. +func (s VarSet) Diff(vs VarSet) VarSet { + r := NewVarSetOfSize(s.DiffCount(vs)) + for v := range s { + if !vs.Contains(v) { + r.Add(v) + r.AddLocation(v, s[v].Location) + } + } + return r +} + +// DiffCount returns the number of variables in s that are not in vs. +func (s VarSet) DiffCount(vs VarSet) (i int) { + for v := range s { + if !vs.Contains(v) { + i++ + } + } + return +} + +// Equal returns true if s contains exactly the same elements as vs. +func (s VarSet) Equal(vs VarSet) bool { + if len(s) != len(vs) { + return false + } + for v := range s { + if !vs.Contains(v) { + return false + } + } + return true +} + +// Intersect returns a VarSet containing variables in s that are in vs. 
+func (s VarSet) Intersect(vs VarSet) VarSet { + i := 0 + for v := range s { + if vs.Contains(v) { + i++ + } + } + r := NewVarSetOfSize(i) + for v := range s { + if vs.Contains(v) { + r.Add(v) + } + } + return r +} + +// Sorted returns a new sorted slice of vars from s. +func (s VarSet) Sorted() []Var { + sorted := make([]Var, 0, len(s)) + for v := range s { + sorted = append(sorted, v) + } + slices.SortFunc(sorted, VarCompare) + return sorted +} + +// Update merges the other VarSet into this VarSet. +func (s VarSet) Update(vs VarSet) { + for v := range vs { + s.Add(v) + } +} + +func (s VarSet) String() string { + return fmt.Sprintf("%v", util.KeysSorted(s)) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json b/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json new file mode 100644 index 0000000000..bd0df82ad6 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json @@ -0,0 +1,1079 @@ +{ + "builtins": { + "abs": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "all": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "and": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "any": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "array.concat": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "array.flatten": { + "Major": 1, + "Minor": 13, + "Patch": 0 + }, + "array.reverse": { + "Major": 0, + "Minor": 36, + "Patch": 0 + }, + "array.slice": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "assign": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "base64.decode": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "base64.encode": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "base64.is_valid": { + "Major": 0, + "Minor": 24, + "Patch": 0 + }, + "base64url.decode": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "base64url.encode": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "base64url.encode_no_pad": { + "Major": 0, + "Minor": 25, + "Patch": 0, + "PreRelease": "rc2" + 
}, + "bits.and": { + "Major": 0, + "Minor": 18, + "Patch": 0 + }, + "bits.lsh": { + "Major": 0, + "Minor": 18, + "Patch": 0 + }, + "bits.negate": { + "Major": 0, + "Minor": 18, + "Patch": 0 + }, + "bits.or": { + "Major": 0, + "Minor": 18, + "Patch": 0 + }, + "bits.rsh": { + "Major": 0, + "Minor": 18, + "Patch": 0 + }, + "bits.xor": { + "Major": 0, + "Minor": 18, + "Patch": 0 + }, + "cast_array": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "cast_boolean": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "cast_null": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "cast_object": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "cast_set": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "cast_string": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "ceil": { + "Major": 0, + "Minor": 26, + "Patch": 0 + }, + "concat": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "contains": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "count": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "crypto.hmac.equal": { + "Major": 0, + "Minor": 52, + "Patch": 0 + }, + "crypto.hmac.md5": { + "Major": 0, + "Minor": 36, + "Patch": 0 + }, + "crypto.hmac.sha1": { + "Major": 0, + "Minor": 36, + "Patch": 0 + }, + "crypto.hmac.sha256": { + "Major": 0, + "Minor": 36, + "Patch": 0 + }, + "crypto.hmac.sha512": { + "Major": 0, + "Minor": 36, + "Patch": 0 + }, + "crypto.md5": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "crypto.parse_private_keys": { + "Major": 0, + "Minor": 55, + "Patch": 0 + }, + "crypto.sha1": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "crypto.sha256": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "crypto.x509.parse_and_verify_certificates": { + "Major": 0, + "Minor": 31, + "Patch": 0 + }, + "crypto.x509.parse_and_verify_certificates_with_options": { + "Major": 0, + "Minor": 63, + "Patch": 0 + }, + "crypto.x509.parse_certificate_request": { + "Major": 0, + "Minor": 21, + "Patch": 0 + }, + "crypto.x509.parse_certificates": { + 
"Major": 0, + "Minor": 17, + "Patch": 0 + }, + "crypto.x509.parse_keypair": { + "Major": 0, + "Minor": 53, + "Patch": 0 + }, + "crypto.x509.parse_rsa_private_key": { + "Major": 0, + "Minor": 33, + "Patch": 0 + }, + "div": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "endswith": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "eq": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "equal": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "floor": { + "Major": 0, + "Minor": 26, + "Patch": 0 + }, + "format_int": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "glob.match": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "glob.quote_meta": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "graph.reachable": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "graph.reachable_paths": { + "Major": 0, + "Minor": 37, + "Patch": 0 + }, + "graphql.is_valid": { + "Major": 0, + "Minor": 41, + "Patch": 0 + }, + "graphql.parse": { + "Major": 0, + "Minor": 41, + "Patch": 0 + }, + "graphql.parse_and_verify": { + "Major": 0, + "Minor": 41, + "Patch": 0 + }, + "graphql.parse_query": { + "Major": 0, + "Minor": 41, + "Patch": 0 + }, + "graphql.parse_schema": { + "Major": 0, + "Minor": 41, + "Patch": 0 + }, + "graphql.schema_is_valid": { + "Major": 0, + "Minor": 46, + "Patch": 0 + }, + "gt": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "gte": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "hex.decode": { + "Major": 0, + "Minor": 25, + "Patch": 0, + "PreRelease": "rc2" + }, + "hex.encode": { + "Major": 0, + "Minor": 25, + "Patch": 0, + "PreRelease": "rc2" + }, + "http.send": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "indexof": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "indexof_n": { + "Major": 0, + "Minor": 37, + "Patch": 0 + }, + "internal.member_2": { + "Major": 0, + "Minor": 34, + "Patch": 0 + }, + "internal.member_3": { + "Major": 0, + "Minor": 34, + "Patch": 0 + }, + "internal.print": { + "Major": 0, + "Minor": 34, + "Patch": 0 + }, + 
"internal.template_string": { + "Major": 1, + "Minor": 12, + "Patch": 0 + }, + "internal.test_case": { + "Major": 1, + "Minor": 2, + "Patch": 0 + }, + "intersection": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.decode": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.decode_verify": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.encode_sign": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.encode_sign_raw": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.verify_eddsa": { + "Major": 1, + "Minor": 8, + "Patch": 0 + }, + "io.jwt.verify_es256": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.verify_es384": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "io.jwt.verify_es512": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "io.jwt.verify_hs256": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.verify_hs384": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "io.jwt.verify_hs512": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "io.jwt.verify_ps256": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.verify_ps384": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "io.jwt.verify_ps512": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "io.jwt.verify_rs256": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "io.jwt.verify_rs384": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "io.jwt.verify_rs512": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "is_array": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "is_boolean": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "is_null": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "is_number": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "is_object": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "is_set": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "is_string": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "json.filter": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "json.is_valid": { 
+ "Major": 0, + "Minor": 25, + "Patch": 0, + "PreRelease": "rc1" + }, + "json.marshal": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "json.marshal_with_options": { + "Major": 0, + "Minor": 64, + "Patch": 0 + }, + "json.match_schema": { + "Major": 0, + "Minor": 50, + "Patch": 0 + }, + "json.patch": { + "Major": 0, + "Minor": 25, + "Patch": 0 + }, + "json.remove": { + "Major": 0, + "Minor": 18, + "Patch": 0 + }, + "json.unmarshal": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "json.verify_schema": { + "Major": 0, + "Minor": 50, + "Patch": 0 + }, + "lower": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "lt": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "lte": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "max": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "min": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "minus": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "mul": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "neq": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "net.cidr_contains": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "net.cidr_contains_matches": { + "Major": 0, + "Minor": 19, + "Patch": 0, + "PreRelease": "rc1" + }, + "net.cidr_expand": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "net.cidr_intersects": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "net.cidr_is_valid": { + "Major": 0, + "Minor": 46, + "Patch": 0 + }, + "net.cidr_merge": { + "Major": 0, + "Minor": 24, + "Patch": 0 + }, + "net.cidr_overlap": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "net.lookup_ip_addr": { + "Major": 0, + "Minor": 35, + "Patch": 0 + }, + "numbers.range": { + "Major": 0, + "Minor": 22, + "Patch": 0 + }, + "numbers.range_step": { + "Major": 0, + "Minor": 56, + "Patch": 0 + }, + "object.filter": { + "Major": 0, + "Minor": 17, + "Patch": 2 + }, + "object.get": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "object.keys": { + "Major": 0, + "Minor": 47, + "Patch": 0 + }, + "object.remove": { + 
"Major": 0, + "Minor": 17, + "Patch": 2 + }, + "object.subset": { + "Major": 0, + "Minor": 42, + "Patch": 0 + }, + "object.union": { + "Major": 0, + "Minor": 17, + "Patch": 2 + }, + "object.union_n": { + "Major": 0, + "Minor": 37, + "Patch": 0 + }, + "opa.runtime": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "or": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "plus": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "print": { + "Major": 0, + "Minor": 34, + "Patch": 0 + }, + "product": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "providers.aws.sign_req": { + "Major": 0, + "Minor": 47, + "Patch": 0 + }, + "rand.intn": { + "Major": 0, + "Minor": 31, + "Patch": 0 + }, + "re_match": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "regex.find_all_string_submatch_n": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "regex.find_n": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "regex.globs_match": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "regex.is_valid": { + "Major": 0, + "Minor": 23, + "Patch": 0 + }, + "regex.match": { + "Major": 0, + "Minor": 23, + "Patch": 0 + }, + "regex.replace": { + "Major": 0, + "Minor": 45, + "Patch": 0 + }, + "regex.split": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "regex.template_match": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "rego.metadata.chain": { + "Major": 0, + "Minor": 40, + "Patch": 0 + }, + "rego.metadata.rule": { + "Major": 0, + "Minor": 40, + "Patch": 0 + }, + "rego.parse_module": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "rem": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "replace": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "round": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "semver.compare": { + "Major": 0, + "Minor": 22, + "Patch": 0 + }, + "semver.is_valid": { + "Major": 0, + "Minor": 22, + "Patch": 0 + }, + "set_diff": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "sort": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "split": { 
+ "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "sprintf": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "startswith": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "strings.any_prefix_match": { + "Major": 0, + "Minor": 44, + "Patch": 0 + }, + "strings.any_suffix_match": { + "Major": 0, + "Minor": 44, + "Patch": 0 + }, + "strings.count": { + "Major": 0, + "Minor": 67, + "Patch": 0 + }, + "strings.render_template": { + "Major": 0, + "Minor": 59, + "Patch": 0 + }, + "strings.replace_n": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "strings.reverse": { + "Major": 0, + "Minor": 36, + "Patch": 0 + }, + "substring": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "sum": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "time.add_date": { + "Major": 0, + "Minor": 19, + "Patch": 0 + }, + "time.clock": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "time.date": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "time.diff": { + "Major": 0, + "Minor": 28, + "Patch": 0 + }, + "time.format": { + "Major": 0, + "Minor": 48, + "Patch": 0 + }, + "time.now_ns": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "time.parse_duration_ns": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "time.parse_ns": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "time.parse_rfc3339_ns": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "time.weekday": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "to_number": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "trace": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "trim": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "trim_left": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "trim_prefix": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "trim_right": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "trim_space": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "trim_suffix": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "type_name": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + 
"union": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "units.parse": { + "Major": 0, + "Minor": 41, + "Patch": 0 + }, + "units.parse_bytes": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "upper": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "urlquery.decode": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "urlquery.decode_object": { + "Major": 0, + "Minor": 24, + "Patch": 0 + }, + "urlquery.encode": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "urlquery.encode_object": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "uuid.parse": { + "Major": 0, + "Minor": 57, + "Patch": 0 + }, + "uuid.rfc4122": { + "Major": 0, + "Minor": 20, + "Patch": 0 + }, + "walk": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "yaml.is_valid": { + "Major": 0, + "Minor": 25, + "Patch": 0, + "PreRelease": "rc1" + }, + "yaml.marshal": { + "Major": 0, + "Minor": 17, + "Patch": 0 + }, + "yaml.unmarshal": { + "Major": 0, + "Minor": 17, + "Patch": 0 + } + }, + "features": { + "keywords_in_refs": { + "Major": 1, + "Minor": 6, + "Patch": 0 + }, + "rego_v1": { + "Major": 1, + "Minor": 0, + "Patch": 0 + }, + "rego_v1_import": { + "Major": 0, + "Minor": 59, + "Patch": 0 + }, + "rule_head_ref_string_prefixes": { + "Major": 0, + "Minor": 46, + "Patch": 0 + }, + "rule_head_refs": { + "Major": 0, + "Minor": 59, + "Patch": 0 + }, + "template_strings": { + "Major": 1, + "Minor": 12, + "Patch": 0 + } + }, + "keywords": { + "contains": { + "Major": 0, + "Minor": 42, + "Patch": 0 + }, + "every": { + "Major": 0, + "Minor": 38, + "Patch": 0 + }, + "if": { + "Major": 0, + "Minor": 42, + "Patch": 0 + }, + "in": { + "Major": 0, + "Minor": 34, + "Patch": 0 + } + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go b/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go new file mode 100644 index 0000000000..d7725f5a51 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go @@ -0,0 +1,1026 @@ +// Copyright 2016 The OPA Authors. 
All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +var ( + termTypeVisitor = newTypeVisitor[*Term]() + varTypeVisitor = newTypeVisitor[Var]() + exprTypeVisitor = newTypeVisitor[*Expr]() + ruleTypeVisitor = newTypeVisitor[*Rule]() + refTypeVisitor = newTypeVisitor[Ref]() + bodyTypeVisitor = newTypeVisitor[Body]() + withTypeVisitor = newTypeVisitor[*With]() +) + +type ( + // GenericVisitor provides a utility to walk over AST nodes using a + // closure. If the closure returns true, the visitor will not walk + // over AST nodes under x. + GenericVisitor struct { + f func(x any) bool + } + + // BeforeAfterVisitor provides a utility to walk over AST nodes using + // closures. If the before closure returns true, the visitor will not + // walk over AST nodes under x. The after closure is invoked always + // after visiting a node. + BeforeAfterVisitor struct { + before func(x any) bool + after func(x any) + } + + // VarVisitor walks AST nodes under a given node and collects all encountered + // variables. The collected variables can be controlled by specifying + // VarVisitorParams when creating the visitor. + VarVisitor struct { + params VarVisitorParams + vars VarSet + } + + // VarVisitorParams contains settings for a VarVisitor. + VarVisitorParams struct { + SkipRefHead bool + SkipRefCallHead bool + SkipObjectKeys bool + SkipClosures bool + SkipWithTarget bool + SkipSets bool + SkipTemplateStrings bool + } + + // Visitor defines the interface for iterating AST elements. The Visit function + // can return a Visitor w which will be used to visit the children of the AST + // element v. If the Visit function returns nil, the children will not be + // visited. 
+ // + // Deprecated: use [GenericVisitor] or another visitor implementation + Visitor interface { + Visit(v any) (w Visitor) + } + + // BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before + // and after the AST has been visited. + // + // Deprecated: use [GenericVisitor] or another visitor implementation + BeforeAndAfterVisitor interface { + Visitor + Before(x any) + After(x any) + } + + // typeVisitor is a generic visitor for a specific type T (the "generic" name was + // however taken). Contrary to the [GenericVisitor], the typeVisitor only invokes + // the visit function for nodes of type T, saving both CPU cycles and type assertions. + // typeVisitor implementations carry no state, and can be shared freely across + // goroutines. Access is private for the time being, as there is already inflation + // in visitor types exposed in the AST package. The various WalkXXX functions however + // now leverage typeVisitor under the hood. + // + // While a typeVisitor is generally a more performant option over a GenericVisitor, + // it is not as flexible: a type visitor can only visit nodes of a single type T, + // whereas a GenericVisitor visits all nodes. Adding to that, a typeVisitor can only + // be instantiated for **concrete types** — not interfaces (e.g., [*Expr], not [Node]), + // as reflection would be required to determine the concrete type at runtime, thus + // nullifying the performance benefits of the typeVisitor in the first place. + typeVisitor[T any] struct { + typ any + } +) + +// Walk iterates the AST by calling the Visit function on the [Visitor] +// v for x before recursing. +// +// Deprecated: use [GenericVisitor.Walk] +func Walk(v Visitor, x any) { + if bav, ok := v.(BeforeAndAfterVisitor); !ok { + walk(v, x) + } else { + bav.Before(x) + walk(bav, x) + bav.After(x) + } +} + +// WalkBeforeAndAfter iterates the AST by calling the Visit function on the +// Visitor v for x before recursing. 
+// +// Deprecated: use [GenericVisitor.Walk] +func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x any) { + Walk(v, x) +} + +func walk(v Visitor, x any) { + w := v.Visit(x) + if w == nil { + return + } + switch x := x.(type) { + case *Module: + Walk(w, x.Package) + for i := range x.Imports { + Walk(w, x.Imports[i]) + } + for i := range x.Rules { + Walk(w, x.Rules[i]) + } + for i := range x.Annotations { + Walk(w, x.Annotations[i]) + } + for i := range x.Comments { + Walk(w, x.Comments[i]) + } + case *Package: + Walk(w, x.Path) + case *Import: + Walk(w, x.Path) + Walk(w, x.Alias) + case *Rule: + Walk(w, x.Head) + Walk(w, x.Body) + if x.Else != nil { + Walk(w, x.Else) + } + case *Head: + Walk(w, x.Name) + Walk(w, x.Args) + if x.Key != nil { + Walk(w, x.Key) + } + if x.Value != nil { + Walk(w, x.Value) + } + case Body: + for i := range x { + Walk(w, x[i]) + } + case Args: + for i := range x { + Walk(w, x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + Walk(w, ts) + case []*Term: + for i := range ts { + Walk(w, ts[i]) + } + } + for i := range x.With { + Walk(w, x.With[i]) + } + case *With: + Walk(w, x.Target) + Walk(w, x.Value) + case *Term: + Walk(w, x.Value) + case Ref: + for i := range x { + Walk(w, x[i]) + } + case *object: + x.Foreach(func(k, vv *Term) { + Walk(w, k) + Walk(w, vv) + }) + case *Array: + x.Foreach(func(t *Term) { + Walk(w, t) + }) + case Set: + x.Foreach(func(t *Term) { + Walk(w, t) + }) + case *ArrayComprehension: + Walk(w, x.Term) + Walk(w, x.Body) + case *ObjectComprehension: + Walk(w, x.Key) + Walk(w, x.Value) + Walk(w, x.Body) + case *SetComprehension: + Walk(w, x.Term) + Walk(w, x.Body) + case Call: + for i := range x { + Walk(w, x[i]) + } + case *Every: + if x.Key != nil { + Walk(w, x.Key) + } + Walk(w, x.Value) + Walk(w, x.Domain) + Walk(w, x.Body) + case *SomeDecl: + for i := range x.Symbols { + Walk(w, x.Symbols[i]) + } + case *TemplateString: + for i := range x.Parts { + Walk(w, x.Parts[i]) + } + } 
+} + +// WalkVars calls the function f on all vars under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkVars(x any, f func(Var) bool) { + varTypeVisitor.walk(x, f) +} + +// WalkClosures calls the function f on all closures under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkClosures(x any, f func(any) bool) { + vis := NewGenericVisitor(func(x any) bool { + switch x := x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: + return f(x) + } + return false + }) + vis.Walk(x) +} + +// WalkRefs calls the function f on all references under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkRefs(x any, f func(Ref) bool) { + refTypeVisitor.walk(x, f) +} + +// WalkTerms calls the function f on all terms under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkTerms(x any, f func(*Term) bool) { + termTypeVisitor.walk(x, f) +} + +// WalkWiths calls the function f on all with modifiers under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkWiths(x any, f func(*With) bool) { + withTypeVisitor.walk(x, f) +} + +// WalkExprs calls the function f on all expressions under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkExprs(x any, f func(*Expr) bool) { + exprTypeVisitor.walk(x, f) +} + +// WalkBodies calls the function f on all bodies under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkBodies(x any, f func(Body) bool) { + bodyTypeVisitor.walk(x, f) +} + +// WalkRules calls the function f on all rules under x. If the function f +// returns true, AST nodes under the last node will not be visited. 
+func WalkRules(x any, f func(*Rule) bool) { + switch x := x.(type) { + case *Module: + for i := range x.Rules { + if !f(x.Rules[i]) && x.Rules[i].Else != nil { + WalkRules(x.Rules[i].Else, f) + } + } + case *Rule: + if !f(x) && x.Else != nil { + WalkRules(x.Else, f) + } + default: + ruleTypeVisitor.walk(x, f) + } +} + +// WalkNodes calls the function f on all nodes under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkNodes(x any, f func(Node) bool) { + vis := NewGenericVisitor(func(x any) bool { + if n, ok := x.(Node); ok { + return f(n) + } + return false + }) + vis.Walk(x) +} + +func newTypeVisitor[T any]() *typeVisitor[T] { + var t T + + return &typeVisitor[T]{typ: any(t)} +} + +func (tv *typeVisitor[T]) walkArgs(args Args, visit func(x T) bool) { + // If T is not Args, avoid allocation by inlining the walk. + if _, ok := tv.typ.(Args); !ok { + for i := range args { + tv.walk(args[i], visit) + } + } else { + tv.walk(args, visit) // allocates + } +} + +func (tv *typeVisitor[T]) walkBody(body Body, visit func(x T) bool) { + if _, ok := tv.typ.(Body); !ok { + for i := range body { + tv.walk(body[i], visit) + } + } else { + tv.walk(body, visit) // allocates + } +} + +func (tv *typeVisitor[T]) walkRef(ref Ref, visit func(x T) bool) { + if _, ok := tv.typ.(Ref); !ok { + for i := range ref { + tv.walk(ref[i], visit) + } + } else { + tv.walk(ref, visit) // allocates + } +} + +func (tv *typeVisitor[T]) walk(x any, visit func(x T) bool) { + if v, ok := x.(T); ok && visit(v) { + return + } + + switch x := x.(type) { + case *Module: + tv.walk(x.Package, visit) + for i := range x.Imports { + tv.walk(x.Imports[i], visit) + } + for i := range x.Rules { + tv.walk(x.Rules[i], visit) + } + for i := range x.Annotations { + tv.walk(x.Annotations[i], visit) + } + for i := range x.Comments { + tv.walk(x.Comments[i], visit) + } + case *Package: + tv.walkRef(x.Path, visit) + case *Import: + tv.walk(x.Path, visit) + if _, ok := 
tv.typ.(Var); ok { + tv.walk(x.Alias, visit) + } + case *Rule: + tv.walk(x.Head, visit) + tv.walkBody(x.Body, visit) + if x.Else != nil { + tv.walk(x.Else, visit) + } + case *Head: + if _, ok := tv.typ.(Var); ok { + tv.walk(x.Name, visit) + } + tv.walkArgs(x.Args, visit) + if x.Key != nil { + tv.walk(x.Key, visit) + } + if x.Value != nil { + tv.walk(x.Value, visit) + } + case Body: + for i := range x { + tv.walk(x[i], visit) + } + case Args: + for i := range x { + tv.walk(x[i], visit) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + tv.walk(ts, visit) + case []*Term: + for i := range ts { + tv.walk(ts[i], visit) + } + } + for i := range x.With { + tv.walk(x.With[i], visit) + } + case *With: + tv.walk(x.Target, visit) + tv.walk(x.Value, visit) + case *Term: + tv.walk(x.Value, visit) + case Ref: + for i := range x { + tv.walk(x[i], visit) + } + case *object: + x.Foreach(func(k, v *Term) { + tv.walk(k, visit) + tv.walk(v, visit) + }) + case Object: + for _, k := range x.Keys() { + tv.walk(k, visit) + tv.walk(x.Get(k), visit) + } + case *Array: + for i := range x.Len() { + tv.walk(x.Elem(i), visit) + } + case Set: + xSlice := x.Slice() + for i := range xSlice { + tv.walk(xSlice[i], visit) + } + case *ArrayComprehension: + tv.walk(x.Term, visit) + tv.walkBody(x.Body, visit) + case *ObjectComprehension: + tv.walk(x.Key, visit) + tv.walk(x.Value, visit) + tv.walkBody(x.Body, visit) + case *SetComprehension: + tv.walk(x.Term, visit) + tv.walkBody(x.Body, visit) + case Call: + for i := range x { + tv.walk(x[i], visit) + } + case *Every: + if x.Key != nil { + tv.walk(x.Key, visit) + } + tv.walk(x.Value, visit) + tv.walk(x.Domain, visit) + tv.walkBody(x.Body, visit) + case *SomeDecl: + for i := range x.Symbols { + tv.walk(x.Symbols[i], visit) + } + case *TemplateString: + for i := range x.Parts { + tv.walk(x.Parts[i], visit) + } + } +} + +// NewGenericVisitor returns a new GenericVisitor that will invoke the function +// f on AST nodes. 
Note that while it returns a pointer, the creating a GenericVisitor +// doesn't commonly allocate it on the heap, as long as it doesn't escape the function +// in which it is created and used (as it's trivially inlined). +func NewGenericVisitor(f func(x any) bool) *GenericVisitor { + return &GenericVisitor{f} +} + +// Walk iterates the AST by calling the function f on the +// GenericVisitor before recursing. Contrary to the generic Walk, this +// does not require allocating the visitor from heap. +func (vis *GenericVisitor) Walk(x any) { + if vis.f(x) { + return + } + + switch x := x.(type) { + case *Module: + vis.Walk(x.Package) + for i := range x.Imports { + vis.Walk(x.Imports[i]) + } + for i := range x.Rules { + vis.Walk(x.Rules[i]) + } + for i := range x.Annotations { + vis.Walk(x.Annotations[i]) + } + for i := range x.Comments { + vis.Walk(x.Comments[i]) + } + case *Package: + vis.Walk(x.Path) + case *Import: + vis.Walk(x.Path) + if x.Alias != "" { + vis.f(x.Alias) + } + case *Rule: + vis.Walk(x.Head) + vis.Walk(x.Body) + if x.Else != nil { + vis.Walk(x.Else) + } + case *Head: + if x.Name != "" { + vis.f(x.Name) + } + if x.Args != nil { + vis.Walk(x.Args) + } + if x.Key != nil { + vis.Walk(x.Key) + } + if x.Value != nil { + vis.Walk(x.Value) + } + case Body: + for i := range x { + vis.Walk(x[i]) + } + case Args: + for i := range x { + vis.Walk(x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + vis.Walk(ts) + case []*Term: + for i := range ts { + vis.Walk(ts[i]) + } + } + for i := range x.With { + vis.Walk(x.With[i]) + } + case *With: + vis.Walk(x.Target) + vis.Walk(x.Value) + case *Term: + vis.Walk(x.Value) + case Ref: + for i := range x { + vis.Walk(x[i]) + } + case *object: + x.Foreach(func(k, _ *Term) { + vis.Walk(k) + vis.Walk(x.Get(k)) + }) + case Object: + for _, k := range x.Keys() { + vis.Walk(k) + vis.Walk(x.Get(k)) + } + case *Array: + for i := range x.Len() { + vis.Walk(x.Elem(i)) + } + case Set: + xSlice := 
x.Slice() + for i := range xSlice { + vis.Walk(xSlice[i]) + } + case *ArrayComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case *ObjectComprehension: + vis.Walk(x.Key) + vis.Walk(x.Value) + vis.Walk(x.Body) + case *SetComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case Call: + for i := range x { + vis.Walk(x[i]) + } + case *Every: + if x.Key != nil { + vis.Walk(x.Key) + } + vis.Walk(x.Value) + vis.Walk(x.Domain) + vis.Walk(x.Body) + case *SomeDecl: + for i := range x.Symbols { + vis.Walk(x.Symbols[i]) + } + case *TemplateString: + for i := range x.Parts { + vis.Walk(x.Parts[i]) + } + } +} + +// NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that +// will invoke the functions before and after AST nodes. +func NewBeforeAfterVisitor(before func(x any) bool, after func(x any)) *BeforeAfterVisitor { + return &BeforeAfterVisitor{before, after} +} + +// Walk iterates the AST by calling the functions on the +// BeforeAndAfterVisitor before and after recursing. Contrary to the +// generic Walk, this does not require allocating the visitor from +// heap. 
func (vis *BeforeAfterVisitor) Walk(x any) {
	// after(x) always runs, even when before(x) prunes the subtree.
	defer vis.after(x)
	if vis.before(x) {
		return
	}

	switch x := x.(type) {
	case *Module:
		vis.Walk(x.Package)
		for i := range x.Imports {
			vis.Walk(x.Imports[i])
		}
		for i := range x.Rules {
			vis.Walk(x.Rules[i])
		}
		for i := range x.Annotations {
			vis.Walk(x.Annotations[i])
		}
		for i := range x.Comments {
			vis.Walk(x.Comments[i])
		}
	case *Package:
		vis.Walk(x.Path)
	case *Import:
		vis.Walk(x.Path)
		// NOTE(review): Alias is walked unconditionally here, while
		// GenericVisitor.Walk guards on x.Alias != "" — confirm intended.
		vis.Walk(x.Alias)
	case *Rule:
		vis.Walk(x.Head)
		vis.Walk(x.Body)
		if x.Else != nil {
			vis.Walk(x.Else)
		}
	case *Head:
		if len(x.Reference) > 0 {
			vis.Walk(x.Reference)
		} else {
			vis.Walk(x.Name)
			if x.Key != nil {
				vis.Walk(x.Key)
			}
		}
		vis.Walk(x.Args)
		if x.Value != nil {
			vis.Walk(x.Value)
		}
	case Body:
		for i := range x {
			vis.Walk(x[i])
		}
	case Args:
		for i := range x {
			vis.Walk(x[i])
		}
	case *Expr:
		switch ts := x.Terms.(type) {
		case *Term, *SomeDecl, *Every:
			vis.Walk(ts)
		case []*Term:
			for i := range ts {
				vis.Walk(ts[i])
			}
		}
		for i := range x.With {
			vis.Walk(x.With[i])
		}
	case *With:
		vis.Walk(x.Target)
		vis.Walk(x.Value)
	case *Term:
		vis.Walk(x.Value)
	case Ref:
		for i := range x {
			vis.Walk(x[i])
		}
	case *object:
		x.Foreach(func(k, _ *Term) {
			vis.Walk(k)
			vis.Walk(x.Get(k))
		})
	case Object:
		x.Foreach(func(k, _ *Term) {
			vis.Walk(k)
			vis.Walk(x.Get(k))
		})
	case *Array:
		x.Foreach(func(t *Term) {
			vis.Walk(t)
		})
	case Set:
		xSlice := x.Slice()
		for i := range xSlice {
			vis.Walk(xSlice[i])
		}
	case *ArrayComprehension:
		vis.Walk(x.Term)
		vis.Walk(x.Body)
	case *ObjectComprehension:
		vis.Walk(x.Key)
		vis.Walk(x.Value)
		vis.Walk(x.Body)
	case *SetComprehension:
		vis.Walk(x.Term)
		vis.Walk(x.Body)
	case Call:
		for i := range x {
			vis.Walk(x[i])
		}
	case *Every:
		if x.Key != nil {
			vis.Walk(x.Key)
		}
		vis.Walk(x.Value)
		vis.Walk(x.Domain)
		vis.Walk(x.Body)
	case *SomeDecl:
		for i := range x.Symbols {
			vis.Walk(x.Symbols[i])
		}
		// NOTE(review): unlike GenericVisitor.Walk, there is no *TemplateString
		// case here — confirm against upstream before relying on this visitor
		// for template strings.
	}
}

// NewVarVisitor returns a new [VarVisitor] object.
func NewVarVisitor() *VarVisitor {
	return &VarVisitor{
		vars: NewVarSet(),
	}
}

// ClearOrNewVarVisitor clears a non-nil [VarVisitor] or returns a new one.
func ClearOrNewVarVisitor(vis *VarVisitor) *VarVisitor {
	if vis == nil {
		return NewVarVisitor()
	}

	return vis.Clear()
}

// ClearOrNew resets the visitor to its initial state, or returns a new one if nil.
//
// Deprecated: use [ClearOrNewVarVisitor] instead.
func (vis *VarVisitor) ClearOrNew() *VarVisitor {
	return ClearOrNewVarVisitor(vis)
}

// Clear resets the visitor to its initial state, and returns it for chaining.
// The vars map's backing storage is kept (clear, not re-make) so reuse avoids
// reallocation.
func (vis *VarVisitor) Clear() *VarVisitor {
	vis.params = VarVisitorParams{}
	clear(vis.vars)

	return vis
}

// WithParams sets the parameters in params on vis.
func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor {
	vis.params = params
	return vis
}

// Add adds a variable v to the visitor's set of variables.
func (vis *VarVisitor) Add(v Var) {
	if vis.vars == nil {
		vis.vars = NewVarSet(v)
	} else {
		vis.vars.Add(v)
	}
}

// Vars returns a [VarSet] that contains collected vars.
func (vis *VarVisitor) Vars() VarSet {
	// Returned set is the live internal set, not a copy.
	return vis.vars
}

// visit determines if the VarVisitor will recurse into x: if it returns `true`,
// the visitor will _skip_ that branch of the AST
func (vis *VarVisitor) visit(v any) bool {
	if vis.params.SkipObjectKeys {
		if o, ok := v.(Object); ok {
			// Walk values only; keys are intentionally skipped.
			o.Foreach(func(_, v *Term) {
				vis.Walk(v)
			})
			return true
		}
	}
	if vis.params.SkipRefHead {
		if r, ok := v.(Ref); ok {
			// Skip the head (element 0) of the reference.
			rSlice := r[1:]
			for i := range rSlice {
				vis.Walk(rSlice[i])
			}
			return true
		}
	}
	if vis.params.SkipClosures {
		switch v := v.(type) {
		case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *TemplateString:
			return true
		case *Expr:
			if ev, ok := v.Terms.(*Every); ok {
				vis.Walk(ev.Domain)
				// We're _not_ walking ev.Body -- that's the closure here
				return true
			}
		}
	}
	if vis.params.SkipWithTarget {
		if v, ok := v.(*With); ok {
			vis.Walk(v.Value)
			return true
		}
	}
	if vis.params.SkipSets {
		if _, ok := v.(Set); ok {
			return true
		}
	}
	if vis.params.SkipRefCallHead {
		switch v := v.(type) {
		case *Expr:
			if terms, ok := v.Terms.([]*Term); ok {
				// Operator ref: skip its head, walk the remaining terms.
				termSlice := terms[0].Value.(Ref)[1:]
				for i := range termSlice {
					vis.Walk(termSlice[i])
				}
				for i := 1; i < len(terms); i++ {
					vis.Walk(terms[i])
				}
				for i := range v.With {
					vis.Walk(v.With[i])
				}
				return true
			}
		case Call:
			operator := v[0].Value.(Ref)
			for i := 1; i < len(operator); i++ {
				vis.Walk(operator[i])
			}
			for i := 1; i < len(v); i++ {
				vis.Walk(v[i])
			}
			return true
		case *With:
			if ref, ok := v.Target.Value.(Ref); ok {
				refSlice := ref[1:]
				for i := range refSlice {
					vis.Walk(refSlice[i])
				}
			}
			if ref, ok := v.Value.Value.(Ref); ok {
				refSlice := ref[1:]
				for i := range refSlice {
					vis.Walk(refSlice[i])
				}
			} else {
				vis.Walk(v.Value)
			}
			return true
		}
	}
	if vis.params.SkipTemplateStrings {
		if _, ok := v.(*TemplateString); ok {
			return true
		}
	}
	if v, ok := v.(Var); ok {
		vis.Add(v)
		return true
	}
	return false
}

// Walk iterates the AST by calling the function f on the [VarVisitor] before recursing.
// Contrary to the deprecated [Walk] function, this does not require allocating the visitor from heap.
func (vis *VarVisitor) Walk(x any) {
	if vis.visit(x) {
		return
	}

	switch x := x.(type) {
	case *Module:
		// Only rules are walked for modules (package/imports handled by callers).
		for i := range x.Rules {
			vis.Walk(x.Rules[i])
		}
	case *Package:
		vis.WalkRef(x.Path)
	case *Import:
		vis.Walk(x.Path)
		if x.Alias != "" {
			vis.Add(x.Alias)
		}
	case *Rule:
		vis.Walk(x.Head)
		vis.WalkBody(x.Body)
		if x.Else != nil {
			vis.Walk(x.Else)
		}
	case *Head:
		if len(x.Reference) > 0 {
			vis.WalkRef(x.Reference)
		} else {
			vis.Add(x.Name)
			if x.Key != nil {
				vis.Walk(x.Key)
			}
		}
		vis.WalkArgs(x.Args)
		if x.Value != nil {
			vis.Walk(x.Value)
		}
	case Body:
		vis.WalkBody(x)
	case Args:
		vis.WalkArgs(x)
	case *Expr:
		switch ts := x.Terms.(type) {
		case *Term, *SomeDecl, *Every:
			vis.Walk(ts)
		case []*Term:
			for i := range ts {
				// Unwrapping .Value here skips the *Term boxing step.
				vis.Walk(ts[i].Value)
			}
		}
		for i := range x.With {
			vis.Walk(x.With[i])
		}
	case *With:
		vis.Walk(x.Target.Value)
		vis.Walk(x.Value.Value)
	case *Term:
		vis.Walk(x.Value)
		if vVar, ok := x.Value.(Var); ok {
			// Record where the var occurred for later diagnostics.
			vis.vars.AddLocation(vVar, x.Location)
		}
	case Ref:
		for i := range x {
			vis.Walk(x[i].Value)
		}
	case *object:
		x.Foreach(func(k, v *Term) {
			vis.Walk(k)
			vis.Walk(v)
		})
	case *Array:
		x.Foreach(func(t *Term) {
			vis.Walk(t)
		})
	case Set:
		xSlice := x.Slice()
		for i := range xSlice {
			vis.Walk(xSlice[i])
		}
	case *ArrayComprehension:
		vis.Walk(x.Term.Value)
		vis.WalkBody(x.Body)
	case *ObjectComprehension:
		vis.Walk(x.Key.Value)
		vis.Walk(x.Value.Value)
		vis.WalkBody(x.Body)
	case *SetComprehension:
		vis.Walk(x.Term.Value)
		vis.WalkBody(x.Body)
	case Call:
		for i := range x {
			vis.Walk(x[i].Value)
		}
	case *Every:
		if x.Key != nil {
			vis.Walk(x.Key.Value)
		}
		vis.Walk(x.Value)
		vis.Walk(x.Domain)
		vis.WalkBody(x.Body)
	case *SomeDecl:
		for i := range x.Symbols {
			vis.Walk(x.Symbols[i])
		}
	case *TemplateString:
		for i := range x.Parts {
			vis.Walk(x.Parts[i])
		}
	}
}

// WalkArgs exists only to avoid the allocation cost of boxing Args to `any` in the VarVisitor.
// Use it when you know beforehand that the type to walk is Args.
func (vis *VarVisitor) WalkArgs(x Args) {
	for i := range x {
		vis.Walk(x[i].Value)
	}
}

// WalkRef exists only to avoid the allocation cost of boxing Ref to `any` in the VarVisitor.
// Use it when you know beforehand that the type to walk is a Ref.
func (vis *VarVisitor) WalkRef(ref Ref) {
	if vis.params.SkipRefHead {
		ref = ref[1:]
	}
	for _, term := range ref {
		vis.Walk(term.Value)
		if vVar, ok := term.Value.(Var); ok {
			vis.vars.AddLocation(vVar, term.Location)
		}
	}
}

// WalkBody exists only to avoid the allocation cost of boxing Body to `any` in the VarVisitor.
// Use it when you know beforehand that the type to walk is a Body.
func (vis *VarVisitor) WalkBody(body Body) {
	for _, expr := range body {
		vis.Walk(expr)
	}
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go
new file mode 100644
index 0000000000..bf00e96ca2
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go
@@ -0,0 +1,1783 @@
// Copyright 2018 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

// Package bundle implements bundle loading.
+package bundle + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "maps" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/gobwas/glob" + "golang.org/x/sync/errgroup" + + "github.com/open-policy-agent/opa/internal/file/archive" + "github.com/open-policy-agent/opa/internal/merge" + "github.com/open-policy-agent/opa/v1/ast" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/format" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/util" +) + +// Common file extensions and file names. +const ( + RegoExt = ".rego" + WasmFile = "policy.wasm" + PlanFile = "plan.json" + ManifestExt = ".manifest" + SignaturesFile = "signatures.json" + patchFile = "patch.json" + dataFile = "data.json" + yamlDataFile = "data.yaml" + ymlDataFile = "data.yml" + defaultHashingAlg = "SHA-256" + DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs + DeltaBundleType = "delta" + SnapshotBundleType = "snapshot" +) + +var ( + empty Bundle +) + +// Bundle represents a loaded bundle. The bundle can contain data and policies. +type Bundle struct { + Signatures SignaturesConfig + Manifest Manifest + Data map[string]any + Modules []ModuleFile + Wasm []byte // Deprecated. Use WasmModules instead + WasmModules []WasmModuleFile + PlanModules []PlanModuleFile + Patch Patch + Etag string + Raw []Raw + + lazyLoadingMode bool + sizeLimitBytes int64 +} + +// Raw contains raw bytes representing the bundle's content +type Raw struct { + Path string + Value []byte + module *ModuleFile +} + +// Patch contains an array of objects wherein each object represents the patch operation to be +// applied to the bundle data. +type Patch struct { + Data []PatchOperation `json:"data,omitempty"` +} + +// PatchOperation models a single patch operation against a document. 
+type PatchOperation struct { + Op string `json:"op"` + Path string `json:"path"` + Value any `json:"value"` +} + +// SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle. +type SignaturesConfig struct { + Signatures []string `json:"signatures,omitempty"` + Plugin string `json:"plugin,omitempty"` +} + +// isEmpty returns if the SignaturesConfig is empty. +func (s SignaturesConfig) isEmpty() bool { + return s.Signatures == nil && s.Plugin == "" +} + +// DecodedSignature represents the decoded JWT payload. +type DecodedSignature struct { + Files []FileInfo `json:"files"` + KeyID string `json:"keyid"` // Deprecated, use kid in the JWT header instead. + Scope string `json:"scope"` + IssuedAt int64 `json:"iat"` + Issuer string `json:"iss"` +} + +// FileInfo contains the hashing algorithm used, resulting digest etc. +type FileInfo struct { + Name string `json:"name"` + Hash string `json:"hash"` + Algorithm string `json:"algorithm"` +} + +// NewFile returns a new FileInfo. +func NewFile(name, hash, alg string) FileInfo { + return FileInfo{ + Name: name, + Hash: hash, + Algorithm: alg, + } +} + +// Manifest represents the manifest from a bundle. The manifest may contain +// metadata such as the bundle revision. +type Manifest struct { + Revision string `json:"revision"` + Roots *[]string `json:"roots,omitempty"` + WasmResolvers []WasmResolver `json:"wasm,omitempty"` + // RegoVersion is the global Rego version for the bundle described by this Manifest. + // The Rego version of individual files can be overridden in FileRegoVersions. + // We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time. + // We use a pointer so that we can support hand-made bundles that don't have an explicit version appropriately. + // E.g. 
in OPA 0.x if --v1-compatible is used when consuming the bundle, and there is no specified version, + // we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible + // flag and no explicit bundle version should default to v1. + RegoVersion *int `json:"rego_version,omitempty"` + // FileRegoVersions is a map from file paths to Rego versions. + // This allows individual files to override the global Rego version specified by RegoVersion. + FileRegoVersions map[string]int `json:"file_rego_versions,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` + + compiledFileRegoVersions []fileRegoVersion +} + +type fileRegoVersion struct { + path glob.Glob + version int +} + +// WasmResolver maps a wasm module to an entrypoint ref. +type WasmResolver struct { + Entrypoint string `json:"entrypoint,omitempty"` + Module string `json:"module,omitempty"` + Annotations []*ast.Annotations `json:"annotations,omitempty"` +} + +// Init initializes the manifest. If you instantiate a manifest +// manually, call Init to ensure that the roots are set properly. +func (m *Manifest) Init() { + if m.Roots == nil { + defaultRoots := []string{""} + m.Roots = &defaultRoots + } +} + +// AddRoot adds r to the roots of m. This function is idempotent. +func (m *Manifest) AddRoot(r string) { + m.Init() + if !RootPathsContain(*m.Roots, r) { + *m.Roots = append(*m.Roots, r) + } +} + +func (m *Manifest) SetRegoVersion(v ast.RegoVersion) { + m.Init() + regoVersion := 0 + if v == ast.RegoV1 { + regoVersion = 1 + } + m.RegoVersion = ®oVersion +} + +// Equal returns true if m is semantically equivalent to other. +func (m Manifest) Equal(other Manifest) bool { + // This is safe since both are passed by value. 
+ m.Init() + other.Init() + + if m.Revision != other.Revision { + return false + } + + if m.RegoVersion == nil && other.RegoVersion != nil { + return false + } + if m.RegoVersion != nil && other.RegoVersion == nil { + return false + } + if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion { + return false + } + + // If both are nil, or both are empty, we consider them equal. + if !(len(m.FileRegoVersions) == 0 && len(other.FileRegoVersions) == 0) && + !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) { + return false + } + + if !reflect.DeepEqual(m.Metadata, other.Metadata) { + return false + } + + return m.equalWasmResolversAndRoots(other) +} + +func (m Manifest) Empty() bool { + return m.Equal(Manifest{}) +} + +// Copy returns a deep copy of the manifest. +func (m Manifest) Copy() Manifest { + m.Init() + roots := make([]string, len(*m.Roots)) + copy(roots, *m.Roots) + m.Roots = &roots + + wasmModules := make([]WasmResolver, len(m.WasmResolvers)) + copy(wasmModules, m.WasmResolvers) + m.WasmResolvers = wasmModules + + metadata := m.Metadata + + if metadata != nil { + m.Metadata = make(map[string]any) + maps.Copy(m.Metadata, metadata) + } + + return m +} + +func (m Manifest) String() string { + m.Init() + if m.RegoVersion != nil { + return fmt.Sprintf("", + m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata) + } + return fmt.Sprintf("", + m.Revision, *m.Roots, m.WasmResolvers, m.Metadata) +} + +func (m Manifest) rootSet() stringSet { + rs := map[string]struct{}{} + + for _, r := range *m.Roots { + rs[r] = struct{}{} + } + + return stringSet(rs) +} + +func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool { + if len(m.WasmResolvers) != len(other.WasmResolvers) { + return false + } + + for i := range len(m.WasmResolvers) { + if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) { + return false + } + } + + return m.rootSet().Equal(other.rootSet()) +} + +func (wr *WasmResolver) Equal(other 
*WasmResolver) bool { + if wr == nil && other == nil { + return true + } + + if wr == nil || other == nil { + return false + } + + if wr.Module != other.Module { + return false + } + + if wr.Entrypoint != other.Entrypoint { + return false + } + + annotLen := len(wr.Annotations) + if annotLen != len(other.Annotations) { + return false + } + + for i := range annotLen { + if wr.Annotations[i].Compare(other.Annotations[i]) != 0 { + return false + } + } + + return true +} + +type stringSet map[string]struct{} + +func (ss stringSet) Equal(other stringSet) bool { + if len(ss) != len(other) { + return false + } + for k := range other { + if _, ok := ss[k]; !ok { + return false + } + } + return true +} + +func (m *Manifest) validateAndInjectDefaults(b Bundle) error { + m.Init() + + // Validate roots in bundle. + roots := *m.Roots + + // Standardize the roots (no starting or trailing slash) + for i := range roots { + roots[i] = strings.Trim(roots[i], "/") + } + + for i := range len(roots) - 1 { + for j := i + 1; j < len(roots); j++ { + if RootPathsOverlap(roots[i], roots[j]) { + return fmt.Errorf("manifest has overlapped roots: '%s' and '%s'", roots[i], roots[j]) + } + } + } + + // Validate modules in bundle. 
+ for _, module := range b.Modules { + found := false + if path, err := module.Parsed.Package.Path.Ptr(); err == nil { + found = RootPathsContain(roots, path) + } + if !found { + return fmt.Errorf("manifest roots %v do not permit '%v' in module '%s'", roots, module.Parsed.Package, module.Path) + } + } + + // Build a set of wasm module entrypoints to validate + wasmModuleToEps := map[string]string{} + seenEps := map[string]struct{}{} + for _, wm := range b.WasmModules { + wasmModuleToEps[wm.Path] = "" + } + + for _, wmConfig := range b.Manifest.WasmResolvers { + _, ok := wasmModuleToEps[wmConfig.Module] + if !ok { + return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module) + } + + // Ensure wasm module entrypoint in within bundle roots + if !RootPathsContain(roots, wmConfig.Entrypoint) { + return fmt.Errorf("manifest roots %v do not permit '%s' entrypoint for wasm module '%s'", roots, wmConfig.Entrypoint, wmConfig.Module) + } + + if _, ok := seenEps[wmConfig.Entrypoint]; ok { + return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint) + } + seenEps[wmConfig.Entrypoint] = struct{}{} + + wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint + } + + // Validate data patches in bundle. + for _, patch := range b.Patch.Data { + path := strings.Trim(patch.Path, "/") + if !RootPathsContain(roots, path) { + return fmt.Errorf("manifest roots %v do not permit data patch at path '%s'", roots, path) + } + } + + if b.lazyLoadingMode { + return nil + } + + // Validate data in bundle. 
+ return dfs(b.Data, "", func(path string, node any) (bool, error) { + path = strings.Trim(path, "/") + if RootPathsContain(roots, path) { + return true, nil + } + + if _, ok := node.(map[string]any); ok { + for i := range roots { + if RootPathsContain(strings.Split(path, "/"), roots[i]) { + return false, nil + } + } + } + return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path) + }) +} + +// ModuleFile represents a single module contained in a bundle. +type ModuleFile struct { + URL string + Path string + RelativePath string + Raw []byte + Parsed *ast.Module +} + +// WasmModuleFile represents a single wasm module contained in a bundle. +type WasmModuleFile struct { + URL string + Path string + Entrypoints []ast.Ref + Raw []byte +} + +// PlanModuleFile represents a single plan module contained in a bundle. +// +// NOTE(tsandall): currently the plans are just opaque binary blobs. In the +// future we could inject the entrypoints so that the plans could be executed +// inside of OPA proper like we do for Wasm modules. +type PlanModuleFile struct { + URL string + Path string + Raw []byte +} + +var ( + pluginMtx sync.Mutex + + // The bundle activator to use by default. + bundleExtActivator string + + // The function to use for creating a storage.Store for bundles. + BundleExtStore func() storage.Store +) + +// RegisterDefaultBundleActivator sets the default bundle activator for OPA to use for bundle activation. +// The id must already have been registered with RegisterActivator. +func RegisterDefaultBundleActivator(id string) { + pluginMtx.Lock() + defer pluginMtx.Unlock() + + bundleExtActivator = id +} + +// RegisterStoreFunc sets the function to use for creating storage for bundles +// in OPA. If no function is registered, OPA will use situational defaults to +// decide on what sort of storage.Store to create when bundle storage is +// needed. Typically the default is inmem.Store. 
+func RegisterStoreFunc(s func() storage.Store) { + pluginMtx.Lock() + defer pluginMtx.Unlock() + + BundleExtStore = s +} + +// HasExtension returns true if a default bundle activator has been set +// with RegisterDefaultBundleActivator. +func HasExtension() bool { + pluginMtx.Lock() + defer pluginMtx.Unlock() + + return bundleExtActivator != "" +} + +// Reader contains the reader to load the bundle from. +type Reader struct { + loader DirectoryLoader + includeManifestInData bool + metrics metrics.Metrics + baseDir string + verificationConfig *VerificationConfig + skipVerify bool + processAnnotations bool + capabilities *ast.Capabilities + files map[string]FileInfo // files in the bundle signature payload + sizeLimitBytes int64 + etag string + lazyLoadingMode bool + name string + persist bool + regoVersion ast.RegoVersion + followSymlinks bool +} + +// NewReader is deprecated. Use NewCustomReader instead. +func NewReader(r io.Reader) *Reader { + return NewCustomReader(NewTarballLoader(r)) +} + +// NewCustomReader returns a new Reader configured to use the +// specified DirectoryLoader. +func NewCustomReader(loader DirectoryLoader) *Reader { + return &Reader{ + loader: loader, + metrics: metrics.NoOp(), + files: make(map[string]FileInfo), + sizeLimitBytes: DefaultSizeLimitBytes + 1, + lazyLoadingMode: HasExtension(), + } +} + +// IncludeManifestInData sets whether the manifest metadata should be +// included in the bundle's data. +func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader { + r.includeManifestInData = includeManifestInData + return r +} + +// WithMetrics sets the metrics object to be used while loading bundles +func (r *Reader) WithMetrics(m metrics.Metrics) *Reader { + r.metrics = m + return r +} + +// WithBaseDir sets a base directory for file paths of loaded Rego +// modules. This will *NOT* affect the loaded path of data files. 
+func (r *Reader) WithBaseDir(dir string) *Reader { + r.baseDir = dir + return r +} + +// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle +func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader { + r.verificationConfig = config + return r +} + +// WithSkipBundleVerification skips verification of a signed bundle +func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader { + r.skipVerify = skipVerify + return r +} + +// WithProcessAnnotations enables annotation processing during .rego file parsing. +func (r *Reader) WithProcessAnnotations(yes bool) *Reader { + r.processAnnotations = yes + return r +} + +// WithCapabilities sets the supported capabilities when loading the files +func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader { + r.capabilities = caps + return r +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. +func (r *Reader) WithJSONOptions(*astJSON.Options) *Reader { + return r +} + +// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger +// than this, an error will be returned by the reader. +func (r *Reader) WithSizeLimitBytes(n int64) *Reader { + r.sizeLimitBytes = n + 1 + return r +} + +// WithBundleEtag sets the given etag value on the bundle +func (r *Reader) WithBundleEtag(etag string) *Reader { + r.etag = etag + return r +} + +// WithBundleName specifies the bundle name +func (r *Reader) WithBundleName(name string) *Reader { + r.name = name + return r +} + +func (r *Reader) WithFollowSymlinks(yes bool) *Reader { + r.followSymlinks = yes + return r +} + +// WithLazyLoadingMode sets the bundle loading mode. If true, +// bundles will be read in lazy mode. 
In this mode, data files in the bundle will not be +// deserialized and the check to validate that the bundle data does not contain paths +// outside the bundle's roots will not be performed while reading the bundle. +func (r *Reader) WithLazyLoadingMode(yes bool) *Reader { + r.lazyLoadingMode = yes + return r +} + +// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk. +func (r *Reader) WithBundlePersistence(persist bool) *Reader { + r.persist = persist + return r +} + +func (r *Reader) WithRegoVersion(version ast.RegoVersion) *Reader { + r.regoVersion = version + return r +} + +func (r *Reader) ParserOptions() ast.ParserOptions { + return ast.ParserOptions{ + ProcessAnnotation: r.processAnnotations, + Capabilities: r.capabilities, + RegoVersion: r.regoVersion, + } +} + +// Read returns a new Bundle loaded from the reader. +func (r *Reader) Read() (Bundle, error) { + bundle, descriptors, err := preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes) + if err != nil { + return empty, err + } + + bundle.lazyLoadingMode = r.lazyLoadingMode + bundle.sizeLimitBytes = r.sizeLimitBytes + + if bundle.Type() == SnapshotBundleType { + if err := r.checkSignaturesAndDescriptors(bundle.Signatures); err != nil { + return empty, err + } + + bundle.Data = map[string]any{} + } + + var modules []ModuleFile + for _, f := range descriptors { + buf, err := readFile(f, r.sizeLimitBytes) + if err != nil { + return empty, err + } + + // verify the file content + if bundle.Type() == SnapshotBundleType && !bundle.Signatures.isEmpty() { + path := f.Path() + if r.baseDir != "" { + path = f.URL() + } + path = strings.TrimPrefix(path, "/") + + // check if the file is to be excluded from bundle verification + if r.isFileExcluded(path) { + delete(r.files, path) + } else { + if err = r.verifyBundleFile(path, buf); err != nil { + return empty, err + } + } + } + + // Normalize the paths to use `/` separators + path := filepath.ToSlash(f.Path()) + + if 
strings.HasSuffix(path, RegoExt) { + fullPath := r.fullPath(path) + bs := buf.Bytes() + + // Modules are parsed after we've had a chance to read the manifest + mf := ModuleFile{ + URL: f.URL(), + Path: fullPath, + RelativePath: path, + Raw: bs, + } + modules = append(modules, mf) + + if r.lazyLoadingMode { + p := fullPath + if r.name != "" { + p = modulePathWithPrefix(r.name, fullPath) + } + + bundle.Raw = append(bundle.Raw, Raw{Path: p, Value: bs, module: &mf}) + } + } else if filepath.Base(path) == WasmFile { + bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{ + URL: f.URL(), + Path: r.fullPath(path), + Raw: buf.Bytes(), + }) + } else if filepath.Base(path) == PlanFile { + bundle.PlanModules = append(bundle.PlanModules, PlanModuleFile{ + URL: f.URL(), + Path: r.fullPath(path), + Raw: buf.Bytes(), + }) + } else if filepath.Base(path) == dataFile { + if r.lazyLoadingMode { + bundle.Raw = append(bundle.Raw, Raw{Path: path, Value: buf.Bytes()}) + continue + } + + var value any + + r.metrics.Timer(metrics.RegoDataParse).Start() + err := util.UnmarshalJSON(buf.Bytes(), &value) + r.metrics.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return empty, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) + } + + if err := insertValue(bundle, path, value); err != nil { + return empty, err + } + + } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile { + if r.lazyLoadingMode { + bundle.Raw = append(bundle.Raw, Raw{Path: path, Value: buf.Bytes()}) + continue + } + + var value any + + r.metrics.Timer(metrics.RegoDataParse).Start() + err := util.Unmarshal(buf.Bytes(), &value) + r.metrics.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return empty, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) + } + + if err := insertValue(bundle, path, value); err != nil { + return empty, err + } + + } else if strings.HasSuffix(path, ManifestExt) { + if err := 
util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil { + return empty, fmt.Errorf("bundle load failed on manifest decode: %w", err) + } + } + } + + // Parse modules + popts := r.ParserOptions() + popts.RegoVersion = bundle.RegoVersion(popts.EffectiveRegoVersion()) + + g := &errgroup.Group{} + r.metrics.Timer(metrics.RegoModuleParse).Start() + + for i, mf := range modules { + mpopts := popts + if regoVersion, err := bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil { + return *bundle, err + } else if regoVersion != ast.RegoUndefined { + // We don't expect ast.RegoUndefined here, but don't override + // configured rego-version if we do just to be extra protective + mpopts.RegoVersion = regoVersion + } + + g.Go(func() (err error) { + if mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, util.ByteSliceToString(mf.Raw), mpopts); err == nil { + modules[i] = mf + } + return err + }) + } + + err = g.Wait() + r.metrics.Timer(metrics.RegoModuleParse).Stop() + if err != nil { + return empty, err + } + + bundle.Modules = modules + + if bundle.Type() == DeltaBundleType { + if len(bundle.Data) != 0 { + return empty, errors.New("delta bundle expected to contain only patch file but data files found") + } + + if len(bundle.Modules) != 0 { + return empty, errors.New("delta bundle expected to contain only patch file but policy files found") + } + + if len(bundle.WasmModules) != 0 { + return empty, errors.New("delta bundle expected to contain only patch file but wasm files found") + } + + if r.persist { + return empty, errors.New( + "'persist' property is true in config. 
persisting delta bundle to disk is not supported") + } + } + + // check if the bundle signatures specify any files that weren't found in the bundle + if bundle.Type() == SnapshotBundleType && len(r.files) != 0 { + return empty, fmt.Errorf( + "file(s) %v specified in bundle signatures but not found in the target bundle", util.Keys(r.files)) + } + + if err := bundle.Manifest.validateAndInjectDefaults(*bundle); err != nil { + return empty, err + } + + // Inject the wasm module entrypoint refs into the WasmModuleFile structs + epMap := map[string][]string{} + for _, r := range bundle.Manifest.WasmResolvers { + epMap[r.Module] = append(epMap[r.Module], r.Entrypoint) + } + for i := range len(bundle.WasmModules) { + entrypoints := epMap[bundle.WasmModules[i].Path] + for _, entrypoint := range entrypoints { + ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint) + if err != nil { + return empty, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err) + } + bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref) + } + } + + if r.includeManifestInData { + b, err := json.Marshal(&bundle.Manifest) + if err != nil { + return empty, fmt.Errorf("bundle load failed on manifest marshal: %w", err) + } + + var metadata map[string]any + if err := util.UnmarshalJSON(b, &metadata); err != nil { + return empty, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err) + } + + // For backwards compatibility always write to the old unnamed manifest path + // This will *not* be correct if >1 bundle is in use... 
+ if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil { + return empty, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err) + } + } + + bundle.Etag = r.etag + + return *bundle, nil +} + +func (r *Reader) isFileExcluded(path string) bool { + for _, e := range r.verificationConfig.Exclude { + match, _ := filepath.Match(e, path) + if match { + return true + } + } + return false +} + +func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error { + if r.skipVerify { + return nil + } + + if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" { + return errors.New("bundle missing .signatures.json file") + } + + if !signatures.isEmpty() { + if r.verificationConfig == nil { + return errors.New("verification key not provided") + } + + // verify the JWT signatures included in the `.signatures.json` file + return r.verifyBundleSignature(signatures) + } + + return nil +} + +func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error { + var err error + r.files, err = VerifyBundleSignature(sc, r.verificationConfig) + return err +} + +func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error { + return VerifyBundleFile(path, data, r.files) +} + +func (r *Reader) fullPath(path string) string { + if r.baseDir != "" { + path = filepath.Join(r.baseDir, path) + } + return path +} + +// Write is deprecated. Use NewWriter instead. +func Write(w io.Writer, bundle Bundle) error { + return NewWriter(w). + UseModulePath(true). + DisableFormat(true). + Write(bundle) +} + +// Writer implements bundle serialization. +type Writer struct { + usePath bool + disableFormat bool + w io.Writer +} + +// NewWriter returns a bundle writer that writes to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + } +} + +// UseModulePath configures the writer to use the module file path instead of the +// module file URL during serialization. 
This is for backwards compatibility. +func (w *Writer) UseModulePath(yes bool) *Writer { + w.usePath = yes + return w +} + +// DisableFormat configures the writer to just write out raw bytes instead +// of formatting modules before serialization. +func (w *Writer) DisableFormat(yes bool) *Writer { + w.disableFormat = yes + return w +} + +// Write writes the bundle to the writer's output stream. +func (w *Writer) Write(bundle Bundle) error { + tw := archive.NewTarGzWriter(w.w) + + if bundle.Type() == SnapshotBundleType { + if err := tw.WriteJSONFile("/data.json", bundle.Data); err != nil { + return err + } + + for _, module := range bundle.Modules { + path := module.URL + if w.usePath { + path = module.Path + } + + if err := tw.WriteFile(util.WithPrefix(path, "/"), module.Raw); err != nil { + return err + } + } + + if err := w.writeWasm(tw, bundle); err != nil { + return err + } + + if err := writeSignatures(tw, bundle); err != nil { + return err + } + + if err := w.writePlan(tw, bundle); err != nil { + return err + } + } else if bundle.Type() == DeltaBundleType { + if err := tw.WriteJSONFile("/patch.json", bundle.Patch); err != nil { + return err + } + } + + if !bundle.Manifest.Empty() { + if err := tw.WriteJSONFile("/.manifest", bundle.Manifest); err != nil { + return err + } + } + + return tw.Close() +} + +func (w *Writer) writeWasm(tw *archive.TarGzWriter, bundle Bundle) error { + for _, wm := range bundle.WasmModules { + path := wm.URL + if w.usePath { + path = wm.Path + } + + if err := tw.WriteFile(util.WithPrefix(path, "/"), wm.Raw); err != nil { + return err + } + } + + if len(bundle.Wasm) == 0 { + return nil + } + + return tw.WriteFile(util.WithPrefix(WasmFile, "/"), bundle.Wasm) +} + +func (w *Writer) writePlan(tw *archive.TarGzWriter, bundle Bundle) error { + for _, wm := range bundle.PlanModules { + path := wm.URL + if w.usePath { + path = wm.Path + } + + if err := tw.WriteFile(util.WithPrefix(path, "/"), wm.Raw); err != nil { + return err + } + } + + 
return nil +} + +func writeSignatures(tw *archive.TarGzWriter, bundle Bundle) error { + if bundle.Signatures.isEmpty() { + return nil + } + + bs, err := json.MarshalIndent(bundle.Signatures, "", " ") + if err != nil { + return err + } + + return tw.WriteFile(util.WithPrefix(SignaturesFile, "/."), bs) +} + +func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { + + files := []FileInfo{} + + bs, err := hash.HashFile(b.Data) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg)) + + if len(b.Wasm) != 0 { + bs, err := hash.HashFile(b.Wasm) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + for _, wasmModule := range b.WasmModules { + bs, err := hash.HashFile(wasmModule.Raw) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + for _, planmodule := range b.PlanModules { + bs, err := hash.HashFile(planmodule.Raw) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(planmodule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + // If the manifest is essentially empty, don't add it to the signatures since it + // won't be written to the bundle. Otherwise: + // parse the manifest into a JSON structure; + // then recursively order the fields of all objects alphabetically and then apply + // the hash function to result to compute the hash. 
+ if !b.Manifest.Empty() { + mbs, err := json.Marshal(b.Manifest) + if err != nil { + return files, err + } + + var result map[string]any + if err := util.Unmarshal(mbs, &result); err != nil { + return files, err + } + + if bs, err = hash.HashFile(result); err != nil { + return files, err + } + + files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + return files, err +} + +// FormatModules formats Rego modules +// Modules will be formatted to comply with [ast.DefaultRegoVersion], but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported). +func (b *Bundle) FormatModules(useModulePath bool) error { + return b.FormatModulesForRegoVersion(ast.DefaultRegoVersion, true, useModulePath) +} + +// FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version +func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveModuleRegoVersion bool, useModulePath bool) error { + return b.FormatModulesWithOptions(BundleFormatOptions{ + RegoVersion: version, + PreserveModuleRegoVersion: preserveModuleRegoVersion, + UseModulePath: useModulePath, + }) +} + +type BundleFormatOptions struct { + RegoVersion ast.RegoVersion + Capabilities *ast.Capabilities + PreserveModuleRegoVersion bool + UseModulePath bool +} + +// FormatModulesWithOptions formats Rego modules with the given options. 
+func (b *Bundle) FormatModulesWithOptions(opts BundleFormatOptions) error { + var err error + + for i, module := range b.Modules { + fmtOpts := format.Opts{ + RegoVersion: opts.RegoVersion, + Capabilities: opts.Capabilities, + } + + if module.Parsed != nil { + fmtOpts.ParserOptions = &ast.ParserOptions{ + RegoVersion: module.Parsed.RegoVersion(), + } + if opts.PreserveModuleRegoVersion { + fmtOpts.RegoVersion = module.Parsed.RegoVersion() + } + } + + if fmtOpts.Capabilities == nil { + fmtOpts.Capabilities = ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(fmtOpts.RegoVersion)) + } + + if module.Raw == nil { + module.Raw, err = format.AstWithOpts(module.Parsed, fmtOpts) + if err != nil { + return err + } + } else { + p := module.URL + if opts.UseModulePath { + p = module.Path + } + + module.Raw, err = format.SourceWithOpts(p, module.Raw, fmtOpts) + if err != nil { + return err + } + } + b.Modules[i].Raw = module.Raw + } + return nil +} + +// GenerateSignature generates the signature for the given bundle. +func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error { + + hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg)) + if err != nil { + return err + } + + files := []FileInfo{} + + for _, module := range b.Modules { + bytes, err := hash.HashFile(module.Raw) + if err != nil { + return err + } + + path := module.URL + if useModulePath { + path = module.Path + } + files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg)) + } + + result, err := hashBundleFiles(hash, b) + if err != nil { + return err + } + files = append(files, result...) 
+ + // generate signed token + token, err := GenerateSignedToken(files, signingConfig, keyID) + if err != nil { + return err + } + + if signingConfig.Plugin != "" { + b.Signatures.Plugin = signingConfig.Plugin + } + + b.Signatures.Signatures = []string{token} + + return nil +} + +// ParsedModules returns a map of parsed modules with names that are +// unique and human readable for the given a bundle name. +func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { + mods := make(map[string]*ast.Module, len(b.Modules)) + + for _, mf := range b.Modules { + mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed + } + + return mods +} + +func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion { + if v := b.Manifest.RegoVersion; v != nil { + switch *v { + case 0: + return ast.RegoV0 + case 1: + return ast.RegoV1 + } + } + return def +} + +func (b *Bundle) SetRegoVersion(v ast.RegoVersion) { + b.Manifest.SetRegoVersion(v) +} + +// RegoVersionForFile returns the rego-version for the specified file path. +// If there is no defined version for the given path, the default version def is returned. +// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned. 
+func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) { + version, err := b.Manifest.numericRegoVersionForFile(path) + if err != nil { + return def, err + } else if version == nil { + return def, nil + } else if *version == 0 { + return ast.RegoV0, nil + } else if *version == 1 { + return ast.RegoV1, nil + } + return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path) +} + +func (m *Manifest) RegoVersionForFile(path string) (ast.RegoVersion, error) { + v, err := m.numericRegoVersionForFile(path) + if err != nil { + return ast.RegoUndefined, err + } + + if v == nil { + return ast.RegoUndefined, nil + } + + return ast.RegoVersionFromInt(*v), nil +} + +func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) { + var version *int + + if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) { + m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions)) + for pattern, v := range m.FileRegoVersions { + compiled, err := glob.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err) + } + m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v}) + } + } + + for _, fv := range m.compiledFileRegoVersions { + if fv.path.Match(path) { + version = &fv.version + break + } + } + + if version == nil { + version = m.RegoVersion + } + return version, nil +} + +// Equal returns true if this bundle's contents equal the other bundle's +// contents. 
+func (b Bundle) Equal(other Bundle) bool { + if len(b.Modules) != len(other.Modules) { + return false + } + for i := range b.Modules { + // To support bundles built from rootless filesystems we ignore a "/" prefix + // for URLs and Paths, such that "/file" and "file" are equivalent + if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) != + strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) { + return false + } + if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) != + strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) { + return false + } + if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) { + return false + } + if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) { + return false + } + } + if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) { + return false + } + + if !reflect.DeepEqual(b.Data, other.Data) { + return false + } + + return bytes.Equal(b.Wasm, other.Wasm) +} + +// Copy returns a deep copy of the bundle. +func (b Bundle) Copy() Bundle { + + // Copy data. + var x any = b.Data + + if err := util.RoundTrip(&x); err != nil { + panic(err) + } + + if x != nil { + b.Data = x.(map[string]any) + } + + // Copy modules. + for i := range b.Modules { + bs := make([]byte, len(b.Modules[i].Raw)) + copy(bs, b.Modules[i].Raw) + b.Modules[i].Raw = bs + b.Modules[i].Parsed = b.Modules[i].Parsed.Copy() + } + + // Copy manifest. 
+ b.Manifest = b.Manifest.Copy() + + return b +} + +func (b *Bundle) insertData(key []string, value any) error { + // Build an object with the full structure for the value + obj, err := mktree(key, value) + if err != nil { + return err + } + + // Merge the new data in with the current bundle data object + merged, ok := merge.InterfaceMaps(b.Data, obj) + if !ok { + return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) + } + + b.Data = merged + + return nil +} + +func (b *Bundle) readData(key []string) *any { + + if len(key) == 0 { + if len(b.Data) == 0 { + return nil + } + var result any = b.Data + return &result + } + + node := b.Data + + for i := range len(key) - 1 { + + child, ok := node[key[i]] + if !ok { + return nil + } + + childObj, ok := child.(map[string]any) + if !ok { + return nil + } + + node = childObj + } + + child, ok := node[key[len(key)-1]] + if !ok { + return nil + } + + return &child +} + +// Type returns the type of the bundle. +func (b *Bundle) Type() string { + if len(b.Patch.Data) != 0 { + return DeltaBundleType + } + return SnapshotBundleType +} + +func mktree(path []string, value any) (map[string]any, error) { + if len(path) == 0 { + // For 0 length path the value is the full tree. + obj, ok := value.(map[string]any) + if !ok { + return nil, errors.New("root value must be object") + } + return obj, nil + } + + dir := map[string]any{} + for i := len(path) - 1; i > 0; i-- { + dir[path[i]] = value + value = dir + dir = map[string]any{} + } + dir[path[0]] = value + + return dir, nil +} + +// Merge accepts a set of bundles and merges them into a single result bundle. If there are +// any conflicts during the merge (e.g., with roots) an error is returned. The result bundle +// will have an empty revision except in the special case where a single bundle is provided +// (and in that case the bundle is just returned unmodified.) 
+func Merge(bundles []*Bundle) (*Bundle, error) { + return MergeWithRegoVersion(bundles, ast.DefaultRegoVersion, false) +} + +// MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge. +// If more than one bundle is provided, the rego version of the result bundle is set to the provided regoVersion. +// Any Rego files in a bundle of conflicting rego version will be marked in the result's manifest with the rego version +// of its original bundle. If the Rego file already had an overriding rego version, it will be preserved. +// If a single bundle is provided, it will retain any rego version information it already had. If it has none, the +// provided regoVersion will be applied to it. +// If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's +// ModuleFile.URL will be used. +func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) { + if len(bundles) == 0 { + return nil, errors.New("expected at least one bundle") + } + + if regoVersion == ast.RegoUndefined { + regoVersion = ast.DefaultRegoVersion + } + + if len(bundles) == 1 { + result := bundles[0] + // We respect the bundle rego-version, defaulting to the provided rego version if not set. + result.SetRegoVersion(result.RegoVersion(regoVersion)) + fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath) + if err != nil { + return nil, err + } + result.Manifest.FileRegoVersions = fileRegoVersions + return result, nil + } + + var roots []string + var result Bundle + + for _, b := range bundles { + if b.Manifest.Roots == nil { + return nil, errors.New("bundle manifest not initialized") + } + + roots = append(roots, *b.Manifest.Roots...) + + result.Modules = append(result.Modules, b.Modules...) 
+ + for _, root := range *b.Manifest.Roots { + key := strings.Split(root, "/") + if val := b.readData(key); val != nil { + if err := result.insertData(key, *val); err != nil { + return nil, err + } + } + } + + result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...) + result.WasmModules = append(result.WasmModules, b.WasmModules...) + result.PlanModules = append(result.PlanModules, b.PlanModules...) + + if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 { + if result.Manifest.FileRegoVersions == nil { + result.Manifest.FileRegoVersions = map[string]int{} + } + + fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath) + if err != nil { + return nil, err + } + maps.Copy(result.Manifest.FileRegoVersions, fileRegoVersions) + } + } + + // We respect the bundle rego-version, defaulting to the provided rego version if not set. + result.SetRegoVersion(result.RegoVersion(regoVersion)) + + if result.Data == nil { + result.Data = map[string]any{} + } + + result.Manifest.Roots = &roots + + if err := result.Manifest.validateAndInjectDefaults(result); err != nil { + return nil, err + } + + return &result, nil +} + +func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) { + fileRegoVersions := map[string]int{} + + // we drop the bundle-global rego versions and record individual rego versions for each module. + for _, m := range bundle.Modules { + // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might + // contain the path between OPA working directory and the bundle root. 
+ v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion)) + if err != nil { + return nil, err + } + + // only record the rego version if it's different from the one applied globally to the result bundle + if v != ast.RegoUndefined { + if regoVersion == ast.RegoUndefined { + // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path + // to the module inside the merged bundle. + fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int() + } else { + vInt := v.Int() + gVInt := regoVersion.Int() + if vInt != gVInt { + fileRegoVersions[bundleAbsolutePath(m, usePath)] = vInt + } + } + } + } + + return fileRegoVersions, nil +} + +func bundleRelativePath(m ModuleFile, usePath bool) string { + p := m.RelativePath + if p == "" { + if usePath { + p = m.Path + } else { + p = m.URL + } + } + return p +} + +func bundleAbsolutePath(m ModuleFile, usePath bool) string { + p := m.URL + if usePath { + p = m.Path + } + return path.Clean(util.WithPrefix(p, "/")) +} + +// RootPathsOverlap takes in two bundle root paths and returns true if they overlap. +func RootPathsOverlap(pathA string, pathB string) bool { + a := rootPathSegments(pathA) + b := rootPathSegments(pathB) + return rootContains(a, b) || rootContains(b, a) +} + +// RootPathsContain takes a set of bundle root paths and returns true if the path is contained. +func RootPathsContain(roots []string, path string) bool { + segments := rootPathSegments(path) + for i := range roots { + if rootContains(rootPathSegments(roots[i]), segments) { + return true + } + } + return false +} + +func rootPathSegments(path string) []string { + return strings.Split(path, "/") +} + +func rootContains(root []string, other []string) bool { + // A single segment, empty string root always contains the other. 
+ if len(root) == 1 && root[0] == "" { + return true + } + + if len(root) > len(other) { + return false + } + + for j := range root { + if root[j] != other[j] { + return false + } + } + + return true +} + +func insertValue(b *Bundle, path string, value any) error { + if err := b.insertData(getNormalizedPath(path), value); err != nil { + return fmt.Errorf("bundle load failed on %v: %w", path, err) + } + return nil +} + +func getNormalizedPath(path string) []string { + // Remove leading / and . characters from the directory path. If the bundle + // was written with OPA then the paths will contain a leading slash. On the + // other hand, if the path is empty, filepath.Dir will return '.'. + // Note: filepath.Dir can return paths with '\' separators, always use + // filepath.ToSlash to keep them normalized. + dirpath := strings.TrimLeft(filepath.ToSlash(filepath.Dir(path)), "/.") + var key []string + if dirpath != "" { + key = strings.Split(dirpath, "/") + } + return key +} + +func dfs(value any, path string, fn func(string, any) (bool, error)) error { + if stop, err := fn(path, value); err != nil { + return err + } else if stop { + return nil + } + obj, ok := value.(map[string]any) + if !ok { + return nil + } + for key := range obj { + if err := dfs(obj[key], path+"/"+key, fn); err != nil { + return err + } + } + return nil +} + +func modulePathWithPrefix(bundleName string, modulePath string) string { + // Bundle names are sometimes just file paths, some of which + // are full urls (file:///foo/). Parse these and only use the path. + parsed, err := url.Parse(bundleName) + if err == nil { + return path.Join(parsed.Host, parsed.Path, modulePath) + } + + return path.Join(bundleName, modulePath) +} + +// IsStructuredDoc checks if the file name equals a structured file extension ex. 
".json" +func IsStructuredDoc(name string) bool { + base := filepath.Base(name) + return base == dataFile || base == yamlDataFile || base == SignaturesFile || base == ManifestExt +} + +func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (*Bundle, []*Descriptor, error) { + bundle := &Bundle{} + descriptors := []*Descriptor{} + + for { + f, err := loader.NextFile() + if err != nil { + if err == io.EOF { + break + } + return bundle, nil, fmt.Errorf("bundle read failed: %w", err) + } + + isSignaturesFile := strings.HasSuffix(f.Path(), SignaturesFile) + + if !skipVerify && isSignaturesFile { + buf, err := readFile(f, sizeLimitBytes) + if err != nil { + return bundle, nil, err + } + + if err := util.NewJSONDecoder(&buf).Decode(&bundle.Signatures); err != nil { + return bundle, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err) + } + } else if !isSignaturesFile { + descriptors = append(descriptors, f) + + base := filepath.Base(f.Path()) + + if base == patchFile { + + var b bytes.Buffer + tee := io.TeeReader(f.reader, &b) + f.reader = tee + + buf, err := readFile(f, sizeLimitBytes) + if err != nil { + return bundle, nil, err + } + + if err := util.NewJSONDecoder(&buf).Decode(&bundle.Patch); err != nil { + return bundle, nil, fmt.Errorf("bundle load failed on patch decode: %w", err) + } + + f.reader = &b + } + } + } + + return bundle, descriptors, nil +} + +func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { + // Case for pre-loaded byte buffers, like those from the tarballLoader. 
+ if bb, ok := f.reader.(*bytes.Buffer); ok { + _ = f.Close() // always close, even on error + + if int64(bb.Len()) >= sizeLimitBytes { + return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)", + strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1) + } + + return *bb, nil + } + + // Case for *lazyFile readers: + if lf, ok := f.reader.(*lazyFile); ok { + var buf bytes.Buffer + if lf.file == nil { + var err error + if lf.file, err = os.Open(lf.path); err != nil { + return buf, fmt.Errorf("failed to open file %s: %w", f.path, err) + } + } + // Bail out if we can't read the whole file-- there's nothing useful we can do at that point! + fileSize, _ := fstatFileSize(lf.file) + if fileSize > sizeLimitBytes { + return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1) + } + // Prealloc the buffer for the file read. + buffer := make([]byte, fileSize) + _, err := io.ReadFull(lf.file, buffer) + if err != nil { + return buf, err + } + _ = lf.file.Close() // always close, even on error + + // Note(philipc): Replace the lazyFile reader in the *Descriptor with a + // pointer to the wrapping bytes.Buffer, so that we don't re-read the + // file on disk again by accident. + buf = *bytes.NewBuffer(buffer) + f.reader = &buf + return buf, nil + } + + // Fallback case: + var buf bytes.Buffer + n, err := f.Read(&buf, sizeLimitBytes) + _ = f.Close() // always close, even on error + + if err != nil && err != io.EOF { + return buf, err + } else if err == nil && n >= sizeLimitBytes { + return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1) + } + + return buf, nil +} + +// Takes an already open file handle and invokes the os.Stat system call on it +// to determine the file's size. Passes any errors from *File.Stat on up to the +// caller. 
+func fstatFileSize(f *os.File) (int64, error) { + fileInfo, err := f.Stat() + if err != nil { + return 0, err + } + return fileInfo.Size(), nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go new file mode 100644 index 0000000000..4897ee7b91 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go @@ -0,0 +1,514 @@ +package bundle + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/open-policy-agent/opa/v1/loader/filter" + + "github.com/open-policy-agent/opa/v1/storage" +) + +const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)" + +// Descriptor contains information about a file and +// can be used to read the file contents. +type Descriptor struct { + url string + path string + reader io.Reader + closer io.Closer + closeOnce *sync.Once +} + +// lazyFile defers reading the file until the first call of Read +type lazyFile struct { + path string + file *os.File +} + +// newLazyFile creates a new instance of lazyFile +func newLazyFile(path string) *lazyFile { + return &lazyFile{path: path} +} + +// Read implements io.Reader. 
It will check if the file has been opened +// and open it if it has not before attempting to read using the file's +// read method +func (f *lazyFile) Read(b []byte) (int, error) { + var err error + + if f.file == nil { + if f.file, err = os.Open(f.path); err != nil { + return 0, fmt.Errorf("failed to open file %s: %w", f.path, err) + } + } + + return f.file.Read(b) +} + +// Close closes the lazy file if it has been opened using the file's +// close method +func (f *lazyFile) Close() error { + if f.file != nil { + return f.file.Close() + } + + return nil +} + +func NewDescriptor(url, path string, reader io.Reader) *Descriptor { + return &Descriptor{ + url: url, + path: path, + reader: reader, + } +} + +func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor { + d.closer = closer + d.closeOnce = new(sync.Once) + return d +} + +// Path returns the path of the file. +func (d *Descriptor) Path() string { + return d.path +} + +// URL returns the url of the file. +func (d *Descriptor) URL() string { + return d.url +} + +// Read will read all the contents from the file the Descriptor refers to +// into the dest writer up n bytes. Will return an io.EOF error +// if EOF is encountered before n bytes are read. +func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) { + n, err := io.CopyN(dest, d.reader, n) + return n, err +} + +// Close the file, on some Loader implementations this might be a no-op. +// It should *always* be called regardless of file. +func (d *Descriptor) Close() error { + var err error + if d.closer != nil { + d.closeOnce.Do(func() { + err = d.closer.Close() + }) + } + return err +} + +type PathFormat int64 + +const ( + Chrooted PathFormat = iota + SlashRooted + Passthrough +) + +// DirectoryLoader defines an interface which can be used to load +// files from a directory by iterating over each one in the tree. +type DirectoryLoader interface { + // NextFile must return io.EOF if there is no next value. 
The returned + // descriptor should *always* be closed when no longer needed. + NextFile() (*Descriptor, error) + WithFilter(filter filter.LoaderFilter) DirectoryLoader + WithPathFormat(PathFormat) DirectoryLoader + WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader + WithFollowSymlinks(followSymlinks bool) DirectoryLoader +} + +type dirLoader struct { + root string + files []string + idx int + filter filter.LoaderFilter + pathFormat PathFormat + maxSizeLimitBytes int64 + followSymlinks bool +} + +// Normalize root directory, ex "./src/bundle" -> "src/bundle" +// We don't need an absolute path, but this makes the joined/trimmed +// paths more uniform. +func normalizeRootDirectory(root string) string { + if len(root) > 1 { + if root[0] == '.' && root[1] == filepath.Separator { + if len(root) == 2 { + root = root[:1] // "./" -> "." + } else { + root = root[2:] // remove leading "./" + } + } + } + return root +} + +// NewDirectoryLoader returns a basic DirectoryLoader implementation +// that will load files from a given root directory path. 
+func NewDirectoryLoader(root string) DirectoryLoader { + d := dirLoader{ + root: normalizeRootDirectory(root), + pathFormat: Chrooted, + } + return &d +} + +// WithFilter specifies the filter object to use to filter files while loading bundles +func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + d.filter = filter + return d +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + d.pathFormat = pathFormat + return d +} + +// WithSizeLimitBytes specifies the maximum size of any file in the directory to read +func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + d.maxSizeLimitBytes = sizeLimitBytes + return d +} + +// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory +func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { + d.followSymlinks = followSymlinks + return d +} + +func formatPath(fileName string, root string, pathFormat PathFormat) string { + switch pathFormat { + case SlashRooted: + if !strings.HasPrefix(fileName, string(filepath.Separator)) { + return string(filepath.Separator) + fileName + } + return fileName + case Chrooted: + // Trim off the root directory and return path as if chrooted + result := strings.TrimPrefix(fileName, filepath.FromSlash(root)) + if root == "." && filepath.Base(fileName) == ManifestExt { + result = fileName + } + if !strings.HasPrefix(result, string(filepath.Separator)) { + result = string(filepath.Separator) + result + } + return result + case Passthrough: + fallthrough + default: + return fileName + } +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. 
+func (d *dirLoader) NextFile() (*Descriptor, error) { + // build a list of all files we will iterate over and read, but only one time + if d.files == nil { + d.files = []string{} + err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error { + if info == nil { + return nil + } + + if info.Mode().IsRegular() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) + } + d.files = append(d.files, path) + } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) + } + d.files = append(d.files, path) + } else if info.Mode().IsDir() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { + return filepath.SkipDir + } + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list files: %w", err) + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if d.idx >= len(d.files) { + return nil, io.EOF + } + + fileName := d.files[d.idx] + d.idx++ + fh := newLazyFile(fileName) + + cleanedPath := formatPath(fileName, d.root, d.pathFormat) + f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh) + return f, nil +} + +type tarballLoader struct { + baseURL string + r io.Reader + tr *tar.Reader + files []file + idx int + filter filter.LoaderFilter + skipDir map[string]struct{} + pathFormat PathFormat + maxSizeLimitBytes int64 +} + +type file struct { + name string + reader io.Reader + path storage.Path + raw 
[]byte +} + +// NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead. +func NewTarballLoader(r io.Reader) DirectoryLoader { + l := tarballLoader{ + r: r, + pathFormat: Passthrough, + } + return &l +} + +// NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads +// files out of a gzipped tar archive. The file URLs will be prefixed +// with the baseURL. +func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader { + l := tarballLoader{ + baseURL: strings.TrimSuffix(baseURL, "/"), + r: r, + pathFormat: Passthrough, + } + return &l +} + +// WithFilter specifies the filter object to use to filter files while loading bundles +func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + t.filter = filter + return t +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + t.pathFormat = pathFormat + return t +} + +// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read +func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + t.maxSizeLimitBytes = sizeLimitBytes + return t +} + +// WithFollowSymlinks is a no-op for tarballLoader +func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader { + return t +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. 
+func (t *tarballLoader) NextFile() (*Descriptor, error) { + if t.tr == nil { + gr, err := gzip.NewReader(t.r) + if err != nil { + return nil, fmt.Errorf("archive read failed: %w", err) + } + + t.tr = tar.NewReader(gr) + } + + if t.files == nil { + t.files = []file{} + + if t.skipDir == nil { + t.skipDir = map[string]struct{}{} + } + + for { + header, err := t.tr.Next() + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + + // Keep iterating on the archive until we find a normal file + if header.Typeflag == tar.TypeReg { + + if t.filter != nil { + if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) { + continue + } + + basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/") + + // check if the directory is to be skipped + if _, ok := t.skipDir[basePath]; ok { + continue + } + + match := false + for p := range t.skipDir { + if strings.HasPrefix(basePath, p) { + match = true + break + } + } + + if match { + continue + } + } + + if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes { + return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes) + } + + f := file{name: header.Name} + + // Note(philipc): We rely on the previous size check in this loop for safety. 
+ buf := bytes.NewBuffer(make([]byte, 0, header.Size)) + if _, err := io.Copy(buf, t.tr); err != nil { + return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err) + } + + f.reader = buf + + t.files = append(t.files, f) + } else if header.Typeflag == tar.TypeDir { + cleanedPath := filepath.ToSlash(header.Name) + if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) { + t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{} + } + } + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if t.idx >= len(t.files) { + return nil, io.EOF + } + + f := t.files[t.idx] + t.idx++ + + cleanedPath := formatPath(f.name, "", t.pathFormat) + d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader) + return d, nil +} + +// Next implements the storage.Iterator interface. +// It iterates to the next policy or data file in the directory tree +// and returns a storage.Update for the file. +func (it *iterator) Next() (*storage.Update, error) { + if it.files == nil { + it.files = []file{} + + for _, item := range it.raw { + f := file{name: item.Path} + + p, err := getFileStoragePath(f.name) + if err != nil { + return nil, err + } + + f.path = p + + f.raw = item.Value + + it.files = append(it.files, f) + } + + sortFilePathAscend(it.files) + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if it.idx >= len(it.files) { + return nil, io.EOF + } + + f := it.files[it.idx] + it.idx++ + + var isPolicy bool + if strings.HasSuffix(f.name, RegoExt) { + isPolicy = true + } + + return &storage.Update{ + Path: f.path, + Value: f.raw, + IsPolicy: isPolicy, + }, nil +} + +type iterator struct { + raw []Raw + files []file + idx int +} + +func NewIterator(raw []Raw) storage.Iterator { + it := iterator{ + raw: raw, + } + return &it +} + +func sortFilePathAscend(files []file) { + sort.Slice(files, func(i, j int) bool { + return len(files[i].path) 
< len(files[j].path) + }) +} + +func getdepth(path string, isDir bool) int { + if isDir { + cleanedPath := strings.Trim(filepath.ToSlash(path), "/") + return len(strings.Split(cleanedPath, "/")) + } + + basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/") + return len(strings.Split(basePath, "/")) +} + +func getFileStoragePath(path string) (storage.Path, error) { + fpath := strings.TrimLeft(filepath.ToSlash(filepath.Dir(path)), "/.") + if strings.HasSuffix(path, RegoExt) { + fpath = strings.Trim(filepath.ToSlash(path), "/") + } + + p, ok := storage.ParsePathEscaped("/" + fpath) + if !ok { + return nil, fmt.Errorf("storage path invalid: %v", path) + } + return p, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go new file mode 100644 index 0000000000..fc6fc1896f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go @@ -0,0 +1,140 @@ +package bundle + +import ( + "fmt" + "io" + "io/fs" + "path/filepath" + "sync" + + "github.com/open-policy-agent/opa/v1/loader/filter" +) + +const ( + defaultFSLoaderRoot = "." 
+) + +type dirLoaderFS struct { + sync.Mutex + filesystem fs.FS + files []string + idx int + filter filter.LoaderFilter + root string + pathFormat PathFormat + maxSizeLimitBytes int64 + followSymlinks bool +} + +// NewFSLoader returns a basic DirectoryLoader implementation +// that will load files from a fs.FS interface +func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) { + return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil +} + +// NewFSLoaderWithRoot returns a basic DirectoryLoader implementation +// that will load files from a fs.FS interface at the supplied root +func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader { + d := dirLoaderFS{ + filesystem: filesystem, + root: normalizeRootDirectory(root), + pathFormat: Chrooted, + } + + return &d +} + +func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error { + if err != nil { + return err + } + + if dirEntry != nil { + info, err := dirEntry.Info() + if err != nil { + return err + } + + if dirEntry.Type().IsRegular() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) + } + + d.files = append(d.files, path) + } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) + } + + d.files = append(d.files, path) + } else if dirEntry.Type().IsDir() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { + return fs.SkipDir + } + } + } + return nil +} + +// WithFilter specifies the filter object to use to filter files while 
loading bundles +func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + d.filter = filter + return d +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + d.pathFormat = pathFormat + return d +} + +// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read +func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + d.maxSizeLimitBytes = sizeLimitBytes + return d +} + +func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { + d.followSymlinks = followSymlinks + return d +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. +func (d *dirLoaderFS) NextFile() (*Descriptor, error) { + d.Lock() + defer d.Unlock() + + if d.files == nil { + err := fs.WalkDir(d.filesystem, d.root, d.walkDir) + if err != nil { + return nil, fmt.Errorf("failed to list files: %w", err) + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if d.idx >= len(d.files) { + return nil, io.EOF + } + + fileName := d.files[d.idx] + d.idx++ + + fh, err := d.filesystem.Open(fileName) + if err != nil { + return nil, fmt.Errorf("failed to open file %s: %w", fileName, err) + } + + cleanedPath := formatPath(fileName, d.root, d.pathFormat) + f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh) + return f, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go new file mode 100644 index 0000000000..dd9dfe5149 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go @@ -0,0 +1,135 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package bundle + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/json" + "fmt" + "hash" + "io" + + "github.com/open-policy-agent/opa/v1/util" +) + +// HashingAlgorithm represents a subset of hashing algorithms implemented in Go +type HashingAlgorithm string + +// Supported values for HashingAlgorithm +const ( + MD5 HashingAlgorithm = "MD5" + SHA1 HashingAlgorithm = "SHA-1" + SHA224 HashingAlgorithm = "SHA-224" + SHA256 HashingAlgorithm = "SHA-256" + SHA384 HashingAlgorithm = "SHA-384" + SHA512 HashingAlgorithm = "SHA-512" + SHA512224 HashingAlgorithm = "SHA-512-224" + SHA512256 HashingAlgorithm = "SHA-512-256" +) + +// String returns the string representation of a HashingAlgorithm +func (alg HashingAlgorithm) String() string { + return string(alg) +} + +// SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy +type SignatureHasher interface { + HashFile(v any) ([]byte, error) +} + +type hasher struct { + h func() hash.Hash // hash function factory +} + +// NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm +func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) { + h := &hasher{} + + switch alg { + case MD5: + h.h = md5.New + case SHA1: + h.h = sha1.New + case SHA224: + h.h = sha256.New224 + case SHA256: + h.h = sha256.New + case SHA384: + h.h = sha512.New384 + case SHA512: + h.h = sha512.New + case SHA512224: + h.h = sha512.New512_224 + case SHA512256: + h.h = sha512.New512_256 + default: + return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg) + } + + return h, nil +} + +// HashFile hashes the file content, JSON or binary, both in golang native format. +func (h *hasher) HashFile(v any) ([]byte, error) { + hf := h.h() + walk(v, hf) + return hf.Sum(nil), nil +} + +// walk hashes the file content, JSON or binary, both in golang native format. 
+// +// Computation for unstructured documents is a hash of the document. +// +// Computation for the types of structured JSON document is as follows: +// +// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }. +// +// array: Hash [, then digest of the value, then comma (between items) and finally ]. +func walk(v any, h io.Writer) { + + switch x := v.(type) { + case map[string]any: + _, _ = h.Write([]byte("{")) + + for i, key := range util.KeysSorted(x) { + if i > 0 { + _, _ = h.Write([]byte(",")) + } + + _, _ = h.Write(encodePrimitive(key)) + _, _ = h.Write([]byte(":")) + walk(x[key], h) + } + + _, _ = h.Write([]byte("}")) + case []any: + _, _ = h.Write([]byte("[")) + + for i, e := range x { + if i > 0 { + _, _ = h.Write([]byte(",")) + } + walk(e, h) + } + + _, _ = h.Write([]byte("]")) + case []byte: + _, _ = h.Write(x) + default: + _, _ = h.Write(encodePrimitive(x)) + } +} + +func encodePrimitive(v any) []byte { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + encoder.SetEscapeHTML(false) + _ = encoder.Encode(v) + return bytes.Trim(buf.Bytes(), "\n") +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go new file mode 100644 index 0000000000..f16fe37fc7 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go @@ -0,0 +1,173 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +// Package bundle provide helpers that assist in creating the verification and signing key configuration +package bundle + +import ( + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "os" + "strings" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/open-policy-agent/opa/v1/keys" + + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + defaultTokenSigningAlg = "RS256" +) + +// KeyConfig holds the keys used to sign or verify bundles and tokens +// Moved to own package, alias kept for backwards compatibility +type KeyConfig = keys.Config + +// VerificationConfig represents the key configuration used to verify a signed bundle +type VerificationConfig struct { + PublicKeys map[string]*KeyConfig + KeyID string `json:"keyid"` + Scope string `json:"scope"` + Exclude []string `json:"exclude_files"` +} + +// NewVerificationConfig return a new VerificationConfig +func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig { + return &VerificationConfig{ + PublicKeys: keys, + KeyID: id, + Scope: scope, + Exclude: exclude, + } +} + +// ValidateAndInjectDefaults validates the config and inserts default values +func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error { + vc.PublicKeys = keys + + if vc.KeyID != "" { + found := false + for key := range keys { + if key == vc.KeyID { + found = true + break + } + } + + if !found { + return fmt.Errorf("key id %s not found", vc.KeyID) + } + } + return nil +} + +// GetPublicKey returns the public key corresponding to the given key id +func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) { + var kc *KeyConfig + var ok bool + + if kc, ok = vc.PublicKeys[id]; !ok { + return nil, fmt.Errorf("verification key corresponding to ID %v not found", id) + } + return kc, nil +} + +// SigningConfig represents the key configuration used to generate a signed bundle +type SigningConfig struct { + Plugin string + Key string + 
Algorithm string + ClaimsPath string +} + +// NewSigningConfig return a new SigningConfig +func NewSigningConfig(key, alg, claimsPath string) *SigningConfig { + if alg == "" { + alg = defaultTokenSigningAlg + } + + return &SigningConfig{ + Plugin: defaultSignerID, + Key: key, + Algorithm: alg, + ClaimsPath: claimsPath, + } +} + +// WithPlugin sets the signing plugin in the signing config +func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig { + if plugin != "" { + s.Plugin = plugin + } + return s +} + +// GetPrivateKey returns the private key or secret from the signing config +func (s *SigningConfig) GetPrivateKey() (any, error) { + var keyData string + + alg, ok := jwa.LookupSignatureAlgorithm(s.Algorithm) + if !ok { + return nil, fmt.Errorf("unknown signature algorithm: %s", s.Algorithm) + } + + // Check if the key looks like PEM data first (starts with -----BEGIN) + if strings.HasPrefix(s.Key, "-----BEGIN") { + keyData = s.Key + } else { + // Try to read as a file path + if _, err := os.Stat(s.Key); err == nil { + bs, err := os.ReadFile(s.Key) + if err != nil { + return nil, err + } + keyData = string(bs) + } else if os.IsNotExist(err) { + // Not a file, treat as raw key data + keyData = s.Key + } else { + return nil, err + } + } + + // For HMAC algorithms, return the key as bytes + if alg == jwa.HS256() || alg == jwa.HS384() || alg == jwa.HS512() { + return []byte(keyData), nil + } + + // For RSA/ECDSA algorithms, parse the PEM-encoded key + block, _ := pem.Decode([]byte(keyData)) + if block == nil { + return nil, errors.New("failed to parse PEM block containing the key") + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + case "PRIVATE KEY": + return x509.ParsePKCS8PrivateKey(block.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + default: + return nil, fmt.Errorf("unsupported key type: %s", block.Type) + } +} + +// GetClaims returns the claims by reading the file 
specified in the signing config +func (s *SigningConfig) GetClaims() (map[string]any, error) { + var claims map[string]any + + bs, err := os.ReadFile(s.ClaimsPath) + if err != nil { + return claims, err + } + + if err := util.UnmarshalJSON(bs, &claims); err != nil { + return claims, err + } + return claims, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go new file mode 100644 index 0000000000..30640731ef --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go @@ -0,0 +1,130 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package bundle provide helpers that assist in the creating a signed bundle +package bundle + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/jwx/v3/jwt" +) + +const defaultSignerID = "_default" + +var signers map[string]Signer + +// Signer is the interface expected for implementations that generate bundle signatures. +type Signer interface { + GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error) +} + +// GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified +// in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates +// a signed token given the list of files to be included in the payload and the bundle +// signing config. The keyID if non-empty, represents the value for the "keyid" claim in the token. 
+func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { + var plugin string + // for backwards compatibility, check if there is no plugin specified, and use default + if sc.Plugin == "" { + plugin = defaultSignerID + } else { + plugin = sc.Plugin + } + signer, err := GetSigner(plugin) + if err != nil { + return "", err + } + return signer.GenerateSignedToken(files, sc, keyID) +} + +// DefaultSigner is the default bundle signing implementation. It signs bundles by generating +// a JWT and signing it using a locally-accessible private key. +type DefaultSigner struct{} + +// GenerateSignedToken generates a signed token given the list of files to be +// included in the payload and the bundle signing config. The keyID if non-empty, +// represents the value for the "keyid" claim in the token +func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { + token, err := generateToken(files, sc, keyID) + if err != nil { + return "", err + } + + privateKey, err := sc.GetPrivateKey() + if err != nil { + return "", err + } + + // Parse the algorithm string to jwa.SignatureAlgorithm + alg, ok := jwa.LookupSignatureAlgorithm(sc.Algorithm) + if !ok { + return "", fmt.Errorf("unknown signature algorithm: %s", sc.Algorithm) + } + + // In order to sign the token with a kid, we need a key ID _on_ the key + // (note: we might be able to make this more efficient if we just load + // the key as a JWK from the start) + jwkKey, err := jwk.Import(privateKey) + if err != nil { + return "", fmt.Errorf("failed to import private key: %w", err) + } + if err := jwkKey.Set(jwk.KeyIDKey, keyID); err != nil { + return "", fmt.Errorf("failed to set key ID on JWK: %w", err) + } + + // Since v3.0.6, jwx will take the fast path for signing the token if + // there's exactly one WithKey in the options with no sub-options + signed, err := jwt.Sign(token, jwt.WithKey(alg, jwkKey)) + if err != nil { + return "", err + } 
+ return string(signed), nil +} + +func generateToken(files []FileInfo, sc *SigningConfig, keyID string) (jwt.Token, error) { + tb := jwt.NewBuilder() + tb.Claim("files", files) + + if sc.ClaimsPath != "" { + claims, err := sc.GetClaims() + if err != nil { + return nil, err + } + + for k, v := range claims { + tb.Claim(k, v) + } + } else if keyID != "" { + // keyid claim is deprecated but include it for backwards compatibility. + tb.Claim("keyid", keyID) + } + return tb.Build() +} + +// GetSigner returns the Signer registered under the given id +func GetSigner(id string) (Signer, error) { + signer, ok := signers[id] + if !ok { + return nil, fmt.Errorf("no signer exists under id %s", id) + } + return signer, nil +} + +// RegisterSigner registers a Signer under the given id +func RegisterSigner(id string, s Signer) error { + if id == defaultSignerID { + return fmt.Errorf("signer id %s is reserved, use a different id", id) + } + signers[id] = s + return nil +} + +func init() { + signers = map[string]Signer{ + defaultSignerID: &DefaultSigner{}, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go new file mode 100644 index 0000000000..f6c13c75fc --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go @@ -0,0 +1,1255 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package bundle + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "maps" + "path/filepath" + "slices" + "sort" + "strings" + "sync" + + iCompiler "github.com/open-policy-agent/opa/internal/compiler" + "github.com/open-policy-agent/opa/internal/json/patch" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/util" +) + +const defaultActivatorID = "_default" + +var ( + activators = map[string]Activator{ + defaultActivatorID: &DefaultActivator{}, + } + activatorMtx sync.Mutex +) + +// BundlesBasePath is the storage path used for storing bundle metadata +var BundlesBasePath = storage.MustParsePath("/system/bundles") + +var ModulesInfoBasePath = storage.MustParsePath("/system/modules") + +// Note: As needed these helpers could be memoized. + +// ManifestStoragePath is the storage path used for the given named bundle manifest. +func ManifestStoragePath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest") +} + +// EtagStoragePath is the storage path used for the given named bundle etag. 
+func EtagStoragePath(name string) storage.Path { + return append(BundlesBasePath, name, "etag") +} + +func namedBundlePath(name string) storage.Path { + return append(BundlesBasePath, name) +} + +func rootsPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "roots") +} + +func revisionPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "revision") +} + +func wasmModulePath(name string) storage.Path { + return append(BundlesBasePath, name, "wasm") +} + +func wasmEntrypointsPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "wasm") +} + +func metadataPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "metadata") +} + +func moduleRegoVersionPath(id string) storage.Path { + return append(ModulesInfoBasePath, strings.Trim(id, "/"), "rego_version") +} + +func moduleInfoPath(id string) storage.Path { + return append(ModulesInfoBasePath, strings.Trim(id, "/")) +} + +func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (any, error) { + value, err := store.Read(ctx, txn, path) + if err != nil { + if storage.IsNotFound(err) { + return nil, &storage.Error{ + Code: storage.NotFoundErr, + Message: strings.TrimPrefix(path.String(), "/system") + ": document does not exist", + } + } + return nil, err + } + + if astValue, ok := value.(ast.Value); ok { + value, err = ast.JSON(astValue) + if err != nil { + return nil, err + } + } + + return value, nil +} + +// ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored. 
+func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) { + value, err := read(ctx, store, txn, BundlesBasePath) + if err != nil { + return nil, err + } + + bundleMap, ok := value.(map[string]any) + if !ok { + return nil, errors.New("corrupt manifest roots") + } + + bundles := make([]string, len(bundleMap)) + idx := 0 + for name := range bundleMap { + bundles[idx] = name + idx++ + } + return bundles, nil +} + +// WriteManifestToStore will write the manifest into the storage. This function is called when +// the bundle is activated. +func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error { + return write(ctx, store, txn, ManifestStoragePath(name), manifest) +} + +// WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated. +func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error { + return write(ctx, store, txn, EtagStoragePath(name), etag) +} + +func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value any) error { + if err := util.RoundTrip(&value); err != nil { + return err + } + + var dir []string + if len(path) > 1 { + dir = path[:len(path)-1] + } + + if err := storage.MakeDir(ctx, store, txn, dir); err != nil { + return err + } + + return store.Write(ctx, txn, storage.AddOp, path, value) +} + +// EraseManifestFromStore will remove the manifest from storage. This function is called +// when the bundle is deactivated. +func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { + path := namedBundlePath(name) + err := store.Write(ctx, txn, storage.RemoveOp, path, nil) + return suppressNotFound(err) +} + +// eraseBundleEtagFromStore will remove the bundle etag from storage. 
This function is called +// when the bundle is deactivated. +func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { + path := EtagStoragePath(name) + err := store.Write(ctx, txn, storage.RemoveOp, path, nil) + return suppressNotFound(err) +} + +func suppressNotFound(err error) error { + if err == nil || storage.IsNotFound(err) { + return nil + } + return err +} + +func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error { + basePath := wasmModulePath(name) + for _, wm := range b.WasmModules { + path := append(basePath, wm.Path) + err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw)) + if err != nil { + return err + } + } + return nil +} + +func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { + path := wasmModulePath(name) + + err := store.Write(ctx, txn, storage.RemoveOp, path, nil) + return suppressNotFound(err) +} + +func eraseModuleRegoVersionsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, modules []string) error { + for _, module := range modules { + err := store.Write(ctx, txn, storage.RemoveOp, moduleInfoPath(module), nil) + if err := suppressNotFound(err); err != nil { + return err + } + } + return nil +} + +// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store. 
+func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) {
+	path := wasmEntrypointsPath(name)
+	value, err := read(ctx, store, txn, path)
+	if err != nil {
+		return nil, err
+	}
+
+	bs, err := json.Marshal(value)
+	if err != nil {
+		return nil, errors.New("corrupt wasm manifest data")
+	}
+
+	var wasmMetadata []WasmResolver
+
+	err = util.UnmarshalJSON(bs, &wasmMetadata)
+	if err != nil {
+		return nil, errors.New("corrupt wasm manifest data")
+	}
+
+	return wasmMetadata, nil
+}
+
+// ReadWasmModulesFromStore will read Wasm modules from the store.
+func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) {
+	path := wasmModulePath(name)
+	value, err := read(ctx, store, txn, path)
+	if err != nil {
+		return nil, err
+	}
+
+	encodedModules, ok := value.(map[string]any)
+	if !ok {
+		return nil, errors.New("corrupt wasm modules")
+	}
+
+	rawModules := map[string][]byte{}
+	for path, enc := range encodedModules {
+		encStr, ok := enc.(string)
+		if !ok {
+			return nil, errors.New("corrupt wasm modules")
+		}
+		bs, err := base64.StdEncoding.DecodeString(encStr)
+		if err != nil {
+			return nil, err
+		}
+		rawModules[path] = bs
+	}
+	return rawModules, nil
+}
+
+// ReadBundleRootsFromStore returns the roots in the specified bundle.
+// If the bundle is not activated, this function will return
+// storage NotFound error.
+func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) { + value, err := read(ctx, store, txn, rootsPath(name)) + if err != nil { + return nil, err + } + + sl, ok := value.([]any) + if !ok { + return nil, errors.New("corrupt manifest roots") + } + + roots := make([]string, len(sl)) + + for i := range sl { + roots[i], ok = sl[i].(string) + if !ok { + return nil, errors.New("corrupt manifest root") + } + } + + return roots, nil +} + +// ReadBundleRevisionFromStore returns the revision in the specified bundle. +// If the bundle is not activated, this function will return +// storage NotFound error. +func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { + return readRevisionFromStore(ctx, store, txn, revisionPath(name)) +} + +func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { + value, err := read(ctx, store, txn, path) + if err != nil { + return "", err + } + + str, ok := value.(string) + if !ok { + return "", errors.New("corrupt manifest revision") + } + + return str, nil +} + +// ReadBundleMetadataFromStore returns the metadata in the specified bundle. +// If the bundle is not activated, this function will return +// storage NotFound error. 
+func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]any, error) { + return readMetadataFromStore(ctx, store, txn, metadataPath(name)) +} + +func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]any, error) { + value, err := read(ctx, store, txn, path) + if err != nil { + return nil, suppressNotFound(err) + } + + data, ok := value.(map[string]any) + if !ok { + return nil, errors.New("corrupt manifest metadata") + } + + return data, nil +} + +// ReadBundleEtagFromStore returns the etag for the specified bundle. +// If the bundle is not activated, this function will return +// storage NotFound error. +func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { + return readEtagFromStore(ctx, store, txn, EtagStoragePath(name)) +} + +func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { + value, err := read(ctx, store, txn, path) + if err != nil { + return "", err + } + + str, ok := value.(string) + if !ok { + return "", errors.New("corrupt bundle etag") + } + + return str, nil +} + +// Activator is the interface expected for implementations that activate bundles. +type Activator interface { + Activate(*ActivateOpts) error +} + +// ActivateOpts defines options for the Activate API call. 
type ActivateOpts struct {
	Ctx      context.Context
	Store    storage.Store
	Txn      storage.Transaction
	TxnCtx   *storage.Context
	Compiler *ast.Compiler
	Metrics  metrics.Metrics
	Bundles  map[string]*Bundle     // Optional
	ExtraModules map[string]*ast.Module // Optional
	AuthorizationDecisionRef ast.Ref
	ParserOptions            ast.ParserOptions
	Plugin                   string

	// legacy selects the older single (unnamed) bundle storage layout;
	// set only via ActivateLegacy.
	legacy bool
}

// DefaultActivator is the default bundle activation implementation. It
// activates bundles via activateBundles using the named storage layout.
type DefaultActivator struct{}

// Activate implements the Activator interface using the default (non-legacy)
// activation path.
func (*DefaultActivator) Activate(opts *ActivateOpts) error {
	opts.legacy = false
	return activateBundles(opts)
}

// Activate the bundle(s) by loading into the given Store. This will load policies, data, and record
// the manifest in storage. The compiler provided will have had the polices compiled on it.
func Activate(opts *ActivateOpts) error {
	plugin := opts.Plugin

	// For backwards compatibility, check if there is no plugin specified, and use default.
	if plugin == "" {
		// Invoke extension activator if supplied. Otherwise, use default.
		if HasExtension() {
			plugin = bundleExtActivator
		} else {
			plugin = defaultActivatorID
		}
	}

	activator, err := GetActivator(plugin)
	if err != nil {
		return err
	}

	return activator.Activate(opts)
}

// DeactivateOpts defines options for the Deactivate API call
type DeactivateOpts struct {
	Ctx           context.Context
	Store         storage.Store
	Txn           storage.Transaction
	BundleNames   map[string]struct{}
	ParserOptions ast.ParserOptions
}

// Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store.
func Deactivate(opts *DeactivateOpts) error {
	erase := map[string]struct{}{}
	for name := range opts.BundleNames {
		roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name)
		if suppressNotFound(err) != nil {
			return err
		}
		for _, root := range roots {
			erase[root] = struct{}{}
		}
	}
	_, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, opts.BundleNames, erase)
	return err
}

// activateBundles performs the core activation work: it erases old state,
// validates roots, writes data/policies/manifests, and applies delta bundles.
func activateBundles(opts *ActivateOpts) error {

	// Build collections of bundle names, modules, and roots to erase
	erase := map[string]struct{}{}
	names := map[string]struct{}{}
	deltaBundles := map[string]*Bundle{}
	snapshotBundles := map[string]*Bundle{}

	for name, b := range opts.Bundles {
		if b.Type() == DeltaBundleType {
			deltaBundles[name] = b
		} else {
			snapshotBundles[name] = b
			names[name] = struct{}{}

			roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name)
			if suppressNotFound(err) != nil {
				return err
			}
			for _, root := range roots {
				erase[root] = struct{}{}
			}

			// Erase data at new roots to prepare for writing the new data
			for _, root := range *b.Manifest.Roots {
				erase[root] = struct{}{}
			}
		}
	}

	// Before changing anything make sure the roots don't collide with any
	// other bundles that already are activated or other bundles being activated.
	err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles)
	if err != nil {
		return err
	}

	if len(deltaBundles) != 0 {
		err := activateDeltaBundles(opts, deltaBundles)
		if err != nil {
			return err
		}
	}

	// Erase data and policies at new + old roots, and remove the old
	// manifests before activating a new snapshot bundle.
	remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, names, erase)
	if err != nil {
		return err
	}

	// Validate data in bundle does not contain paths outside the bundle's roots.
	for _, b := range snapshotBundles {

		if b.lazyLoadingMode {

			for _, item := range b.Raw {
				path := filepath.ToSlash(item.Path)

				if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile {
					var val map[string]json.RawMessage
					err = util.Unmarshal(item.Value, &val)
					if err == nil {
						err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots)
						if err != nil {
							return err
						}
					} else {
						// Build an object for the value
						p := getNormalizedPath(path)

						if len(p) == 0 {
							return errors.New("root value must be object")
						}

						// verify valid YAML or JSON value
						var x any
						err := util.Unmarshal(item.Value, &x)
						if err != nil {
							return err
						}

						// Wrap the raw value in nested single-key objects, innermost
						// path component first, so doDFS can walk it like a document.
						value := item.Value
						dir := map[string]json.RawMessage{}
						for i := len(p) - 1; i > 0; i-- {
							dir[p[i]] = value

							bs, err := json.Marshal(dir)
							if err != nil {
								return err
							}

							value = bs
							dir = map[string]json.RawMessage{}
						}
						dir[p[0]] = value

						err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots)
						if err != nil {
							return err
						}
					}
				}
			}
		}
	}

	// Compile the modules all at once to avoid having to re-do work.
	remainingAndExtra := make(map[string]*ast.Module)
	maps.Copy(remainingAndExtra, remaining)
	maps.Copy(remainingAndExtra, opts.ExtraModules)

	err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy, opts.AuthorizationDecisionRef)
	if err != nil {
		return err
	}

	if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy, opts.ParserOptions.RegoVersion); err != nil {
		return err
	}

	if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 {
		return err
	}

	for name, b := range snapshotBundles {
		if err := writeManifestToStore(opts, name, b.Manifest); err != nil {
			return err
		}

		if err := writeEtagToStore(opts, name, b.Etag); err != nil {
			return err
		}

		if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil {
			return err
		}
	}

	return nil
}

// doDFS walks a raw JSON object depth-first and returns an error if any leaf
// path is neither contained in nor a prefix of one of the manifest roots.
func doDFS(obj map[string]json.RawMessage, path string, roots []string) error {
	if len(roots) == 1 && roots[0] == "" {
		return nil
	}

	for key := range obj {
		newPath := filepath.Join(strings.Trim(path, "/"), key)

		// Note: filepath.Join can return paths with '\' separators, always use
		// filepath.ToSlash to keep them normalized.
		newPath = strings.TrimLeft(filepath.ToSlash(newPath), "/.")

		contains := false
		prefix := false
		if RootPathsContain(roots, newPath) {
			contains = true
		} else {
			for i := range roots {
				if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) {
					prefix = true
					break
				}
			}
		}

		if !contains && !prefix {
			return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath)
		}

		if contains {
			continue
		}

		var next map[string]json.RawMessage
		err := util.Unmarshal(obj[key], &next)
		if err != nil {
			return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath)
		}

		if err := doDFS(next, newPath, roots); err != nil {
			return err
		}
	}
	return nil
}

// activateDeltaBundles validates delta bundles against the stored manifests
// and applies their JSON patches within the current transaction.
func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error {

	// Check that the manifest roots and wasm resolvers in the delta bundle
	// match with those currently in the store
	for name, b := range bundles {
		value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name))
		if err != nil {
			if storage.IsNotFound(err) {
				continue
			}
			return err
		}

		manifest, err := valueToManifest(value)
		if err != nil {
			return fmt.Errorf("corrupt manifest data: %w", err)
		}

		if !b.Manifest.equalWasmResolversAndRoots(manifest) {
			return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name)
		}
	}

	for _, b := range bundles {
		err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data)
		if err != nil {
			return err
		}
	}

	if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 {
		return err
	}

	for name, b := range bundles {
		if err := writeManifestToStore(opts, name, b.Manifest); err != nil {
			return err
		}

		if err := writeEtagToStore(opts, name, b.Etag); err != nil {
			return err
		}
	}

	return nil
}

// valueToManifest converts a stored value (possibly an ast.Value) into a
// Manifest by round-tripping through JSON.
func valueToManifest(v any) (Manifest, error) {
	if astV, ok := v.(ast.Value); ok {
		var err error
		v, err = ast.JSON(astV)
		if err != nil {
			return Manifest{}, err
		}
	}

	var manifest Manifest

	bs, err := json.Marshal(v)
	if err != nil {
		return Manifest{}, err
	}

	err = util.UnmarshalJSON(bs, &manifest)
	if err != nil {
		return Manifest{}, err
	}

	return manifest, nil
}

// erase bundles by name and roots. This will clear all policies and data at its roots and remove its
// manifest from storage.
func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) {

	if err := eraseData(ctx, store, txn, roots); err != nil {
		return nil, err
	}

	remaining, removed, err := erasePolicies(ctx, store, txn, parserOpts, roots)
	if err != nil {
		return nil, err
	}

	for name := range names {
		if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
			return nil, err
		}

		if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil {
			return nil, err
		}

		if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
			return nil, err
		}

		if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil {
			return nil, err
		}
	}

	err = eraseModuleRegoVersionsFromStore(ctx, store, txn, removed)
	if err != nil {
		return nil, err
	}

	return remaining, nil
}

// eraseData removes the data stored under each root path; missing paths are
// tolerated via suppressNotFound.
func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error {
	for root := range roots {
		path, ok := storage.ParsePathEscaped("/" + root)
		if !ok {
			return fmt.Errorf("manifest root path invalid: %v", root)
		}

		if len(path) > 0 {
			if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil {
				return err
			}
		}
	}
	return nil
}

// moduleInfo is the stored per-module metadata record.
type moduleInfo struct {
	RegoVersion ast.RegoVersion `json:"rego_version"`
}

// readModuleInfoFromStore loads the per-module rego-version records from
// ModulesInfoBasePath. Missing or nil data yields (nil, nil).
func readModuleInfoFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (map[string]moduleInfo, error) {
	value, err := read(ctx, store, txn, ModulesInfoBasePath)
	if suppressNotFound(err) != nil {
		return nil, err
	}

	if value == nil {
		return nil, nil
	}

	if m, ok := value.(map[string]any); ok {
		versions := make(map[string]moduleInfo, len(m))

		for k, v := range m {
			if m0, ok := v.(map[string]any); ok {
				if ver, ok := m0["rego_version"]; ok {
					if vs, ok := ver.(json.Number); ok {
						i, err := vs.Int64()
						if err != nil {
							return nil, errors.New("corrupt rego version")
						}
						versions[k] = moduleInfo{RegoVersion: ast.RegoVersionFromInt(int(i))}
					}
				}
			}
		}
		return versions, nil
	}

	return nil, errors.New("corrupt rego version")
}

// erasePolicies deletes stored policies whose package path falls under one of
// roots. It returns the modules left in the store and the ids it removed.
func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, []string, error) {

	ids, err := store.ListPolicies(ctx, txn)
	if err != nil {
		return nil, nil, err
	}

	modulesInfo, err := readModuleInfoFromStore(ctx, store, txn)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to read module info from store: %w", err)
	}

	getRegoVersion := func(modId string) (ast.RegoVersion, bool) {
		info, ok := modulesInfo[modId]
		if !ok {
			return ast.RegoUndefined, false
		}
		return info.RegoVersion, true
	}

	remaining := map[string]*ast.Module{}
	var removed []string

	for _, id := range ids {
		bs, err := store.GetPolicy(ctx, txn, id)
		if err != nil {
			return nil, nil, err
		}

		// Parse with the module's recorded rego version when one exists.
		parserOptsCpy := parserOpts
		if regoVersion, ok := getRegoVersion(id); ok {
			parserOptsCpy.RegoVersion = regoVersion
		}

		module, err := ast.ParseModuleWithOpts(id, string(bs), parserOptsCpy)
		if err != nil {
			return nil, nil, err
		}
		path, err := module.Package.Path.Ptr()
		if err != nil {
			return nil, nil, err
		}
		deleted := false
		for root := range roots {
			if RootPathsContain([]string{root}, path) {
				if err := store.DeletePolicy(ctx, txn, id); err != nil {
					return nil, nil, err
				}
				deleted = true
				break
			}
		}

		if deleted {
			removed = append(removed, id)
		} else {
			remaining[id] = module
		}
	}

	return remaining, removed, nil
}

func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error {
	// Always write manifests to the named location. If the plugin is in the older style config
	// then also write to the old legacy unnamed location.
	if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil {
		return err
	}

	if opts.legacy {
		if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil {
			return err
		}
	}

	return nil
}

// writeEtagToStore records the bundle's etag under its named storage path.
func writeEtagToStore(opts *ActivateOpts, name, etag string) error {
	if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil {
		return err
	}

	return nil
}

// writeModuleRegoVersionToStore persists a module's rego version when it
// differs from the runtime's, so it can be restored on later reads.
func writeModuleRegoVersionToStore(ctx context.Context, store storage.Store, txn storage.Transaction, b *Bundle,
	mf ModuleFile, storagePath string, runtimeRegoVersion ast.RegoVersion) error {

	var regoVersion ast.RegoVersion
	if mf.Parsed != nil {
		regoVersion = mf.Parsed.RegoVersion()
	}

	if regoVersion == ast.RegoUndefined {
		var err error
		regoVersion, err = b.RegoVersionForFile(mf.Path, runtimeRegoVersion)
		if err != nil {
			return fmt.Errorf("failed to get rego version for module '%s' in bundle: %w", mf.Path, err)
		}
	}

	if regoVersion != ast.RegoUndefined && regoVersion != runtimeRegoVersion {
		if err := write(ctx, store, txn, moduleRegoVersionPath(storagePath), regoVersion.Int()); err != nil {
			return fmt.Errorf("failed to write rego version for module '%s': %w", storagePath, err)
		}
	}
	return nil
}

// writeDataAndModules writes each bundle's data and policies into the store.
// Bundles with raw (lazy-loaded) content go through store.Truncate instead.
func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool, runtimeRegoVersion ast.RegoVersion) error {
	params := storage.WriteParams
	params.Context = txnCtx

	for name, b := range bundles {
		if len(b.Raw) == 0 {
			// Write data from each new bundle into the store. Only write under the
			// roots contained in their manifest.
			if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil {
				return err
			}

			for _, mf := range b.Modules {
				var path string

				// For backwards compatibility, in legacy mode, upsert policies to
				// the unprefixed path.
				if legacy {
					path = mf.Path
				} else {
					path = modulePathWithPrefix(name, mf.Path)
				}

				if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
					return err
				}

				if err := writeModuleRegoVersionToStore(ctx, store, txn, b, mf, path, runtimeRegoVersion); err != nil {
					return err
				}
			}
		} else {
			params.BasePaths = *b.Manifest.Roots

			err := store.Truncate(ctx, txn, params, NewIterator(b.Raw))
			if err != nil {
				return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err)
			}

			for _, f := range b.Raw {
				if strings.HasSuffix(f.Path, RegoExt) {
					p, err := getFileStoragePath(f.Path)
					if err != nil {
						return fmt.Errorf("failed get storage path for module '%s' in bundle '%s': %w", f.Path, name, err)
					}

					if m := f.module; m != nil {
						// 'f.module.Path' contains the module's path as it relates to the bundle root, and can be used for looking up the rego-version.
						// 'f.Path' can differ, based on how the bundle reader was initialized.
						if err := writeModuleRegoVersionToStore(ctx, store, txn, b, *m, p.String(), runtimeRegoVersion); err != nil {
							return err
						}
					}
				}
			}
		}
	}

	return nil
}

// writeData writes the data value found under each manifest root into the
// store, creating intermediate directories as needed.
func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]any) error {
	for _, root := range roots {
		path, ok := storage.ParsePathEscaped("/" + root)
		if !ok {
			return fmt.Errorf("manifest root path invalid: %v", root)
		}
		if value, ok := lookup(path, data); ok {
			if len(path) > 0 {
				if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
					return err
				}
			}
			if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil {
				return err
			}
		}
	}
	return nil
}

// compileModules compiles compiler+store+bundle modules together and, when an
// authorization decision ref is set, verifies its schema.
func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool, authorizationDecisionRef ast.Ref) error {

	m.Timer(metrics.RegoModuleCompile).Start()
	defer m.Timer(metrics.RegoModuleCompile).Stop()

	modules := make(map[string]*ast.Module, len(compiler.Modules)+len(extraModules)+len(bundles))

	// preserve any modules already on the compiler
	maps.Copy(modules, compiler.Modules)

	// preserve any modules passed in from the store
	maps.Copy(modules, extraModules)

	// include all the new bundle modules
	for bundleName, b := range bundles {
		if legacy {
			for _, mf := range b.Modules {
				modules[mf.Path] = mf.Parsed
			}
		} else {
			maps.Copy(modules, b.ParsedModules(bundleName))
		}
	}

	if compiler.Compile(modules); compiler.Failed() {
		return compiler.Errors
	}

	if authorizationDecisionRef.Equal(ast.EmptyRef()) {
		return nil
	}

	return iCompiler.VerifyAuthorizationPolicySchema(compiler, authorizationDecisionRef)
}

// writeModules compiles the given modules and, on success, upserts the bundle
// policies into the store.
func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error {

	m.Timer(metrics.RegoModuleCompile).Start()
	defer m.Timer(metrics.RegoModuleCompile).Stop()

	modules := map[string]*ast.Module{}

	// preserve any modules already on the compiler
	maps.Copy(modules, compiler.Modules)

	// preserve any modules passed in from the store
	maps.Copy(modules, extraModules)

	// include all the new bundle modules
	for bundleName, b := range bundles {
		if legacy {
			for _, mf := range b.Modules {
				modules[mf.Path] = mf.Parsed
			}
		} else {
			maps.Copy(modules, b.ParsedModules(bundleName))
		}
	}

	if compiler.Compile(modules); compiler.Failed() {
		return compiler.Errors
	}
	for bundleName, b := range bundles {
		for _, mf := range b.Modules {
			var path string

			// For backwards compatibility, in legacy mode, upsert policies to
			// the unprefixed path.
			if legacy {
				path = mf.Path
			} else {
				path = modulePathWithPrefix(bundleName, mf.Path)
			}

			if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
				return err
			}
		}
	}
	return nil
}

// lookup descends into nested map[string]any data following path; ok is false
// if any intermediate key is missing or not an object.
func lookup(path storage.Path, data map[string]any) (any, bool) {
	if len(path) == 0 {
		return data, true
	}
	for i := range len(path) - 1 {
		value, ok := data[path[i]]
		if !ok {
			return nil, false
		}
		obj, ok := value.(map[string]any)
		if !ok {
			return nil, false
		}
		data = obj
	}
	value, ok := data[path[len(path)-1]]
	return value, ok
}

// hasRootsOverlap returns an error describing any root-path collisions between
// the bundles being activated and those already in the store.
func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, newBundles map[string]*Bundle) error {
	storeBundles, err := ReadBundleNamesFromStore(ctx, store, txn)
	if suppressNotFound(err) != nil {
		return err
	}

	allRoots := map[string][]string{}
	bundlesWithEmptyRoots := map[string]bool{}

	// Build a map of roots for existing bundles already in the system
	for _, name := range storeBundles {
		roots, err := ReadBundleRootsFromStore(ctx, store, txn, name)
		if suppressNotFound(err) != nil {
			return err
		}
		allRoots[name] = roots
		if slices.Contains(roots, "") {
			bundlesWithEmptyRoots[name] = true
		}
	}

	// Add in any bundles that are being activated, overwrite existing roots
	// with new ones where bundles are in both groups.
	for name, bundle := range newBundles {
		allRoots[name] = *bundle.Manifest.Roots
		if slices.Contains(*bundle.Manifest.Roots, "") {
			bundlesWithEmptyRoots[name] = true
		}
	}

	// Now check for each new bundle if it conflicts with any of the others
	collidingBundles := map[string]bool{}
	conflictSet := map[string]bool{}
	for name, bundle := range newBundles {
		for otherBundle, otherRoots := range allRoots {
			if name == otherBundle {
				// Skip the current bundle being checked
				continue
			}

			// Compare the "new" roots with other existing (or a different bundles new roots)
			for _, newRoot := range *bundle.Manifest.Roots {
				for _, otherRoot := range otherRoots {
					if !RootPathsOverlap(newRoot, otherRoot) {
						continue
					}

					collidingBundles[name] = true
					collidingBundles[otherBundle] = true

					// Different message required if the roots are same
					if newRoot == otherRoot {
						conflictSet[fmt.Sprintf("root %s is in multiple bundles", newRoot)] = true
					} else {
						paths := []string{newRoot, otherRoot}
						sort.Strings(paths)
						conflictSet[fmt.Sprintf("%s overlaps %s", paths[0], paths[1])] = true
					}
				}
			}
		}
	}

	if len(collidingBundles) == 0 {
		return nil
	}

	bundleNames := strings.Join(util.KeysSorted(collidingBundles), ", ")

	if len(bundlesWithEmptyRoots) > 0 {
		return fmt.Errorf(
			"bundles [%s] have overlapping roots and cannot be activated simultaneously because bundle(s) [%s] specify empty root paths ('') which overlap with any other bundle root",
			bundleNames,
			strings.Join(util.KeysSorted(bundlesWithEmptyRoots), ", "),
		)
	}

	return fmt.Errorf("detected overlapping roots in manifests for these bundles: [%s] (%s)", bundleNames, strings.Join(util.KeysSorted(conflictSet), ", "))
}

// applyPatches applies a delta bundle's patch operations to the store within
// the given transaction.
func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error {
	for _, pat := range patches {

		// construct patch path
		path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/"))
		if !ok {
			return errors.New("error parsing patch path")
		}

		var op storage.PatchOp
		switch pat.Op {
		case "upsert":
			op = storage.AddOp

			_, err := store.Read(ctx, txn, path[:len(path)-1])
			if err != nil {
				if !storage.IsNotFound(err) {
					return err
				}

				if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
					return err
				}
			}
		case "remove":
			op = storage.RemoveOp
		case "replace":
			op = storage.ReplaceOp
		default:
			return fmt.Errorf("bad patch operation: %v", pat.Op)
		}

		// apply the patch
		if err := store.Write(ctx, txn, op, path, pat.Value); err != nil {
			return err
		}
	}

	return nil
}

// Helpers for the older single (unnamed) bundle style manifest storage.

// LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored.
//
// Deprecated: Use ManifestStoragePath and named bundles instead.
var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest")
var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision")

// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location.
//
// Deprecated: Use WriteManifestToStore and named bundles instead.
func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error {
	return write(ctx, store, txn, legacyManifestStoragePath, manifest)
}

// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location.
//
// Deprecated: Use WriteManifestToStore and named bundles instead.
+func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error { + err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil) + if err != nil { + return err + } + return nil +} + +// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location. +// +// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead. +func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) { + return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath) +} + +// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location. +// +// Deprecated: Use Activate with named bundles instead. +func ActivateLegacy(opts *ActivateOpts) error { + opts.legacy = true + return activateBundles(opts) +} + +// GetActivator returns the Activator registered under the given id +func GetActivator(id string) (Activator, error) { + activator, ok := activators[id] + + if !ok { + return nil, fmt.Errorf("no activator exists under id %s", id) + } + + return activator, nil +} + +// RegisterActivator registers a bundle Activator under the given id. +// The id value can later be referenced in ActivateOpts.Plugin to specify +// which activator should be used for that bundle activation operation. +// Note: This must be called *before* RegisterDefaultBundleActivator. 
// RegisterActivator panics on the reserved default id; registrations are
// serialized via activatorMtx.
func RegisterActivator(id string, a Activator) {
	activatorMtx.Lock()
	defer activatorMtx.Unlock()

	if id == defaultActivatorID {
		panic("cannot use reserved activator id, use a different id")
	}

	activators[id] = a
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go b/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go
new file mode 100644
index 0000000000..42c8908f73
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go
@@ -0,0 +1,286 @@
// Copyright 2020 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.

// Package bundle provide helpers that assist in the bundle signature verification process
package bundle

import (
	"bytes"
	"crypto/x509"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"

	"github.com/lestrrat-go/jwx/v3/jwa"
	"github.com/lestrrat-go/jwx/v3/jws/jwsbb"
	"github.com/open-policy-agent/opa/v1/util"
)

// parseVerificationKey converts a string key to the appropriate type for jws.Verify
func parseVerificationKey(keyData string, alg jwa.SignatureAlgorithm) (any, error) {
	// For HMAC algorithms, return the key as bytes
	if alg == jwa.HS256() || alg == jwa.HS384() || alg == jwa.HS512() {
		return []byte(keyData), nil
	}

	// For RSA/ECDSA algorithms, try to parse as PEM first
	block, _ := pem.Decode([]byte(keyData))
	if block != nil {
		switch block.Type {
		case "RSA PUBLIC KEY":
			return x509.ParsePKCS1PublicKey(block.Bytes)
		case "PUBLIC KEY":
			return x509.ParsePKIXPublicKey(block.Bytes)
		case "RSA PRIVATE KEY":
			return x509.ParsePKCS1PrivateKey(block.Bytes)
		case "PRIVATE KEY":
			return x509.ParsePKCS8PrivateKey(block.Bytes)
		case "EC PRIVATE KEY":
			return x509.ParseECPrivateKey(block.Bytes)
		case "CERTIFICATE":
			cert, err := x509.ParseCertificate(block.Bytes)
			if err != nil {
				return nil, err
			}
			return cert.PublicKey, nil
		}
	}

	return nil, errors.New("failed to parse PEM block containing the key")
}

// defaultVerifierID is the reserved registry key for the built-in verifier.
const defaultVerifierID = "_default"

// verifiers is the verifier registry; populated in init().
var verifiers map[string]Verifier

// Verifier is the interface expected for implementations that verify bundle signatures.
type Verifier interface {
	VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error)
}

// VerifyBundleSignature will retrieve the Verifier implementation based
// on the Plugin specified in SignaturesConfig, and call its implementation
// of VerifyBundleSignature. VerifyBundleSignature verifies the bundle signature
// using the given public keys or secret. If a signature is verified, it keeps
// track of the files specified in the JWT payload
func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
	// default implementation does not return a nil for map, so don't
	// do it here either
	files := make(map[string]FileInfo)
	var plugin string
	// for backwards compatibility, check if there is no plugin specified, and use default
	if sc.Plugin == "" {
		plugin = defaultVerifierID
	} else {
		plugin = sc.Plugin
	}
	verifier, err := GetVerifier(plugin)
	if err != nil {
		return files, err
	}
	return verifier.VerifyBundleSignature(sc, bvc)
}

// DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking
// the JWT signature using a locally-accessible public key.
type DefaultVerifier struct{}

// VerifyBundleSignature verifies the bundle signature using the given public keys or secret.
// If a signature is verified, it keeps track of the files specified in the JWT payload
func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
	files := make(map[string]FileInfo)

	if len(sc.Signatures) == 0 {
		return files, errors.New(".signatures.json: missing JWT (expected exactly one)")
	}

	if len(sc.Signatures) > 1 {
		return files, errors.New(".signatures.json: multiple JWTs not supported (expected exactly one)")
	}

	for _, token := range sc.Signatures {
		payload, err := verifyJWTSignature(token, bvc)
		if err != nil {
			return files, err
		}

		for _, file := range payload.Files {
			files[file.Name] = file
		}
	}
	return files, nil
}

// verifyJWTSignature checks the compact JWT's signature and scope and returns
// its decoded payload.
func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) {
	tokbytes := []byte(token)
	hdrb64, payloadb64, signatureb64, err := jwsbb.SplitCompact(tokbytes)
	if err != nil {
		return nil, fmt.Errorf("failed to split compact JWT: %w", err)
	}

	// check for the id of the key to use for JWT signature verification
	// first in the OPA config. If not found, then check the JWT kid.
	keyID := bvc.KeyID
	if keyID == "" {
		// Use jwsbb.Header to access into the "kid" header field, which we will
		// use to determine the key to use for verification.
		hdr := jwsbb.HeaderParseCompact(hdrb64)
		v, err := jwsbb.HeaderGetString(hdr, "kid")
		switch {
		case err == nil:
			// err == nils means we found the key ID in the header
			keyID = v
		case errors.Is(err, jwsbb.ErrHeaderNotFound()):
			// no "kid" in the header. no op.
		default:
			// some other error occurred while trying to extract the key ID
			return nil, fmt.Errorf("failed to extract key ID from headers: %w", err)
		}
	}

	// Because we want to fallback to ds.KeyID when we can't find the
	// keyID, we need to parse the payload here already.
	decoder := base64.RawURLEncoding
	payload := make([]byte, decoder.DecodedLen(len(payloadb64)))
	if _, err := decoder.Decode(payload, payloadb64); err != nil {
		return nil, fmt.Errorf("failed to base64 decode JWT payload: %w", err)
	}

	var ds DecodedSignature
	if err := json.Unmarshal(payload, &ds); err != nil {
		return nil, err
	}

	// If header has no key id, check the deprecated key claim.
	if keyID == "" {
		keyID = ds.KeyID
	}

	// If we still don't have a keyID, we cannot proceed
	if keyID == "" {
		return nil, errors.New("verification key ID is empty")
	}

	// now that we have the keyID, fetch the actual key
	keyConfig, err := bvc.GetPublicKey(keyID)
	if err != nil {
		return nil, err
	}

	alg, ok := jwa.LookupSignatureAlgorithm(keyConfig.Algorithm)
	if !ok {
		return nil, fmt.Errorf("unknown signature algorithm: %s", keyConfig.Algorithm)
	}

	// Parse the key into the appropriate type
	parsedKey, err := parseVerificationKey(keyConfig.Key, alg)
	if err != nil {
		return nil, err
	}

	signature := make([]byte, decoder.DecodedLen(len(signatureb64)))
	if _, err = decoder.Decode(signature, signatureb64); err != nil {
		return nil, fmt.Errorf("failed to base64 decode JWT signature: %w", err)
	}

	// Reconstruct the signing input: base64(header) || '.' || base64(payload).
	signbuf := make([]byte, len(hdrb64)+1+len(payloadb64))
	copy(signbuf, hdrb64)
	signbuf[len(hdrb64)] = '.'
	copy(signbuf[len(hdrb64)+1:], payloadb64)

	if err := jwsbb.Verify(parsedKey, alg.String(), signbuf, signature); err != nil {
		return nil, fmt.Errorf("failed to verify JWT signature: %w", err)
	}

	// verify the scope
	scope := bvc.Scope
	if scope == "" {
		scope = keyConfig.Scope
	}

	if ds.Scope != scope {
		return nil, errors.New("scope mismatch")
	}
	return &ds, nil
}

// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature
func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error {
	var file FileInfo
	var ok bool

	if file, ok = files[path]; !ok {
		return fmt.Errorf("file %v not included in bundle signature", path)
	}

	if file.Algorithm == "" {
		return fmt.Errorf("no hashing algorithm provided for file %v", path)
	}

	hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm))
	if err != nil {
		return err
	}

	// hash the file content
	// For unstructured files, hash the byte stream of the file
	// For structured files, read the byte stream and parse into a JSON structure;
	// then recursively order the fields of all objects alphabetically and then apply
	// the hash function to result to compute the hash. This ensures that the digital signature is
	// independent of whitespace and other non-semantic JSON features.
	var value any
	if IsStructuredDoc(path) {
		err := util.Unmarshal(data.Bytes(), &value)
		if err != nil {
			return err
		}
	} else {
		value = data.Bytes()
	}

	bs, err := hash.HashFile(value)
	if err != nil {
		return err
	}

	// compare file hash with same file in the JWT payloads
	fb, err := hex.DecodeString(file.Hash)
	if err != nil {
		return err
	}

	if !bytes.Equal(fb, bs) {
		return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs)
	}

	// A verified file is removed from the map so callers can detect leftovers.
	delete(files, path)
	return nil
}

// GetVerifier returns the Verifier registered under the given id
func GetVerifier(id string) (Verifier, error) {
	verifier, ok := verifiers[id]
	if !ok {
		return nil, fmt.Errorf("no verifier exists under id %s", id)
	}
	return verifier, nil
}

// RegisterVerifier registers a Verifier under the given id
// NOTE(review): unlike RegisterActivator, this write is not mutex-guarded —
// confirm registrations only occur during single-threaded init.
func RegisterVerifier(id string, v Verifier) error {
	if id == defaultVerifierID {
		return fmt.Errorf("verifier id %s is reserved, use a different id", id)
	}
	verifiers[id] = v
	return nil
}

// init seeds the registry with the built-in default verifier.
func init() {
	verifiers = map[string]Verifier{
		defaultVerifierID: &DefaultVerifier{},
	}
}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go b/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go
new file mode 100644
index 0000000000..69ac718ebd
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go
@@ -0,0 +1,15 @@
// Copyright 2021 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
+ +package capabilities + +import ( + v0 "github.com/open-policy-agent/opa/capabilities" //nolint:staticcheck +) + +// FS contains the embedded capabilities/ directory of the built version, +// which has all the capabilities of previous versions: +// "v0.18.0.json" contains the capabilities JSON of version v0.18.0, etc + +var FS = v0.FS diff --git a/vendor/github.com/open-policy-agent/opa/v1/format/format.go b/vendor/github.com/open-policy-agent/opa/v1/format/format.go new file mode 100644 index 0000000000..c14a6ce798 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/format/format.go @@ -0,0 +1,2377 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package format implements formatting of Rego source files. +package format + +import ( + "bytes" + "errors" + "fmt" + "slices" + "sort" + "strings" + "unicode" + + "github.com/open-policy-agent/opa/internal/future" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// defaultLocationFile is the file name used in `Ast()` for terms +// without a location, as could happen when pretty-printing the +// results of partial eval. +const defaultLocationFile = "__format_default__" + +var ( + expandedConst = ast.NewBody(ast.NewExpr(ast.InternedTerm(true))) + commentsSlicePool = util.NewSlicePool[*ast.Comment](50) +) + +// Opts lets you control the code formatting via `AstWithOpts()`. +type Opts struct { + // IgnoreLocations instructs the formatter not to use the AST nodes' locations + // into account when laying out the code: notably, when the input is the result + // of partial evaluation, arguments maybe have been shuffled around, but still + // carry along their original source locations. + IgnoreLocations bool + + // RegoVersion is the version of Rego to format code for. 
+ RegoVersion ast.RegoVersion + + // ParserOptions is the parser options used when parsing the module to be formatted. + ParserOptions *ast.ParserOptions + + // DropV0Imports instructs the formatter to drop all v0 imports from the module; i.e. 'rego.v1' and 'future.keywords' imports. + // Imports are only removed if [Opts.RegoVersion] makes them redundant. + DropV0Imports bool + + // SkipDefensiveCopying, if true, will avoid deep-copying the AST before formatting it. + // This is true by default for all Source* functions, but false by default for Ast* functions, + // as some formatting operations may otherwise mutate the AST. + SkipDefensiveCopying bool + + Capabilities *ast.Capabilities +} + +func (o Opts) effectiveRegoVersion() ast.RegoVersion { + if o.RegoVersion == ast.RegoUndefined { + return ast.DefaultRegoVersion + } + return o.RegoVersion +} + +// Source formats a Rego source file. The bytes provided must describe a complete +// Rego module. If they don't, Source will return an error resulting from the attempt +// to parse the bytes. +func Source(filename string, src []byte) ([]byte, error) { + return SourceWithOpts(filename, src, Opts{SkipDefensiveCopying: true}) +} + +func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { + regoVersion := opts.effectiveRegoVersion() + + var parserOpts ast.ParserOptions + if opts.ParserOptions != nil { + parserOpts = *opts.ParserOptions + } else if regoVersion == ast.RegoV1 { + // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported. + // Otherwise, we'll default to the default rego-version. + parserOpts.RegoVersion = ast.RegoV1 + } + + // Copying the node does not make sense when both input and output are bytes. 
+ opts.SkipDefensiveCopying = true + + if parserOpts.RegoVersion == ast.RegoUndefined { + parserOpts.RegoVersion = ast.DefaultRegoVersion + } + + module, err := ast.ParseModuleWithOpts(filename, string(src), parserOpts) + if err != nil { + return nil, err + } + + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 { + checkOpts := ast.NewRegoCheckOptions() + // The module is parsed as v0, so we need to disable checks that will be automatically amended by the AstWithOpts call anyways. + checkOpts.RequireIfKeyword = false + checkOpts.RequireContainsKeyword = false + checkOpts.RequireRuleBodyOrValue = false + errs := ast.CheckRegoV1WithOptions(module, checkOpts) + if len(errs) > 0 { + return nil, errs + } + } + + formatted, err := AstWithOpts(module, opts) + if err != nil { + return nil, fmt.Errorf("%s: %v", filename, err) + } + + return formatted, nil +} + +// MustAst is a helper function to format a Rego AST element. If any errors +// occur this function will panic. This is mostly used for test +func MustAst(x any) []byte { + bs, err := Ast(x) + if err != nil { + panic(err) + } + return bs +} + +// MustAstWithOpts is a helper function to format a Rego AST element. If any errors +// occur this function will panic. This is mostly used for test +func MustAstWithOpts(x any, opts Opts) []byte { + bs, err := AstWithOpts(x, opts) + if err != nil { + panic(err) + } + return bs +} + +// Ast formats a Rego AST element. If the passed value is not a valid AST +// element, Ast returns nil and an error. If AST nodes are missing locations +// an arbitrary location will be used. +func Ast(x any) ([]byte, error) { + return AstWithOpts(x, Opts{}) +} + +type fmtOpts struct { + // When the future keyword "contains" is imported, all the pretty-printed + // modules will use that format for partial sets. + // NOTE(sr): For ref-head rules, this will be the default behaviour, since + // we need "contains" to disambiguate complete rules from partial sets. 
+ contains bool + + // Same logic applies as for "contains": if `future.keywords.if` (or all + // future keywords) is imported, we'll render rules that can use `if` with + // `if`. + ifs bool + + // We check all rule ref heads to see if any of them _requires_ support + // for ref heads -- if they do, we'll print all of them in a different way + // than if they don't. + refHeads bool + + regoV1 bool + regoV1Imported bool + futureKeywords []string + + // If true, the formatter will retain keywords in refs, e.g. `p.not ` instead of `p["not"]`. + // The format of the original ref is preserved, so `p["not"]` will still be formatted as `p["not"]`. + allowKeywordsInRefs bool +} + +func (o fmtOpts) keywords() []string { + if o.regoV1 { + return ast.KeywordsV1[:] + } + kws := ast.KeywordsV0[:] + return append(kws, o.futureKeywords...) +} + +func AstWithOpts(x any, opts Opts) ([]byte, error) { + // The node has to be deep copied because it may be mutated below. Alternatively, + // we could avoid the copy by checking if mutation will occur first. For now, + // since format is not latency sensitive, just deep copy in all cases. + if !opts.SkipDefensiveCopying { + x = ast.Copy(x) + } + + wildcards := map[ast.Var]*ast.Term{} + + // NOTE(sr): When the formatter encounters a call to internal.member_2 + // or internal.member_3, it will sugarize them into usage of the `in` + // operator. It has to ensure that the proper future keyword import is + // present. 
+ extraFutureKeywordImports := map[string]struct{}{} + + o := fmtOpts{} + + regoVersion := opts.effectiveRegoVersion() + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 { + o.regoV1 = true + o.ifs = true + o.contains = true + } + + capabilities := opts.Capabilities + if capabilities == nil { + capabilities = ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(opts.effectiveRegoVersion())) + } + o.allowKeywordsInRefs = capabilities.ContainsFeature(ast.FeatureKeywordsInRefs) + + memberRef := ast.Member.Ref() + memberWithKeyRef := ast.MemberWithKey.Ref() + + // Preprocess the AST. Set any required defaults and calculate + // values required for printing the formatted output. + ast.WalkNodes(x, func(x ast.Node) bool { + switch n := x.(type) { + case ast.Body: + if len(n) == 0 { + return false + } + case *ast.Term: + unmangleWildcardVar(wildcards, n) + + case *ast.Expr: + switch { + case n.IsCall() && memberRef.Equal(n.Operator()) || memberWithKeyRef.Equal(n.Operator()): + extraFutureKeywordImports["in"] = struct{}{} + case n.IsEvery(): + extraFutureKeywordImports["every"] = struct{}{} + } + + case *ast.Import: + if kw, ok := future.WhichFutureKeyword(n); ok { + o.futureKeywords = append(o.futureKeywords, kw) + } + + switch { + case isRegoV1Compatible(n): + o.regoV1Imported = true + o.contains = true + o.ifs = true + case future.IsAllFutureKeywords(n): + o.contains = true + o.ifs = true + case future.IsFutureKeyword(n, "contains"): + o.contains = true + case future.IsFutureKeyword(n, "if"): + o.ifs = true + } + + case *ast.Rule: + headLen := len(n.Head.Ref()) + if headLen > 2 { + o.refHeads = true + } + if headLen == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x" + o.refHeads = true + } + } + + if opts.IgnoreLocations || x.Loc() == nil { + x.SetLoc(defaultLocation(x)) + } + return false + }) + + w := &writer{ + indent: "\t", + errs: make([]*ast.Error, 0), + fmtOpts: o, + } + + switch x := x.(type) { + case *ast.Module: + if 
regoVersion == ast.RegoV1 && opts.DropV0Imports { + x.Imports = filterRegoV1Import(x.Imports) + } else if regoVersion == ast.RegoV0CompatV1 { + x.Imports = ensureRegoV1Import(x.Imports) + } + + regoV1Imported := slices.ContainsFunc(x.Imports, isRegoV1Compatible) + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 || regoV1Imported { + if !opts.DropV0Imports && !regoV1Imported { + for _, kw := range o.futureKeywords { + x.Imports = ensureFutureKeywordImport(x.Imports, kw) + } + } else { + x.Imports = future.FilterFutureImports(x.Imports) + } + } else { + for kw := range extraFutureKeywordImports { + x.Imports = ensureFutureKeywordImport(x.Imports, kw) + } + } + err := w.writeModule(x) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + case *ast.Package: + _, err := w.writePackage(x, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + case *ast.Import: + _, err := w.writeImports([]*ast.Import{x}, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + case *ast.Rule: + _, err := w.writeRule(x, false /* isElse */, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + case *ast.Head: + _, err := w.writeHead(x, + false, // isDefault + false, // isExpandedConst + nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + case ast.Body: + _, err := w.writeBody(x, nil) + if err != nil { + return nil, err + } + case *ast.Expr: + _, err := w.writeExpr(x, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + case *ast.With: + _, err := w.writeWith(x, nil, false) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + 
case *ast.Term: + _, err := w.writeTerm(x, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + case ast.Value: + _, err := w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + case *ast.Comment: + err := w.writeComments([]*ast.Comment{x}) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + default: + return nil, fmt.Errorf("not an ast element: %v", x) + } + + if len(w.errs) > 0 { + return nil, w.errs + } + + return squashTrailingNewlines(w.buf.Bytes()), nil +} + +func unmangleWildcardVar(wildcards map[ast.Var]*ast.Term, n *ast.Term) { + + v, ok := n.Value.(ast.Var) + if !ok || !v.IsWildcard() { + return + } + + first, ok := wildcards[v] + if !ok { + wildcards[v] = n + return + } + + w := v[len(ast.WildcardPrefix):] + + // Prepend an underscore to ensure the variable will parse. 
+ if len(w) == 0 || w[0] != '_' { + w = "_" + w + } + + if first != nil { + first.Value = w + wildcards[v] = nil + } + + n.Value = w +} + +func squashTrailingNewlines(bs []byte) []byte { + if bytes.HasSuffix(bs, []byte("\n")) { + return append(bytes.TrimRight(bs, "\n"), '\n') + } + return bs +} + +func defaultLocation(x ast.Node) *ast.Location { + return ast.NewLocation([]byte(x.String()), defaultLocationFile, 1, 1) +} + +type writer struct { + buf bytes.Buffer + + indent string + level int + inline bool + beforeEnd *ast.Comment + delay bool + errs ast.Errors + fmtOpts fmtOpts + writeCommentOnFinalLine bool +} + +func (w *writer) writeModule(module *ast.Module) error { + var pkg *ast.Package + var others []any + var comments []*ast.Comment + visitor := ast.NewGenericVisitor(func(x any) bool { + switch x := x.(type) { + case *ast.Comment: + comments = append(comments, x) + return true + case *ast.Import, *ast.Rule: + others = append(others, x) + return true + case *ast.Package: + pkg = x + return true + default: + return false + } + }) + visitor.Walk(module) + + sort.Slice(comments, func(i, j int) bool { + l, err := locLess(comments[i], comments[j]) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + return l + }) + + sort.Slice(others, func(i, j int) bool { + l, err := locLess(others[i], others[j]) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + return l + }) + + comments = trimTrailingWhitespaceInComments(comments) + + var err error + comments, err = w.writePackage(pkg, comments) + if err != nil { + return err + } + var imports []*ast.Import + var rules []*ast.Rule + for len(others) > 0 { + imports, others = gatherImports(others) + comments, err = w.writeImports(imports, comments) + if err != nil { + return err + } + rules, others = gatherRules(others) + comments, err = w.writeRules(rules, comments) + if err != nil { + return err + } + 
} + + for i, c := range comments { + w.writeLine(c.String()) + if i == len(comments)-1 { + w.write("\n") + } + } + + return nil +} + +func trimTrailingWhitespaceInComments(comments []*ast.Comment) []*ast.Comment { + for _, c := range comments { + c.Text = bytes.TrimRightFunc(c.Text, unicode.IsSpace) + } + + return comments +} + +func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, pkg.Location) + if err != nil { + return nil, err + } + + w.startLine() + + // Omit head as all packages have the DefaultRootDocument prepended at parse time. + path := make(ast.Ref, len(pkg.Path)-1) + if len(path) == 0 { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, pkg.Location, "invalid package path: %s", pkg.Path)) + return comments, nil + } + + path[0] = ast.VarTerm(string(pkg.Path[1].Value.(ast.String))) + copy(path[1:], pkg.Path[2:]) + + w.write("package ") + _, err = w.writeRef(path, nil) + if err != nil { + return nil, err + } + + w.blankLine() + + return comments, nil +} + +func (w *writer) writeComments(comments []*ast.Comment) error { + for i := range comments { + if i > 0 { + l, err := locCmp(comments[i], comments[i-1]) + if err != nil { + return err + } + if l > 1 { + w.blankLine() + } + } + + w.writeLine(comments[i].String()) + } + + return nil +} + +func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) ([]*ast.Comment, error) { + for i, rule := range rules { + var err error + comments, err = w.insertComments(comments, rule.Location) + if err != nil && !errors.As(err, &unexpectedCommentError{}) { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + + comments, err = w.writeRule(rule, false, comments) + if err != nil && !errors.As(err, &unexpectedCommentError{}) { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + + if i < len(rules)-1 && 
w.groupableOneLiner(rule) { + next := rules[i+1] + if w.groupableOneLiner(next) && next.Location.Row == rule.Location.Row+1 { + // Current rule and the next are both groupable one-liners, and + // adjacent in the original policy (i.e. no extra newlines between them). + continue + } + } + w.blankLine() + } + return comments, nil +} + +func (w *writer) groupableOneLiner(rule *ast.Rule) bool { + // Location required to determine if two rules are adjacent in the policy. + // If not, we respect line breaks between rules. + if len(rule.Body) > 1 || rule.Default || rule.Location == nil { + return false + } + + partialSetException := w.fmtOpts.contains || rule.Head.Value != nil + + return (w.fmtOpts.regoV1 || w.fmtOpts.ifs) && partialSetException +} + +func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) ([]*ast.Comment, error) { + if rule == nil { + return comments, nil + } + + if !isElse { + w.startLine() + } + + if rule.Default { + w.write("default ") + } + + // OPA transforms lone bodies like `foo = {"a": "b"}` into rules of the form + // `foo = {"a": "b"} { true }` in the AST. We want to preserve that notation + // in the formatted code instead of expanding the bodies into rules, so we + // pretend that the rule has no body in this case. 
+ isExpandedConst := rule.Body.Equal(expandedConst) && rule.Else == nil + w.writeCommentOnFinalLine = isExpandedConst + + var err error + var unexpectedComment bool + comments, err = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments) + if err != nil { + if errors.As(err, &unexpectedCommentError{}) { + unexpectedComment = true + } else { + return nil, err + } + } + + if len(rule.Body) == 0 || isExpandedConst { + w.endLine() + return comments, nil + } + + w.writeCommentOnFinalLine = true + + // this excludes partial sets UNLESS `contains` is used + partialSetException := w.fmtOpts.contains || rule.Head.Value != nil + + if (w.fmtOpts.regoV1 || w.fmtOpts.ifs) && partialSetException { + w.write(" if") + if len(rule.Body) == 1 { + if rule.Body[0].Location.Row == rule.Head.Location.Row { + w.write(" ") + var err error + comments, err = w.writeExpr(rule.Body[0], comments) + if err != nil { + return nil, err + } + w.endLine() + if rule.Else != nil { + comments, err = w.writeElse(rule, comments) + if err != nil { + return nil, err + } + } + return comments, nil + } + } + } + if unexpectedComment && len(comments) > 0 { + w.write(" { ") + } else { + w.write(" {") + w.endLine() + } + + w.up() + + comments, err = w.writeBody(rule.Body, comments) + if err != nil { + // the unexpected comment error is passed up to be handled by writeHead + if !errors.As(err, &unexpectedCommentError{}) { + return nil, err + } + } + + var closeLoc *ast.Location + + if len(rule.Head.Args) > 0 { + closeLoc = closingLoc('(', ')', '{', '}', rule.Location) + } else if rule.Head.Key != nil { + closeLoc = closingLoc('[', ']', '{', '}', rule.Location) + } else { + closeLoc = closingLoc(0, 0, '{', '}', rule.Location) + } + + comments, err = w.insertComments(comments, closeLoc) + if err != nil { + return nil, err + } + + if err := w.down(); err != nil { + return nil, err + } + w.startLine() + w.write("}") + if rule.Else != nil { + comments, err = w.writeElse(rule, comments) + if err != nil { + 
return nil, err + } + } + return comments, nil +} + +func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) ([]*ast.Comment, error) { + // If there was nothing else on the line before the "else" starts + // then preserve this style of else block, otherwise it will be + // started as an "inline" else eg: + // + // p { + // ... + // } + // + // else { + // ... + // } + // + // versus + // + // p { + // ... + // } else { + // ... + // } + // + // Note: This doesn't use the `close` as it currently isn't accurate for all + // types of values. Checking the actual line text is the most consistent approach. + wasInline := false + ruleLines := bytes.Split(rule.Location.Text, []byte("\n")) + relativeElseRow := rule.Else.Location.Row - rule.Location.Row + if relativeElseRow > 0 && relativeElseRow < len(ruleLines) { + elseLine := ruleLines[relativeElseRow] + if !bytes.HasPrefix(bytes.TrimSpace(elseLine), []byte("else")) { + wasInline = true + } + } + + // If there are any comments between the closing brace of the previous rule and the start + // of the else block we will always insert a new blank line between them. + hasCommentAbove := len(comments) > 0 && comments[0].Location.Row-rule.Else.Head.Location.Row < 0 || w.beforeEnd != nil + + if !hasCommentAbove && wasInline { + w.write(" ") + } else { + w.blankLine() + w.startLine() + } + + rule.Else.Head.Name = "else" // NOTE(sr): whaaat + + elseHeadReference := ast.VarTerm("else") // construct a reference for the term + elseHeadReference.Location = rule.Else.Head.Location // and set the location to match the rule location + + rule.Else.Head.Reference = ast.Ref{elseHeadReference} + rule.Else.Head.Args = nil + var err error + comments, err = w.insertComments(comments, rule.Else.Head.Location) + if err != nil { + return nil, err + } + + if hasCommentAbove && !wasInline { + // The comments would have ended the line, be sure to start one again + // before writing the rest of the "else" rule. 
+ w.startLine() + } + + // For backwards compatibility adjust the rule head value location + // TODO: Refactor the logic for inserting comments, or special + // case comments in a rule head value so this can be removed + if rule.Else.Head.Value != nil { + rule.Else.Head.Value.Location = rule.Else.Head.Location + } + + return w.writeRule(rule.Else, true, comments) +} + +func (w *writer) writeHead(head *ast.Head, isDefault bool, isExpandedConst bool, comments []*ast.Comment) ([]*ast.Comment, error) { + ref := head.Ref() + if head.Key != nil && head.Value == nil && !head.HasDynamicRef() { + ref = ref.GroundPrefix() + } + if w.fmtOpts.refHeads || len(ref) == 1 { + var err error + comments, err = w.writeRef(ref, comments) + if err != nil { + return nil, err + } + } else { + // if there are comments within the object in the rule head, don't format it + if len(comments) > 0 && ref[1].Location.Row == comments[0].Location.Row { + comments, err := w.writeUnformatted(head.Location, comments) + if err != nil { + return nil, err + } + return comments, nil + } + + w.write(ref[0].String()) + w.write("[") + w.write(ref[1].String()) + w.write("]") + } + + if len(head.Args) > 0 { + w.write("(") + var args []any + for _, arg := range head.Args { + args = append(args, arg) + } + var err error + comments, err = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter()) + w.write(")") + if err != nil { + return comments, err + } + } + if head.Key != nil { + if w.fmtOpts.contains && head.Value == nil { + w.write(" contains ") + var err error + comments, err = w.writeTerm(head.Key, comments) + if err != nil { + return comments, err + } + } else if head.Value == nil { // no `if` for p[x] notation + w.write("[") + var err error + comments, err = w.writeTerm(head.Key, comments) + if err != nil { + return comments, err + } + w.write("]") + } + } + + if head.Value != nil && + (head.Key != nil || !ast.InternedTerm(true).Equal(head.Value) || 
isExpandedConst || isDefault) { + + // in rego v1, explicitly print value for ref-head constants that aren't partial set assignments, e.g.: + // * a -> parser error, won't reach here + // * a.b -> a contains "b" + // * a.b.c -> a.b.c := true + // * a.b.c.d -> a.b.c.d := true + isRegoV1RefConst := w.fmtOpts.regoV1 && isExpandedConst && head.Key == nil && len(head.Args) == 0 + + if head.Location == head.Value.Location && + head.Name != "else" && + ast.InternedTerm(true).Equal(head.Value) && + !isRegoV1RefConst { + // If the value location is the same as the location of the head, + // we know that the value is generated, i.e. f(1) + // Don't print the value (` = true`) as it is implied. + return comments, nil + } + + if head.Assign || w.fmtOpts.regoV1 { + // preserve assignment operator, and enforce it if formatting for Rego v1 + w.write(" := ") + } else { + w.write(" = ") + } + var err error + comments, err = w.writeTerm(head.Value, comments) + if err != nil { + return comments, err + } + } + return comments, nil +} + +func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) ([]*ast.Comment, error) { + before, at, comments := partitionComments(comments, loc) + + err := w.writeComments(before) + if err != nil { + return nil, err + } + if len(before) > 0 && loc.Row-before[len(before)-1].Location.Row > 1 { + w.blankLine() + } + + return comments, w.beforeLineEnd(at) +} + +func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, body.Loc()) + if err != nil { + return comments, err + } + for i, expr := range body { + // Insert a blank line in before the expression if it was not right + // after the previous expression. 
+ if i > 0 { + lastRow := body[i-1].Location.Row + for _, c := range body[i-1].Location.Text { + if c == '\n' { + lastRow++ + } + } + if expr.Location.Row > lastRow+1 { + w.blankLine() + } + } + w.startLine() + + comments, err = w.writeExpr(expr, comments) + if err != nil && !errors.As(err, &unexpectedCommentError{}) { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + w.endLine() + } + return comments, nil +} + +func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, expr.Location) + if err != nil { + return comments, err + } + if !w.inline { + w.startLine() + } + + if expr.Negated { + w.write("not ") + } + + switch t := expr.Terms.(type) { + case *ast.SomeDecl: + comments, err = w.writeSomeDecl(t, comments) + if err != nil { + return nil, err + } + case *ast.Every: + comments, err = w.writeEvery(t, comments) + if err != nil { + return nil, err + } + case []*ast.Term: + comments, err = w.writeFunctionCall(expr, comments) + if err != nil { + return comments, err + } + case *ast.Term: + comments, err = w.writeTerm(t, comments) + if err != nil { + return comments, err + } + } + + var indented, down bool + for i, with := range expr.With { + if i == 0 || with.Location.Row == expr.With[i-1].Location.Row { // we're on the same line + comments, err = w.writeWith(with, comments, false) + if err != nil { + return nil, err + } + } else { // we're on a new line + if !indented { + indented = true + + w.up() + down = true + } + w.endLine() + w.startLine() + comments, err = w.writeWith(with, comments, true) + if err != nil { + return nil, err + } + } + } + + if down { + if err := w.down(); err != nil { + return nil, err + } + } + + return comments, nil +} + +func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, decl.Location) + 
if err != nil { + return nil, err + } + w.write("some ") + + row := decl.Location.Row + + for i, term := range decl.Symbols { + switch val := term.Value.(type) { + case ast.Var: + if term.Location.Row > row { + w.endLine() + w.startLine() + w.write(w.indent) + row = term.Location.Row + } else if i > 0 { + w.write(" ") + } + + comments, err = w.writeTerm(term, comments) + if err != nil { + return nil, err + } + + if i < len(decl.Symbols)-1 { + w.write(",") + } + case ast.Call: + comments, err = w.writeInOperator(false, val[1:], comments, decl.Location, ast.BuiltinMap[val[0].String()].Decl) + if err != nil { + return nil, err + } + } + } + + return comments, nil +} + +func (w *writer) writeEvery(every *ast.Every, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, every.Location) + if err != nil { + return nil, err + } + w.write("every ") + if every.Key != nil { + comments, err = w.writeTerm(every.Key, comments) + if err != nil { + return nil, err + } + w.write(", ") + } + comments, err = w.writeTerm(every.Value, comments) + if err != nil { + return nil, err + } + w.write(" in ") + comments, err = w.writeTerm(every.Domain, comments) + if err != nil { + return nil, err + } + w.write(" {") + comments, err = w.writeComprehensionBody('{', '}', every.Body, every.Loc(), every.Loc(), comments) + if err != nil { + // the unexpected comment error is passed up to be handled by writeHead + if !errors.As(err, &unexpectedCommentError{}) { + return nil, err + } + } + + if len(every.Body) == 1 && + every.Body[0].Location.Row == every.Location.Row { + w.write(" ") + } + w.write("}") + return comments, nil +} + +func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) ([]*ast.Comment, error) { + + terms := expr.Terms.([]*ast.Term) + operator := terms[0].Value.String() + + switch operator { + case ast.Member.Name, ast.MemberWithKey.Name: + return w.writeInOperator(false, terms[1:], comments, 
terms[0].Location, ast.BuiltinMap[terms[0].String()].Decl) + } + + bi, ok := ast.BuiltinMap[operator] + if !ok || bi.Infix == "" { + return w.writeFunctionCallPlain(terms, comments) + } + + numDeclArgs := bi.Decl.Arity() + numCallArgs := len(terms) - 1 + + var err error + switch numCallArgs { + case numDeclArgs: // Print infix where result is unassigned (e.g., x != y) + comments, err = w.writeTerm(terms[1], comments) + if err != nil { + return nil, err + } + w.write(" " + bi.Infix + " ") + return w.writeTerm(terms[2], comments) + case numDeclArgs + 1: // Print infix where result is assigned (e.g., z = x + y) + comments, err = w.writeTerm(terms[3], comments) + if err != nil { + return nil, err + } + w.write(" " + ast.Equality.Infix + " ") + comments, err = w.writeTerm(terms[1], comments) + if err != nil { + return nil, err + } + w.write(" " + bi.Infix + " ") + comments, err = w.writeTerm(terms[2], comments) + if err != nil { + return nil, err + } + return comments, nil + } + // NOTE(Trolloldem): in this point we are operating with a built-in function with the + // wrong arity even when the assignment notation is used + w.errs = append(w.errs, ArityFormatMismatchError(terms[1:], terms[0].String(), terms[0].Location, bi.Decl)) + return w.writeFunctionCallPlain(terms, comments) +} + +func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) { + if r, ok := terms[0].Value.(ast.Ref); ok { + if c, err := w.writeRef(r, comments); err != nil { + return c, err + } + } else { + w.write(terms[0].String()) + } + w.write("(") + defer w.write(")") + + args := make([]any, len(terms)-1) + for i, t := range terms[1:] { + args[i] = t + } + loc := terms[0].Location + var err error + comments, err = w.writeIterable(args, loc, closingLoc(0, 0, '(', ')', loc), comments, w.listWriter()) + if err != nil { + return nil, err + } + return comments, nil +} + +func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented bool) 
([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, with.Location) + if err != nil { + return nil, err + } + if !indented { + w.write(" ") + } + w.write("with ") + comments, err = w.writeTerm(with.Target, comments) + if err != nil { + return nil, err + } + w.write(" as ") + comments, err = w.writeTerm(with.Value, comments) + if err != nil { + return nil, err + } + return comments, nil +} + +// saveComments saves a copy of the comments slice in a pooled slice to and returns it. +// This is to avoid having to create a new slice every time we need to save comments. +// The caller is responsible for putting the slice back in the pool when done. +func saveComments(comments []*ast.Comment) *[]*ast.Comment { + cmlen := len(comments) + saved := commentsSlicePool.Get(cmlen) + + copy(*saved, comments) + + return saved +} + +func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) { + if len(comments) == 0 { + return w.writeTermParens(false, term, comments) + } + + currentLen := w.buf.Len() + currentComments := saveComments(comments) + defer commentsSlicePool.Put(currentComments) + + comments, err := w.writeTermParens(false, term, comments) + if err != nil { + if errors.As(err, &unexpectedCommentError{}) { + w.buf.Truncate(currentLen) + + comments, uErr := w.writeUnformatted(term.Location, *currentComments) + if uErr != nil { + return nil, uErr + } + return comments, err + } + return nil, err + } + + return comments, nil +} + +// writeUnformatted writes the unformatted text instead and updates the comment state +func (w *writer) writeUnformatted(location *ast.Location, currentComments []*ast.Comment) ([]*ast.Comment, error) { + if len(location.Text) == 0 { + return nil, errors.New("original unformatted text is empty") + } + + rowNum := bytes.Count(location.Text, []byte{'\n'}) + 1 + + w.writeBytes(location.Text) + + comments := make([]*ast.Comment, 0, len(currentComments)) + for _, c := range currentComments 
{ + // if there is a body then wait to write the last comment + if w.writeCommentOnFinalLine && c.Location.Row == location.Row+rowNum-1 { + w.write(" ") + w.writeBytes(c.Location.Text) + continue + } + + // drop comments that occur within the rule raw text + if c.Location.Row < location.Row+rowNum-1 { + continue + } + comments = append(comments, c) + } + return comments, nil +} + +func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) ([]*ast.Comment, error) { + var err error + comments, err = w.insertComments(comments, term.Location) + if err != nil { + return nil, err + } + if !w.inline { + w.startLine() + } + + switch x := term.Value.(type) { + case ast.Ref: + comments, err = w.writeRef(x, comments) + if err != nil { + return nil, err + } + case ast.Object: + comments, err = w.writeObject(x, term.Location, comments) + if err != nil { + return nil, err + } + case *ast.Array: + comments, err = w.writeArray(x, term.Location, comments) + if err != nil { + return nil, err + } + case ast.Set: + comments, err = w.writeSet(x, term.Location, comments) + if err != nil { + return nil, err + } + case *ast.ArrayComprehension: + comments, err = w.writeArrayComprehension(x, term.Location, comments) + if err != nil { + return nil, err + } + case *ast.ObjectComprehension: + comments, err = w.writeObjectComprehension(x, term.Location, comments) + if err != nil { + return nil, err + } + case *ast.SetComprehension: + comments, err = w.writeSetComprehension(x, term.Location, comments) + if err != nil { + return nil, err + } + case ast.String: + if term.Location.Text[0] == '`' { + // To preserve raw strings, we need to output the original text, + w.writeBytes(term.Location.Text) + } else { + // x.String() cannot be used by default because it can change the input string "\u0000" to "\x00" + var after, quote []byte + var found bool + // term.Location.Text could contain the prefix `else :=`, remove it + switch term.Location.Text[len(term.Location.Text)-1] 
{ + case '"': + quote = []byte{'"'} + _, after, found = bytes.Cut(term.Location.Text, quote) + case '`': + quote = []byte{'`'} + _, after, found = bytes.Cut(term.Location.Text, quote) + } + + if !found { + // If no quoted string was found, that means it is a key being formatted to a string + // e.g. partial_set.y to partial_set["y"] + w.write(x.String()) + } else { + w.writeBytes(quote) + w.writeBytes(after) + } + + } + case *ast.TemplateString: + comments, err = w.writeTemplateString(x, comments) + if err != nil { + return nil, err + } + case ast.Var: + w.write(w.formatVar(x)) + case ast.Call: + comments, err = w.writeCall(parens, x, term.Location, comments) + if err != nil { + return nil, err + } + case fmt.Stringer: + w.write(x.String()) + } + + if !w.inline { + w.startLine() + } + return comments, nil +} + +func (w *writer) writeTemplateString(ts *ast.TemplateString, comments []*ast.Comment) ([]*ast.Comment, error) { + w.write("$") + if ts.MultiLine { + w.write("`") + } else { + w.write(`"`) + } + + for i, p := range ts.Parts { + switch x := p.(type) { + case *ast.Expr: + w.write("{") + w.up() + + if w.beforeEnd != nil { + // We have a comment on the same line as the opening template-expression brace '{' + w.endLine() + w.startLine() + } else { + // We might have comments to write; the first of which should be on the same line as the opening template-expression brace '{' + before, _, _ := partitionComments(comments, x.Location) + if len(before) > 0 { + w.write(" ") + w.inline = true + if err := w.writeComments(before); err != nil { + return nil, err + } + + comments = comments[len(before):] + } + } + + var err error + comments, err = w.writeExpr(x, comments) + if err != nil { + return comments, err + } + + // write trailing comments + if i+1 < len(ts.Parts) { + before, _, _ := partitionComments(comments, ts.Parts[i+1].Loc()) + if len(before) > 0 { + w.endLine() + if err := w.writeComments(before); err != nil { + return nil, err + } + + comments = 
comments[len(before):] + w.startLine() + } + } + + w.write("}") + + if err := w.down(); err != nil { + return nil, err + } + case *ast.Term: + if s, ok := x.Value.(ast.String); ok { + if ts.MultiLine { + w.write(ast.EscapeTemplateStringStringPart(string(s))) + } else { + str := ast.EscapeTemplateStringStringPart(s.String()) + w.write(str[1 : len(str)-1]) + } + } else { + s := x.String() + s = strings.TrimPrefix(s, "\"") + s = strings.TrimSuffix(s, "\"") + w.write(s) + } + default: + w.write("") + } + } + + if ts.MultiLine { + w.write("`") + } else { + w.write(`"`) + } + + return comments, nil +} + +func (w *writer) writeRef(x ast.Ref, comments []*ast.Comment) ([]*ast.Comment, error) { + if len(x) > 0 { + parens := false + _, ok := x[0].Value.(ast.Call) + if ok { + parens = x[0].Location.Text[0] == 40 // Starts with "(" + } + var err error + comments, err = w.writeTermParens(parens, x[0], comments) + if err != nil { + return nil, err + } + path := x[1:] + for _, t := range path { + switch p := t.Value.(type) { + case ast.String: + w.writeRefStringPath(p, t.Location) + case ast.Var: + w.writeBracketed(w.formatVar(p)) + default: + w.write("[") + comments, err = w.writeTerm(t, comments) + if err != nil { + if errors.As(err, &unexpectedCommentError{}) { + // add a new line so that the closing bracket isn't part of the unexpected comment + w.write("\n") + } else { + return nil, err + } + } + w.write("]") + } + } + } + + return comments, nil +} + +func (w *writer) writeBracketed(str string) { + w.write("[" + str + "]") +} + +func (w *writer) writeRefStringPath(s ast.String, l *ast.Location) { + str := string(s) + if w.shouldBracketRefTerm(str, l) { + w.writeBracketed(s.String()) + } else { + w.write("." 
+ str) + } +} + +func (w *writer) shouldBracketRefTerm(s string, l *ast.Location) bool { + if !ast.IsVarCompatibleString(s) { + return true + } + + if ast.IsInKeywords(s, w.fmtOpts.keywords()) { + if !w.fmtOpts.allowKeywordsInRefs { + return true + } + + if l != nil && l.Text[0] == 34 { // If the original term text starts with '"', we preserve the brackets and quotes + return true + } + } + + return false +} + +func (*writer) formatVar(v ast.Var) string { + if v.IsWildcard() { + return ast.Wildcard.String() + } + return v.String() +} + +func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + bi, ok := ast.BuiltinMap[x[0].String()] + if !ok || bi.Infix == "" { + return w.writeFunctionCallPlain(x, comments) + } + + if bi.Infix == "in" { + // NOTE(sr): `in` requires special handling, mirroring what happens in the parser, + // since there can be one or two lhs arguments. + return w.writeInOperator(true, x[1:], comments, loc, bi.Decl) + } + + // TODO(tsandall): improve to consider precedence? + if parens { + w.write("(") + } + + // NOTE(Trolloldem): writeCall is only invoked when the function call is a term + // of another function. 
The only valid arity is the one of the + // built-in function + if bi.Decl.Arity() != len(x)-1 { + w.errs = append(w.errs, ArityFormatMismatchError(x[1:], x[0].String(), loc, bi.Decl)) + return comments, nil + } + + var err error + comments, err = w.writeTermParens(true, x[1], comments) + if err != nil { + return nil, err + } + w.write(" " + bi.Infix + " ") + comments, err = w.writeTermParens(true, x[2], comments) + if err != nil { + return nil, err + } + if parens { + w.write(")") + } + + return comments, nil +} + +func (w *writer) writeInOperator(parens bool, operands []*ast.Term, comments []*ast.Comment, loc *ast.Location, f *types.Function) ([]*ast.Comment, error) { + + if len(operands) != f.Arity() { + // The number of operands does not math the arity of the `in` operator + operator := ast.Member.Name + if f.Arity() == 3 { + operator = ast.MemberWithKey.Name + } + w.errs = append(w.errs, ArityFormatMismatchError(operands, operator, loc, f)) + return comments, nil + } + kw := "in" + var err error + switch len(operands) { + case 2: + comments, err = w.writeTermParens(true, operands[0], comments) + if err != nil { + return nil, err + } + w.write(" ") + w.write(kw) + w.write(" ") + comments, err = w.writeTermParens(true, operands[1], comments) + if err != nil { + return nil, err + } + case 3: + if parens { + w.write("(") + defer w.write(")") + } + comments, err = w.writeTermParens(true, operands[0], comments) + if err != nil { + return nil, err + } + w.write(", ") + comments, err = w.writeTermParens(true, operands[1], comments) + if err != nil { + return nil, err + } + w.write(" ") + w.write(kw) + w.write(" ") + comments, err = w.writeTermParens(true, operands[2], comments) + if err != nil { + return nil, err + } + } + return comments, nil +} + +func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + w.write("{") + defer w.write("}") + + var s []any + obj.Foreach(func(k, v *ast.Term) { + s = append(s, 
ast.Item(k, v)) + }) + return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.objectWriter()) +} + +func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + w.write("[") + defer w.write("]") + + var s []any + arr.Foreach(func(t *ast.Term) { + s = append(s, t) + }) + var err error + comments, err = w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter()) + if err != nil { + return nil, err + } + return comments, nil +} + +func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + + if set.Len() == 0 { + w.write("set()") + var err error + comments, err = w.insertComments(comments, closingLoc(0, 0, '(', ')', loc)) + if err != nil { + return nil, err + } + return comments, nil + } + + w.write("{") + defer w.write("}") + + var s []any + set.Foreach(func(t *ast.Term) { + s = append(s, t) + }) + var err error + comments, err = w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter()) + if err != nil { + return nil, err + } + return comments, nil +} + +func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + w.write("[") + defer w.write("]") + + return w.writeComprehension('[', ']', arr.Term, arr.Body, loc, comments) +} + +func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + w.write("{") + defer w.write("}") + + return w.writeComprehension('{', '}', set.Term, set.Body, loc, comments) +} + +func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + w.write("{") + defer w.write("}") + + object.Value.Location = object.Key.Location // Ensure the value is not written on the next line. 
+ if object.Key.Location.Row-loc.Row > 1 { + w.endLine() + w.startLine() + } + + var err error + comments, err = w.writeTerm(object.Key, comments) + if err != nil { + return nil, err + } + w.write(": ") + return w.writeComprehension('{', '}', object.Value, object.Body, loc, comments) +} + +func (w *writer) writeComprehension(openChar, closeChar byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + if term.Location.Row-loc.Row >= 1 { + w.endLine() + w.startLine() + } + + parens := false + _, ok := term.Value.(ast.Call) + if ok { + parens = term.Location.Text[0] == 40 // Starts with "(" + } + var err error + comments, err = w.writeTermParens(parens, term, comments) + if err != nil { + return nil, err + } + w.write(" |") + + return w.writeComprehensionBody(openChar, closeChar, body, term.Location, loc, comments) +} + +func (w *writer) writeComprehensionBody(openChar, closeChar byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) ([]*ast.Comment, error) { + exprs := make([]any, 0, len(body)) + for _, expr := range body { + exprs = append(exprs, expr) + } + lines, err := w.groupIterable(exprs, term) + if err != nil { + return nil, err + } + + if body.Loc().Row-term.Row > 0 || len(lines) > 1 { + w.endLine() + w.up() + defer w.startLine() + defer func() { + if err := w.down(); err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + }() + + var err error + comments, err = w.writeBody(body, comments) + if err != nil { + return comments, err + } + } else { + w.write(" ") + i := 0 + for ; i < len(body)-1; i++ { + comments, err = w.writeExpr(body[i], comments) + if err != nil { + return comments, err + } + w.write("; ") + } + comments, err = w.writeExpr(body[i], comments) + if err != nil { + return comments, err + } + } + comments, err = w.insertComments(comments, closingLoc(0, 0, openChar, closeChar, compr)) + if err != nil { + return nil, err + } + 
return comments, nil +} + +func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) ([]*ast.Comment, error) { + m, comments := mapImportsToComments(imports, comments) + + groups := groupImports(imports) + for _, group := range groups { + var err error + comments, err = w.insertComments(comments, group[0].Loc()) + if err != nil { + return nil, err + } + + // Sort imports within a newline grouping. + slices.SortFunc(group, (*ast.Import).Compare) + for _, i := range group { + w.startLine() + err = w.writeImport(i) + if err != nil { + return nil, err + } + if c, ok := m[i]; ok { + w.write(" " + c.String()) + } + w.endLine() + } + w.blankLine() + } + + return comments, nil +} + +func (w *writer) writeImport(imp *ast.Import) error { + path := imp.Path.Value.(ast.Ref) + + w.write("import ") + + if _, ok := future.WhichFutureKeyword(imp); ok { + // We don't want to wrap future.keywords imports in parens, so we create a new writer that doesn't + w2 := writer{ + buf: bytes.Buffer{}, + } + _, err := w2.writeRef(path, nil) + if err != nil { + return err + } + w.write(w2.buf.String()) + } else { + _, err := w.writeRef(path, nil) + if err != nil { + return err + } + } + + if len(imp.Alias) > 0 { + w.write(" as " + imp.Alias.String()) + } + + return nil +} + +type entryWriter func(any, []*ast.Comment) ([]*ast.Comment, error) + +func (w *writer) writeIterable(elements []any, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) ([]*ast.Comment, error) { + lines, err := w.groupIterable(elements, last) + if err != nil { + return nil, err + } + if len(lines) > 1 { + w.delayBeforeEnd() + w.startMultilineSeq() + } + + i := 0 + for ; i < len(lines)-1; i++ { + comments, err = w.writeIterableLine(lines[i], comments, fn) + if err != nil { + return nil, err + } + w.write(",") + + w.endLine() + w.startLine() + } + + comments, err = w.writeIterableLine(lines[i], comments, fn) + if err != nil { + return nil, err + } + + if len(lines) > 1 { + 
w.write(",") + w.endLine() + comments, err = w.insertComments(comments, close) + if err != nil { + return nil, err + } + if err := w.down(); err != nil { + return nil, err + } + w.startLine() + } + + return comments, nil +} + +func (w *writer) writeIterableLine(elements []any, comments []*ast.Comment, fn entryWriter) ([]*ast.Comment, error) { + if len(elements) == 0 { + return comments, nil + } + + i := 0 + for ; i < len(elements)-1; i++ { + var err error + comments, err = fn(elements[i], comments) + if err != nil { + return nil, err + } + w.write(", ") + } + + return fn(elements[i], comments) +} + +func (w *writer) objectWriter() entryWriter { + return func(x any, comments []*ast.Comment) ([]*ast.Comment, error) { + entry := x.([2]*ast.Term) + + call, isCall := entry[0].Value.(ast.Call) + + paren := false + if isCall && ast.Or.Ref().Equal(call[0].Value) && entry[0].Location.Text[0] == 40 { // Starts with "(" + paren = true + w.write("(") + } + + var err error + comments, err = w.writeTerm(entry[0], comments) + if err != nil { + return nil, err + } + if paren { + w.write(")") + } + + w.write(": ") + + call, isCall = entry[1].Value.(ast.Call) + if isCall && ast.Or.Ref().Equal(call[0].Value) && entry[1].Location.Text[0] == 40 { // Starts with "(" + w.write("(") + defer w.write(")") + } + + return w.writeTerm(entry[1], comments) + } +} + +func (w *writer) listWriter() entryWriter { + return func(x any, comments []*ast.Comment) ([]*ast.Comment, error) { + t, ok := x.(*ast.Term) + if ok { + call, isCall := t.Value.(ast.Call) + if isCall && ast.Or.Ref().Equal(call[0].Value) && t.Location.Text[0] == 40 { // Starts with "(" + w.write("(") + defer w.write(")") + } + } + + return w.writeTerm(t, comments) + } +} + +// groupIterable will group the `elements` slice into slices according to their +// location: anything on the same line will be put into a slice. 
+func (w *writer) groupIterable(elements []any, last *ast.Location) ([][]any, error) { + // Generated vars occur in the AST when we're rendering the result of + // partial evaluation in a bundle build with optimization. + // Those variables, and wildcard variables have the "default location", + // set in `Ast()`). That is no proper file location, and the grouping + // based on source location will yield a bad result. + // Another case is generated variables: they do have proper file locations, + // but their row/col information may no longer match their AST location. + // So, for generated variables, we also don't trust the location, but + // keep them ungrouped. + def := false // default location found? + for _, elem := range elements { + ast.WalkTerms(elem, func(t *ast.Term) bool { + if t.Location.File == defaultLocationFile { + def = true + return true + } + return false + }) + ast.WalkVars(elem, func(v ast.Var) bool { + if v.IsGenerated() { + def = true + return true + } + return false + }) + if def { // return as-is + return [][]any{elements}, nil + } + } + + slices.SortFunc(elements, func(i, j any) int { + l, err := locCmp(i, j) + if err != nil { + w.errs = append(w.errs, ast.NewError(ast.FormatErr, &ast.Location{}, "%s", err.Error())) + } + return l + }) + + var lines [][]any + cur := make([]any, 0, len(elements)) + for i, t := range elements { + elem := t + loc, err := getLoc(elem) + if err != nil { + return nil, err + } + lineDiff := loc.Row - last.Row + if lineDiff > 0 && i > 0 { + lines = append(lines, cur) + cur = nil + } + + last = loc + cur = append(cur, elem) + } + return append(lines, cur), nil +} + +func mapImportsToComments(imports []*ast.Import, comments []*ast.Comment) (map[*ast.Import]*ast.Comment, []*ast.Comment) { + var leftovers []*ast.Comment + m := map[*ast.Import]*ast.Comment{} + + for _, c := range comments { + matched := false + for _, i := range imports { + if c.Loc().Row == i.Loc().Row { + m[i] = c + matched = true + break + } + } + 
if !matched { + leftovers = append(leftovers, c) + } + } + + return m, leftovers +} + +func groupImports(imports []*ast.Import) [][]*ast.Import { + switch len(imports) { // shortcuts + case 0: + return nil + case 1: + return [][]*ast.Import{imports} + } + // there are >=2 imports to group + + var groups [][]*ast.Import + group := []*ast.Import{imports[0]} + + for _, i := range imports[1:] { + last := group[len(group)-1] + + // nil-location imports have been sorted up to come first + if i.Loc() != nil && last.Loc() != nil && // first import with a location, or + i.Loc().Row-last.Loc().Row > 1 { // more than one row apart from previous import + + // start a new group + groups = append(groups, group) + group = []*ast.Import{} + } + group = append(group, i) + } + if len(group) > 0 { + groups = append(groups, group) + } + + return groups +} + +func partitionComments(comments []*ast.Comment, l *ast.Location) ([]*ast.Comment, *ast.Comment, []*ast.Comment) { + if len(comments) == 0 { + return nil, nil, nil + } + + numBefore, numAfter := 0, 0 + for _, c := range comments { + switch cmp := c.Location.Row - l.Row; { + case cmp < 0: + numBefore++ + case cmp > 0: + numAfter++ + } + } + + if numAfter == len(comments) { + return nil, nil, comments + } + + var at *ast.Comment + + before := make([]*ast.Comment, 0, numBefore) + after := make([]*ast.Comment, 0, numAfter) + + for _, c := range comments { + switch cmp := c.Location.Row - l.Row; { + case cmp < 0: + before = append(before, c) + case cmp > 0: + after = append(after, c) + default: + at = c + } + } + + return before, at, after +} + +func gatherImports(others []any) (imports []*ast.Import, rest []any) { + i := 0 +loop: + for ; i < len(others); i++ { + switch x := others[i].(type) { + case *ast.Import: + imports = append(imports, x) + case *ast.Rule: + break loop + } + } + return imports, others[i:] +} + +func gatherRules(others []any) (rules []*ast.Rule, rest []any) { + i := 0 +loop: + for ; i < len(others); i++ { + switch x 
:= others[i].(type) { + case *ast.Rule: + rules = append(rules, x) + case *ast.Import: + break loop + } + } + return rules, others[i:] +} + +func locLess(a, b any) (bool, error) { + c, err := locCmp(a, b) + return c < 0, err +} + +func locCmp(a, b any) (int, error) { + al, err := getLoc(a) + if err != nil { + return 0, err + } + bl, err := getLoc(b) + if err != nil { + return 0, err + } + switch { + case al == nil && bl == nil: + return 0, nil + case al == nil: + return -1, nil + case bl == nil: + return 1, nil + } + + if cmp := al.Row - bl.Row; cmp != 0 { + return cmp, nil + + } + return al.Col - bl.Col, nil +} + +func getLoc(x any) (*ast.Location, error) { + switch x := x.(type) { + case ast.Node: // *ast.Head, *ast.Expr, *ast.With, *ast.Term + return x.Loc(), nil + case *ast.Location: + return x, nil + case [2]*ast.Term: // Special case to allow for easy printing of objects. + return x[0].Location, nil + default: + return nil, fmt.Errorf("unable to get location for type %v", x) + } +} + +var negativeRow = &ast.Location{Row: -1} + +func closingLoc(skipOpen, skipClose, openChar, closeChar byte, loc *ast.Location) *ast.Location { + i, offset := 0, 0 + + // Skip past parens/brackets/braces in rule heads. 
+ if skipOpen > 0 { + i, offset = skipPast(skipOpen, skipClose, loc) + } + + for ; i < len(loc.Text); i++ { + if loc.Text[i] == openChar { + break + } + } + + if i >= len(loc.Text) { + return negativeRow + } + + state := 1 + for state > 0 { + i++ + if i >= len(loc.Text) { + return negativeRow + } + + switch loc.Text[i] { + case openChar: + state++ + case closeChar: + state-- + case '\n': + offset++ + } + } + + return &ast.Location{Row: loc.Row + offset} +} + +func skipPast(openChar, closeChar byte, loc *ast.Location) (int, int) { + i := 0 + for ; i < len(loc.Text); i++ { + if loc.Text[i] == openChar { + break + } + } + + state := 1 + offset := 0 + for state > 0 { + i++ + if i >= len(loc.Text) { + return i, offset + } + + switch loc.Text[i] { + case openChar: + state++ + case closeChar: + state-- + case '\n': + offset++ + } + } + + return i, offset +} + +// startLine begins a line with the current indentation level. +func (w *writer) startLine() { + w.inline = true + for range w.level { + w.write(w.indent) + } +} + +// endLine ends a line with a newline. +func (w *writer) endLine() { + w.inline = false + if w.beforeEnd != nil && !w.delay { + w.write(" " + w.beforeEnd.String()) + w.beforeEnd = nil + } + w.delay = false + w.write("\n") +} + +type unexpectedCommentError struct { + newComment string + newCommentRow int + existingComment string + existingCommentRow int +} + +func (u unexpectedCommentError) Error() string { + return fmt.Sprintf("unexpected new comment (%s) on line %d because there is already a comment (%s) registered for line %d", + u.newComment, u.newCommentRow, u.existingComment, u.existingCommentRow) +} + +// beforeLineEnd registers a comment to be printed at the end of the current line. 
+func (w *writer) beforeLineEnd(c *ast.Comment) error { + if w.beforeEnd != nil { + if c == nil { + return nil + } + + existingComment := truncatedString(w.beforeEnd.String(), 100) + existingCommentRow := w.beforeEnd.Location.Row + newComment := truncatedString(c.String(), 100) + w.beforeEnd = nil + + return unexpectedCommentError{ + newComment: newComment, + newCommentRow: c.Location.Row, + existingComment: existingComment, + existingCommentRow: existingCommentRow, + } + } + w.beforeEnd = c + return nil +} + +func truncatedString(s string, max int) string { + if len(s) > max { + return s[:max-2] + "..." + } + return s +} + +func (w *writer) delayBeforeEnd() { + w.delay = true +} + +// line prints a blank line. If the writer is currently in the middle of a line, +// line ends it and then prints a blank one. +func (w *writer) blankLine() { + if w.inline { + w.endLine() + } + w.write("\n") +} + +// write writes string s to the buffer. +func (w *writer) write(s string) { + w.buf.WriteString(s) +} + +// writeBytes writes []byte b to the buffer. +func (w *writer) writeBytes(b []byte) { + w.buf.Write(b) +} + +// writeLine writes the string on a newly started line, then terminate the line. 
+func (w *writer) writeLine(s string) { + if !w.inline { + w.startLine() + } + w.write(s) + w.endLine() +} + +func (w *writer) startMultilineSeq() { + w.endLine() + w.up() + w.startLine() +} + +// up increases the indentation level +func (w *writer) up() { + w.level++ +} + +// down decreases the indentation level +func (w *writer) down() error { + if w.level == 0 { + return errors.New("negative indentation level") + } + w.level-- + return nil +} + +func ensureFutureKeywordImport(imps []*ast.Import, kw string) []*ast.Import { + for _, imp := range imps { + if future.IsAllFutureKeywords(imp) || + future.IsFutureKeyword(imp, kw) || + (future.IsFutureKeyword(imp, "every") && kw == "in") { // "every" implies "in", so we don't need to add both + return imps + } + } + imp := &ast.Import{ + // NOTE: This is a hack to not error on the ref containing a keyword already present in v1. + // A cleaner solution would be to instead allow refs to contain keyword terms. + // E.g. in v1, `import future.keywords["in"]` is valid, but `import future.keywords.in` is not + // as it contains a reserved keyword. + Path: ast.MustParseTerm("future.keywords[\"" + kw + "\"]"), + //Path: ast.MustParseTerm("future.keywords." 
+ kw), + } + imp.Location = defaultLocation(imp) + return append(imps, imp) +} + +func ensureRegoV1Import(imps []*ast.Import) []*ast.Import { + return ensureImport(imps, ast.RegoV1CompatibleRef) +} + +func filterRegoV1Import(imps []*ast.Import) []*ast.Import { + var ret []*ast.Import + for _, imp := range imps { + path := imp.Path.Value.(ast.Ref) + if !ast.RegoV1CompatibleRef.Equal(path) { + ret = append(ret, imp) + } + } + return ret +} + +func ensureImport(imps []*ast.Import, path ast.Ref) []*ast.Import { + for _, imp := range imps { + p := imp.Path.Value.(ast.Ref) + if p.Equal(path) { + return imps + } + } + imp := &ast.Import{ + Path: ast.NewTerm(path), + } + imp.Location = defaultLocation(imp) + return append(imps, imp) +} + +// ArityFormatErrDetail but for `fmt` checks since compiler has not run yet. +type ArityFormatErrDetail struct { + Have []string `json:"have"` + Want []string `json:"want"` +} + +// ArityFormatMismatchError but for `fmt` checks since the compiler has not run yet. +func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Location, f *types.Function) *ast.Error { + want := make([]string, f.Arity()) + for i, arg := range f.FuncArgs().Args { + want[i] = types.Sprint(arg) + } + + have := make([]string, len(operands)) + for i := range operands { + have[i] = ast.ValueName(operands[i].Value) + } + err := ast.NewError(ast.TypeErr, loc, "%s: %s", operator, "arity mismatch") + err.Details = &ArityFormatErrDetail{ + Have: have, + Want: want, + } + return err +} + +// Lines returns the string representation of the detail. 
+func (d *ArityFormatErrDetail) Lines() []string { + return []string{ + "have: (" + strings.Join(d.Have, ",") + ")", + "want: (" + strings.Join(d.Want, ",") + ")", + } +} + +// isRegoV1Compatible returns true if the passed *ast.Import is `rego.v1` +func isRegoV1Compatible(imp *ast.Import) bool { + path := imp.Path.Value.(ast.Ref) + return len(path) == 2 && + ast.RegoRootDocument.Equal(path[0]) && + path[1].Equal(ast.InternedTerm("v1")) +} diff --git a/vendor/github.com/open-policy-agent/opa/ir/ir.go b/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go similarity index 99% rename from vendor/github.com/open-policy-agent/opa/ir/ir.go rename to vendor/github.com/open-policy-agent/opa/v1/ir/ir.go index c07670704e..3657a9b673 100644 --- a/vendor/github.com/open-policy-agent/opa/ir/ir.go +++ b/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go @@ -11,7 +11,7 @@ package ir import ( "fmt" - "github.com/open-policy-agent/opa/types" + "github.com/open-policy-agent/opa/v1/types" ) type ( @@ -106,7 +106,7 @@ const ( Unused ) -func (a *Policy) String() string { +func (*Policy) String() string { return "Policy" } diff --git a/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go b/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go new file mode 100644 index 0000000000..f792e2c1b6 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go @@ -0,0 +1,147 @@ +// Copyright 2022 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package ir + +import ( + "encoding/json" + "fmt" + "reflect" +) + +func (a *Block) MarshalJSON() ([]byte, error) { + var result typedBlock + result.Stmts = make([]typedStmt, len(a.Stmts)) + for i := range a.Stmts { + tpe := reflect.Indirect(reflect.ValueOf(a.Stmts[i])).Type().Name() + result.Stmts[i] = typedStmt{ + Type: tpe, + Stmt: a.Stmts[i], + } + } + return json.Marshal(result) +} + +func (a *Block) UnmarshalJSON(bs []byte) error { + var typed rawTypedBlock + if err := json.Unmarshal(bs, &typed); err != nil { + return err + } + a.Stmts = make([]Stmt, len(typed.Stmts)) + for i := range typed.Stmts { + var err error + a.Stmts[i], err = typed.Stmts[i].Unmarshal() + if err != nil { + return err + } + } + return nil +} + +func (a *Operand) MarshalJSON() ([]byte, error) { + var result typedOperand + result.Value = a.Value + result.Type = a.Value.typeHint() + return json.Marshal(result) +} + +func (a *Operand) UnmarshalJSON(bs []byte) error { + var typed rawTypedOperand + if err := json.Unmarshal(bs, &typed); err != nil { + return err + } + f, ok := valFactories[typed.Type] + if !ok { + return fmt.Errorf("unrecognized value type %q", typed.Type) + } + x := f() + if err := json.Unmarshal(typed.Value, &x); err != nil { + return err + } + a.Value = x + return nil +} + +type typedBlock struct { + Stmts []typedStmt `json:"stmts"` +} + +type typedStmt struct { + Type string `json:"type"` + Stmt Stmt `json:"stmt"` +} + +type rawTypedBlock struct { + Stmts []rawTypedStmt `json:"stmts"` +} + +type rawTypedStmt struct { + Type string `json:"type"` + Stmt json.RawMessage `json:"stmt"` +} + +func (raw rawTypedStmt) Unmarshal() (Stmt, error) { + f, ok := stmtFactories[raw.Type] + if !ok { + return nil, fmt.Errorf("unrecognized statement type %q", raw.Type) + } + x := f() + if err := json.Unmarshal(raw.Stmt, &x); err != nil { + return nil, err + } + return x, nil +} + +type rawTypedOperand struct { + Type string `json:"type"` + Value json.RawMessage `json:"value"` +} + +type 
typedOperand struct { + Type string `json:"type"` + Value Val `json:"value"` +} + +var stmtFactories = map[string]func() Stmt{ + "ReturnLocalStmt": func() Stmt { return &ReturnLocalStmt{} }, + "CallStmt": func() Stmt { return &CallStmt{} }, + "CallDynamicStmt": func() Stmt { return &CallDynamicStmt{} }, + "BlockStmt": func() Stmt { return &BlockStmt{} }, + "BreakStmt": func() Stmt { return &BreakStmt{} }, + "DotStmt": func() Stmt { return &DotStmt{} }, + "LenStmt": func() Stmt { return &LenStmt{} }, + "ScanStmt": func() Stmt { return &ScanStmt{} }, + "NotStmt": func() Stmt { return &NotStmt{} }, + "AssignIntStmt": func() Stmt { return &AssignIntStmt{} }, + "AssignVarStmt": func() Stmt { return &AssignVarStmt{} }, + "AssignVarOnceStmt": func() Stmt { return &AssignVarOnceStmt{} }, + "ResetLocalStmt": func() Stmt { return &ResetLocalStmt{} }, + "MakeNullStmt": func() Stmt { return &MakeNullStmt{} }, + "MakeNumberIntStmt": func() Stmt { return &MakeNumberIntStmt{} }, + "MakeNumberRefStmt": func() Stmt { return &MakeNumberRefStmt{} }, + "MakeArrayStmt": func() Stmt { return &MakeArrayStmt{} }, + "MakeObjectStmt": func() Stmt { return &MakeObjectStmt{} }, + "MakeSetStmt": func() Stmt { return &MakeSetStmt{} }, + "EqualStmt": func() Stmt { return &EqualStmt{} }, + "NotEqualStmt": func() Stmt { return &NotEqualStmt{} }, + "IsArrayStmt": func() Stmt { return &IsArrayStmt{} }, + "IsObjectStmt": func() Stmt { return &IsObjectStmt{} }, + "IsDefinedStmt": func() Stmt { return &IsDefinedStmt{} }, + "IsSetStmt": func() Stmt { return &IsSetStmt{} }, + "IsUndefinedStmt": func() Stmt { return &IsUndefinedStmt{} }, + "ArrayAppendStmt": func() Stmt { return &ArrayAppendStmt{} }, + "ObjectInsertStmt": func() Stmt { return &ObjectInsertStmt{} }, + "ObjectInsertOnceStmt": func() Stmt { return &ObjectInsertOnceStmt{} }, + "ObjectMergeStmt": func() Stmt { return &ObjectMergeStmt{} }, + "SetAddStmt": func() Stmt { return &SetAddStmt{} }, + "WithStmt": func() Stmt { return &WithStmt{} }, + 
"NopStmt": func() Stmt { return &NopStmt{} }, + "ResultSetAddStmt": func() Stmt { return &ResultSetAddStmt{} }, +} + +var valFactories = map[string]func() Val{ + "bool": func() Val { var x Bool; return &x }, + "string_index": func() Val { var x StringIndex; return &x }, + "local": func() Val { var x Local; return &x }, +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go b/vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go new file mode 100644 index 0000000000..53d7cbae88 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go @@ -0,0 +1,44 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ir + +import ( + "fmt" + "io" + "strings" +) + +// Pretty writes a human-readable representation of an IR object to w. +func Pretty(w io.Writer, x any) error { + + pp := &prettyPrinter{ + depth: -1, + w: w, + } + return Walk(pp, x) +} + +type prettyPrinter struct { + depth int + w io.Writer +} + +func (pp *prettyPrinter) Before(_ any) { + pp.depth++ +} + +func (pp *prettyPrinter) After(_ any) { + pp.depth-- +} + +func (pp *prettyPrinter) Visit(x any) (Visitor, error) { + pp.writeIndent("%T %+v", x, x) + return pp, nil +} + +func (pp *prettyPrinter) writeIndent(f string, a ...any) { + pad := strings.Repeat("| ", pp.depth) + fmt.Fprintf(pp.w, pad+f+"\n", a...) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/ir/walk.go b/vendor/github.com/open-policy-agent/opa/v1/ir/walk.go new file mode 100644 index 0000000000..788f36cd8e --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/ir/walk.go @@ -0,0 +1,93 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ir + +// Visitor defines the interface for visiting IR nodes. 
+type Visitor interface { + Before(x any) + Visit(x any) (Visitor, error) + After(x any) +} + +// Walk invokes the visitor for nodes under x. +func Walk(vis Visitor, x any) error { + impl := walkerImpl{ + vis: vis, + } + impl.walk(x) + return impl.err +} + +type walkerImpl struct { + vis Visitor + err error +} + +func (w *walkerImpl) walk(x any) { + if w.err != nil { // abort on error + return + } + if x == nil { + return + } + + prev := w.vis + w.vis.Before(x) + defer w.vis.After(x) + w.vis, w.err = w.vis.Visit(x) + if w.err != nil { + return + } else if w.vis == nil { + w.vis = prev + return + } + + switch x := x.(type) { + case *Policy: + w.walk(x.Static) + w.walk(x.Plans) + w.walk(x.Funcs) + case *Static: + for _, s := range x.Strings { + w.walk(s) + } + for _, f := range x.BuiltinFuncs { + w.walk(f) + } + for _, f := range x.Files { + w.walk(f) + } + case *Plans: + for _, pl := range x.Plans { + w.walk(pl) + } + case *Funcs: + for _, fn := range x.Funcs { + w.walk(fn) + } + case *Func: + for _, b := range x.Blocks { + w.walk(b) + } + case *Plan: + for _, b := range x.Blocks { + w.walk(b) + } + case *Block: + for _, s := range x.Stmts { + w.walk(s) + } + case *BlockStmt: + for _, b := range x.Blocks { + w.walk(b) + } + case *ScanStmt: + w.walk(x.Block) + case *NotStmt: + w.walk(x.Block) + case *WithStmt: + w.walk(x.Block) + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go b/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go new file mode 100644 index 0000000000..fba7a9c939 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go @@ -0,0 +1,99 @@ +package keys + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/open-policy-agent/opa/v1/util" +) + +const defaultSigningAlgorithm = "RS256" + +var supportedAlgos = map[string]struct{}{ + "ES256": {}, "ES384": {}, "ES512": {}, + "HS256": {}, "HS384": {}, "HS512": {}, + "PS256": {}, "PS384": {}, "PS512": {}, + "RS256": {}, "RS384": {}, "RS512": {}, +} + +// 
IsSupportedAlgorithm true if provided alg is supported +func IsSupportedAlgorithm(alg string) bool { + _, ok := supportedAlgos[alg] + return ok +} + +// Config holds the keys used to sign or verify bundles and tokens +type Config struct { + Key string `json:"key"` + PrivateKey string `json:"private_key"` + Algorithm string `json:"algorithm"` + Scope string `json:"scope"` +} + +// Equal returns true if this key config is equal to the other. +func (k *Config) Equal(other *Config) bool { + return other != nil && *k == *other +} + +func (k *Config) validateAndInjectDefaults(id string) error { + if k.Key == "" && k.PrivateKey == "" { + return fmt.Errorf("invalid keys configuration: no keys provided for key ID %v", id) + } + + if k.Algorithm == "" { + k.Algorithm = defaultSigningAlgorithm + } + + if !IsSupportedAlgorithm(k.Algorithm) { + return fmt.Errorf("unsupported algorithm '%v'", k.Algorithm) + } + + return nil +} + +// NewKeyConfig return a new Config +func NewKeyConfig(key, alg, scope string) (*Config, error) { + var pubKey string + if _, err := os.Stat(key); err == nil { + bs, err := os.ReadFile(key) + if err != nil { + return nil, err + } + pubKey = string(bs) + } else if os.IsNotExist(err) { + pubKey = key + } else { + return nil, err + } + + return &Config{ + Key: pubKey, + Algorithm: alg, + Scope: scope, + }, nil +} + +// ParseKeysConfig returns a map containing the key and the signing algorithm +func ParseKeysConfig(raw json.RawMessage) (map[string]*Config, error) { + keys := map[string]*Config{} + var obj map[string]json.RawMessage + + if err := util.Unmarshal(raw, &obj); err == nil { + for k := range obj { + var keyConfig Config + if err = util.Unmarshal(obj[k], &keyConfig); err != nil { + return nil, err + } + + if err = keyConfig.validateAndInjectDefaults(k); err != nil { + return nil, err + } + + keys[k] = &keyConfig + } + } else { + return nil, err + } + return keys, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go 
b/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go new file mode 100644 index 0000000000..55b8e7dc44 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go @@ -0,0 +1,62 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package loader + +import ( + "fmt" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// Errors is a wrapper for multiple loader errors. +type Errors []error + +func (e Errors) Error() string { + if len(e) == 0 { + return "no error(s)" + } + if len(e) == 1 { + return "1 error occurred during loading: " + e[0].Error() + } + buf := make([]string, len(e)) + for i := range buf { + buf[i] = e[i].Error() + } + return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n") +} + +func (e *Errors) add(err error) { + if errs, ok := err.(ast.Errors); ok { + for i := range errs { + *e = append(*e, errs[i]) + } + } else { + *e = append(*e, err) + } +} + +type unsupportedDocumentType string + +func (path unsupportedDocumentType) Error() string { + return string(path) + ": document must be of type object" +} + +type unrecognizedFile string + +func (path unrecognizedFile) Error() string { + return string(path) + ": can't recognize file type" +} + +func isUnrecognizedFile(err error) bool { + _, ok := err.(unrecognizedFile) + return ok +} + +type mergeError string + +func (e mergeError) Error() string { + return string(e) + ": merge error" +} diff --git a/vendor/github.com/open-policy-agent/opa/loader/extension/extension.go b/vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/loader/extension/extension.go rename to vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go diff --git a/vendor/github.com/open-policy-agent/opa/loader/filter/filter.go 
b/vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/loader/filter/filter.go rename to vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go b/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go new file mode 100644 index 0000000000..d97e3e5409 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go @@ -0,0 +1,863 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package loader contains utilities for loading files into OPA. +package loader + +import ( + "bytes" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/yaml" + + fileurl "github.com/open-policy-agent/opa/internal/file/url" + "github.com/open-policy-agent/opa/internal/merge" + "github.com/open-policy-agent/opa/v1/ast" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/loader/extension" + "github.com/open-policy-agent/opa/v1/loader/filter" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/inmem" + "github.com/open-policy-agent/opa/v1/util" +) + +// Result represents the result of successfully loading zero or more files. +type Result struct { + Documents map[string]any + Modules map[string]*RegoFile + path []string +} + +// ParsedModules returns the parsed modules stored on the result. +func (l *Result) ParsedModules() map[string]*ast.Module { + modules := make(map[string]*ast.Module) + for _, module := range l.Modules { + modules[module.Name] = module.Parsed + } + return modules +} + +// Compiler returns a Compiler object with the compiled modules from this loader +// result. 
+func (l *Result) Compiler() (*ast.Compiler, error) { + compiler := ast.NewCompiler() + compiler.Compile(l.ParsedModules()) + if compiler.Failed() { + return nil, compiler.Errors + } + return compiler, nil +} + +// Store returns a Store object with the documents from this loader result. +func (l *Result) Store() (storage.Store, error) { + return l.StoreWithOpts() +} + +// StoreWithOpts returns a Store object with the documents from this loader result, +// instantiated with the passed options. +func (l *Result) StoreWithOpts(opts ...inmem.Opt) (storage.Store, error) { + return inmem.NewFromObjectWithOpts(l.Documents, opts...), nil +} + +// RegoFile represents the result of loading a single Rego source file. +type RegoFile struct { + Name string + Parsed *ast.Module + Raw []byte +} + +// Filter defines the interface for filtering files during loading. If the +// filter returns true, the file should be excluded from the result. +type Filter = filter.LoaderFilter + +// GlobExcludeName excludes files and directories whose names do not match the +// shell style pattern at minDepth or greater. +func GlobExcludeName(pattern string, minDepth int) Filter { + return func(_ string, info fs.FileInfo, depth int) bool { + match, _ := filepath.Match(pattern, info.Name()) + return match && depth >= minDepth + } +} + +// FileLoader defines an interface for loading OPA data files +// and Rego policies. 
+type FileLoader interface { + All(paths []string) (*Result, error) + Filtered(paths []string, filter Filter) (*Result, error) + AsBundle(path string) (*bundle.Bundle, error) + WithReader(io.Reader) FileLoader + WithFS(fs.FS) FileLoader + WithMetrics(metrics.Metrics) FileLoader + WithFilter(Filter) FileLoader + WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader + WithSkipBundleVerification(bool) FileLoader + WithBundleLazyLoadingMode(bool) FileLoader + WithProcessAnnotation(bool) FileLoader + WithCapabilities(*ast.Capabilities) FileLoader + // Deprecated: Use SetOptions in the json package instead, where a longer description + // of why this is deprecated also can be found. + WithJSONOptions(*astJSON.Options) FileLoader + WithRegoVersion(ast.RegoVersion) FileLoader + WithFollowSymlinks(bool) FileLoader +} + +// NewFileLoader returns a new FileLoader instance. +func NewFileLoader() FileLoader { + return &fileLoader{ + metrics: metrics.New(), + files: make(map[string]bundle.FileInfo), + } +} + +type fileLoader struct { + metrics metrics.Metrics + filter Filter + bvc *bundle.VerificationConfig + skipVerify bool + bundleLazyLoading bool + files map[string]bundle.FileInfo + opts ast.ParserOptions + fsys fs.FS + reader io.Reader + followSymlinks bool +} + +// WithFS provides an fs.FS to use for loading files. You can pass nil to +// use plain IO calls (e.g. os.Open, os.Stat, etc.), this is the default +// behaviour. +func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader { + fl.fsys = fsys + return fl +} + +// WithReader provides an io.Reader to use for loading the bundle tarball. +// An io.Reader passed via WithReader takes precedence over an fs.FS passed +// via WithFS. 
+func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader { + fl.reader = rdr + return fl +} + +// WithMetrics provides the metrics instance to use while loading +func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader { + fl.metrics = m + return fl +} + +// WithFilter specifies the filter object to use to filter files while loading +func (fl *fileLoader) WithFilter(filter Filter) FileLoader { + fl.filter = filter + return fl +} + +// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle +func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader { + fl.bvc = config + return fl +} + +// WithSkipBundleVerification skips verification of a signed bundle +func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader { + fl.skipVerify = skipVerify + return fl +} + +// WithBundleLazyLoadingMode enables or disables bundle lazy loading mode +func (fl *fileLoader) WithBundleLazyLoadingMode(bundleLazyLoading bool) FileLoader { + fl.bundleLazyLoading = bundleLazyLoading + return fl +} + +// WithProcessAnnotation enables or disables processing of schema annotations on rules +func (fl *fileLoader) WithProcessAnnotation(processAnnotation bool) FileLoader { + fl.opts.ProcessAnnotation = processAnnotation + return fl +} + +// WithCapabilities sets the supported capabilities when loading the files +func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader { + fl.opts.Capabilities = caps + return fl +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. +func (fl *fileLoader) WithJSONOptions(*astJSON.Options) FileLoader { + return fl +} + +// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules. 
+func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader { + fl.opts.RegoVersion = version + return fl +} + +// WithFollowSymlinks enables or disables following symlinks when loading files +func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader { + fl.followSymlinks = followSymlinks + return fl +} + +// All returns a Result object loaded (recursively) from the specified paths. +func (fl fileLoader) All(paths []string) (*Result, error) { + return fl.Filtered(paths, nil) +} + +// Filtered returns a Result object loaded (recursively) from the specified +// paths while applying the given filters. If any filter returns true, the +// file/directory is excluded. +func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) { + return all(fl.fsys, paths, filter, func(curr *Result, path string, depth int) error { + + var ( + bs []byte + err error + ) + if fl.fsys != nil { + bs, err = fs.ReadFile(fl.fsys, path) + } else { + bs, err = os.ReadFile(path) + } + if err != nil { + return err + } + + result, err := loadKnownTypes(path, bs, fl.metrics, fl.opts, fl.bundleLazyLoading) + if err != nil { + if !isUnrecognizedFile(err) { + return err + } + if depth > 0 { + return nil + } + result, err = loadFileForAnyType(path, bs, fl.metrics, fl.opts) + if err != nil { + return err + } + } + + return curr.merge(path, result) + }) +} + +// AsBundle loads a path as a bundle. If it is a single file +// it will be treated as a normal tarball bundle. If a directory +// is supplied it will be loaded as an unzipped bundle tree. 
+func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { + path, err := fileurl.Clean(path) + if err != nil { + return nil, err + } + + if err := checkForUNCPath(path); err != nil { + return nil, err + } + + var bundleLoader bundle.DirectoryLoader + var isDir bool + if fl.reader != nil { + bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter) + } else { + bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter) + } + + if err != nil { + return nil, err + } + bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks) + + br := bundle.NewCustomReader(bundleLoader). + WithMetrics(fl.metrics). + WithBundleVerificationConfig(fl.bvc). + WithSkipBundleVerification(fl.skipVerify). + WithLazyLoadingMode(fl.bundleLazyLoading). + WithProcessAnnotations(fl.opts.ProcessAnnotation). + WithCapabilities(fl.opts.Capabilities). + WithFollowSymlinks(fl.followSymlinks). + WithRegoVersion(fl.opts.RegoVersion). + WithLazyLoadingMode(fl.bundleLazyLoading). + WithBundleName(path) + + // For bundle directories add the full path in front of module file names + // to simplify debugging. + if isDir { + br.WithBaseDir(path) + } + + b, err := br.Read() + if err != nil { + err = fmt.Errorf("bundle %s: %w", path, err) + } + + return &b, err +} + +// GetBundleDirectoryLoader returns a bundle directory loader which can be used to load +// files in the directory +func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) { + return GetBundleDirectoryLoaderFS(nil, path, nil) +} + +// GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load +// files in the directory after applying the given filter. 
+func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) { + return GetBundleDirectoryLoaderFS(nil, path, filter) +} + +// GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load +// files in the directory. +func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) { + path, err := fileurl.Clean(path) + if err != nil { + return nil, false, err + } + + if err := checkForUNCPath(path); err != nil { + return nil, false, err + } + + var fi fs.FileInfo + if fsys != nil { + fi, err = fs.Stat(fsys, path) + } else { + fi, err = os.Stat(path) + } + if err != nil { + return nil, false, fmt.Errorf("error reading %q: %s", path, err) + } + + var bundleLoader bundle.DirectoryLoader + if fi.IsDir() { + if fsys != nil { + bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path) + } else { + bundleLoader = bundle.NewDirectoryLoader(path) + } + } else { + var fh fs.File + if fsys != nil { + fh, err = fsys.Open(path) + } else { + fh, err = os.Open(path) + } + if err != nil { + return nil, false, err + } + bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path) + } + + if filter != nil { + bundleLoader = bundleLoader.WithFilter(filter) + } + return bundleLoader, fi.IsDir(), nil +} + +// FilteredPaths is the same as FilterPathsFS using the current diretory file +// system +func FilteredPaths(paths []string, filter Filter) ([]string, error) { + return FilteredPathsFS(nil, paths, filter) +} + +// FilteredPathsFS return a list of files from the specified +// paths while applying the given filters. If any filter returns true, the +// file/directory is excluded. 
+func FilteredPathsFS(fsys fs.FS, paths []string, filter Filter) ([]string, error) { + result := []string{} + + _, err := all(fsys, paths, filter, func(_ *Result, path string, _ int) error { + result = append(result, path) + return nil + }) + if err != nil { + return nil, err + } + return result, nil +} + +// Schemas loads a schema set from the specified file path. +func Schemas(schemaPath string) (*ast.SchemaSet, error) { + + var errs Errors + ss, err := loadSchemas(schemaPath) + if err != nil { + errs.add(err) + return nil, errs + } + + return ss, nil +} + +func loadSchemas(schemaPath string) (*ast.SchemaSet, error) { + + if schemaPath == "" { + return nil, nil + } + + ss := ast.NewSchemaSet() + path, err := fileurl.Clean(schemaPath) + if err != nil { + return nil, err + } + + info, err := os.Stat(path) + if err != nil { + return nil, err + } + + // Handle single file case. + if !info.IsDir() { + schema, err := loadOneSchema(path) + if err != nil { + return nil, err + } + ss.Put(ast.SchemaRootRef, schema) + return ss, nil + + } + + // Handle directory case. + rootDir := path + + err = filepath.Walk(path, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } else if info.IsDir() { + return nil + } + + schema, err := loadOneSchema(path) + if err != nil { + return err + } + + relPath, err := filepath.Rel(rootDir, path) + if err != nil { + return err + } + + key := getSchemaSetByPathKey(relPath) + ss.Put(key, schema) + return nil + }) + + if err != nil { + return nil, err + } + + return ss, nil +} + +func getSchemaSetByPathKey(path string) ast.Ref { + + front := filepath.Dir(path) + last := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) + + var parts []string + + if front != "." 
{ + parts = append(strings.Split(filepath.ToSlash(front), "/"), last) + } else { + parts = []string{last} + } + + key := make(ast.Ref, 1+len(parts)) + key[0] = ast.SchemaRootDocument + for i := range parts { + key[i+1] = ast.InternedTerm(parts[i]) + } + + return key +} + +func loadOneSchema(path string) (any, error) { + bs, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var schema any + if err := util.Unmarshal(bs, &schema); err != nil { + return nil, fmt.Errorf("%s: %w", path, err) + } + + return schema, nil +} + +// All returns a Result object loaded (recursively) from the specified paths. +// +// Deprecated: Use FileLoader.Filtered() instead. +func All(paths []string) (*Result, error) { + return NewFileLoader().Filtered(paths, nil) +} + +// Filtered returns a Result object loaded (recursively) from the specified +// paths while applying the given filters. If any filter returns true, the +// file/directory is excluded. +// +// Deprecated: Use FileLoader.Filtered() instead. +func Filtered(paths []string, filter Filter) (*Result, error) { + return NewFileLoader().Filtered(paths, filter) +} + +// AsBundle loads a path as a bundle. If it is a single file +// it will be treated as a normal tarball bundle. If a directory +// is supplied it will be loaded as an unzipped bundle tree. +// +// Deprecated: Use FileLoader.AsBundle() instead. +func AsBundle(path string) (*bundle.Bundle, error) { + return NewFileLoader().AsBundle(path) +} + +// AllRegos returns a Result object loaded (recursively) with all Rego source +// files from the specified paths. +func AllRegos(paths []string) (*Result, error) { + return NewFileLoader().Filtered(paths, func(_ string, info os.FileInfo, _ int) bool { + return !info.IsDir() && !strings.HasSuffix(info.Name(), bundle.RegoExt) + }) +} + +// Rego is deprecated. Use RegoWithOpts instead. 
+func Rego(path string) (*RegoFile, error) { + return RegoWithOpts(path, ast.ParserOptions{}) +} + +// RegoWithOpts returns a RegoFile object loaded from the given path. +func RegoWithOpts(path string, opts ast.ParserOptions) (*RegoFile, error) { + path, err := fileurl.Clean(path) + if err != nil { + return nil, err + } + bs, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return loadRego(path, bs, metrics.New(), opts) +} + +// CleanPath returns the normalized version of a path that can be used as an identifier. +func CleanPath(path string) string { + return strings.Trim(path, "/") +} + +// Paths returns a sorted list of files contained at path. If recurse is true +// and path is a directory, then Paths will walk the directory structure +// recursively and list files at each level. +func Paths(path string, recurse bool) (paths []string, err error) { + path, err = fileurl.Clean(path) + if err != nil { + return nil, err + } + err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error { + if !recurse { + if path != f && path != filepath.Dir(f) { + return filepath.SkipDir + } + } + paths = append(paths, f) + return nil + }) + return paths, err +} + +// Dirs resolves filepaths to directories. It will return a list of unique +// directories. +func Dirs(paths []string) []string { + unique := map[string]struct{}{} + + for _, path := range paths { + // TODO: /dir/dir will register top level directory /dir + dir := filepath.Dir(path) + unique[dir] = struct{}{} + } + + return util.KeysSorted(unique) +} + +// SplitPrefix returns a tuple specifying the document prefix and the file +// path. +func SplitPrefix(path string) ([]string, string) { + // Non-prefixed URLs can be returned without modification and their contents + // can be rooted directly under data. 
+ if strings.Index(path, "://") == strings.Index(path, ":") { + return nil, path + } + parts := strings.SplitN(path, ":", 2) + if len(parts) == 2 && len(parts[0]) > 0 { + return strings.Split(parts[0], "."), parts[1] + } + return nil, path +} + +func (l *Result) merge(path string, result any) error { + switch result := result.(type) { + case bundle.Bundle: + for _, module := range result.Modules { + l.Modules[module.Path] = &RegoFile{ + Name: module.Path, + Parsed: module.Parsed, + Raw: module.Raw, + } + } + return l.mergeDocument(path, result.Data) + case *RegoFile: + l.Modules[CleanPath(path)] = result + return nil + default: + return l.mergeDocument(path, result) + } +} + +func (l *Result) mergeDocument(path string, doc any) error { + obj, ok := makeDir(l.path, doc) + if !ok { + return unsupportedDocumentType(path) + } + merged, ok := merge.InterfaceMaps(l.Documents, obj) + if !ok { + return mergeError(path) + } + for k := range merged { + l.Documents[k] = merged[k] + } + return nil +} + +func (l *Result) withParent(p string) *Result { + return &Result{ + Documents: l.Documents, + Modules: l.Modules, + path: append(l.path, p), + } +} + +func newResult() *Result { + return &Result{ + Documents: map[string]any{}, + Modules: map[string]*RegoFile{}, + } +} + +func all(fsys fs.FS, paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) { + errs := Errors{} + root := newResult() + + for _, path := range paths { + + // Paths can be prefixed with a string that specifies where content should be + // loaded under data. E.g., foo.bar:/path/to/some.json will load the content + // of some.json under {"foo": {"bar": ...}}. 
+ loaded := root + prefix, path := SplitPrefix(path) + if len(prefix) > 0 { + for _, part := range prefix { + loaded = loaded.withParent(part) + } + } + + allRec(fsys, path, filter, &errs, loaded, 0, f) + } + + if len(errs) > 0 { + return nil, errs + } + + return root, nil +} + +func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) { + + path, err := fileurl.Clean(path) + if err != nil { + errors.add(err) + return + } + + if err := checkForUNCPath(path); err != nil { + errors.add(err) + return + } + + var info fs.FileInfo + if fsys != nil { + info, err = fs.Stat(fsys, path) + } else { + info, err = os.Stat(path) + } + + if err != nil { + errors.add(err) + return + } + + if filter != nil && filter(path, info, depth) { + return + } + + if !info.IsDir() { + if err := f(loaded, path, depth); err != nil { + errors.add(err) + } + return + } + + // If we are recursing on directories then content must be loaded under path + // specified by directory hierarchy. 
+ if depth > 0 { + loaded = loaded.withParent(info.Name()) + } + + var files []fs.DirEntry + if fsys != nil { + files, err = fs.ReadDir(fsys, path) + } else { + files, err = os.ReadDir(path) + } + if err != nil { + errors.add(err) + return + } + + for _, file := range files { + allRec(fsys, filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f) + } +} + +func loadKnownTypes(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions, bundleLazyLoadingMode bool) (any, error) { + ext := filepath.Ext(path) + if handler := extension.FindExtension(ext); handler != nil { + m.Timer(metrics.RegoDataParse).Start() + + var value any + err := handler(bs, &value) + + m.Timer(metrics.RegoDataParse).Stop() + if err != nil { + return nil, fmt.Errorf("bundle %s: %w", path, err) + } + + return value, nil + } + switch ext { + case ".json": + return loadJSON(path, bs, m) + case ".rego": + return loadRego(path, bs, m, opts) + case ".yaml", ".yml": + return loadYAML(path, bs, m) + default: + if strings.HasSuffix(path, ".tar.gz") { + r, err := loadBundleFile(path, bs, m, opts, bundleLazyLoadingMode) + if err != nil { + err = fmt.Errorf("bundle %s: %w", path, err) + } + return r, err + } + } + return nil, unrecognizedFile(path) +} + +func loadFileForAnyType(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (any, error) { + module, err := loadRego(path, bs, m, opts) + if err == nil { + return module, nil + } + doc, err := loadJSON(path, bs, m) + if err == nil { + return doc, nil + } + doc, err = loadYAML(path, bs, m) + if err == nil { + return doc, nil + } + return nil, unrecognizedFile(path) +} + +func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions, bundleLazyLoadingMode bool) (bundle.Bundle, error) { + tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path) + br := bundle.NewCustomReader(tl). + WithRegoVersion(opts.RegoVersion). + WithCapabilities(opts.Capabilities). 
+ WithProcessAnnotations(opts.ProcessAnnotation). + WithMetrics(m). + WithSkipBundleVerification(true). + WithLazyLoadingMode(bundleLazyLoadingMode). + IncludeManifestInData(true) + return br.Read() +} + +func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (*RegoFile, error) { + m.Timer(metrics.RegoModuleParse).Start() + var module *ast.Module + var err error + module, err = ast.ParseModuleWithOpts(path, string(bs), opts) + m.Timer(metrics.RegoModuleParse).Stop() + if err != nil { + return nil, err + } + result := &RegoFile{ + Name: path, + Parsed: module, + Raw: bs, + } + return result, nil +} + +func loadJSON(path string, bs []byte, m metrics.Metrics) (any, error) { + m.Timer(metrics.RegoDataParse).Start() + var x any + err := util.UnmarshalJSON(bs, &x) + m.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return nil, fmt.Errorf("%s: %w", path, err) + } + return x, nil +} + +func loadYAML(path string, bs []byte, m metrics.Metrics) (any, error) { + m.Timer(metrics.RegoDataParse).Start() + bs, err := yaml.YAMLToJSON(bs) + m.Timer(metrics.RegoDataParse).Stop() + if err != nil { + return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err) + } + return loadJSON(path, bs, m) +} + +func makeDir(path []string, x any) (map[string]any, bool) { + if len(path) == 0 { + obj, ok := x.(map[string]any) + if !ok { + return nil, false + } + return obj, true + } + return makeDir(path[:len(path)-1], map[string]any{path[len(path)-1]: x}) +} + +// isUNC reports whether path is a UNC path. 
+func isUNC(path string) bool { + return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) +} + +func isSlash(c uint8) bool { + return c == '\\' || c == '/' +} + +func checkForUNCPath(path string) error { + if isUNC(path) { + return fmt.Errorf("UNC path read is not allowed: %s", path) + } + return nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/logging/buffered_logger.go b/vendor/github.com/open-policy-agent/opa/v1/logging/buffered_logger.go new file mode 100644 index 0000000000..1fbd560035 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/logging/buffered_logger.go @@ -0,0 +1,182 @@ +package logging + +import ( + "fmt" + "maps" + "sync" + "time" +) + +type logEntry struct { + level Level + message string + fields map[string]any + time time.Time +} + +// BufferedLogger captures log entries in memory until Flush is called, +// at which point it replays all buffered entries to the target logger. +// After Flush() is called, the BufferedLogger should not be used anymore. +type BufferedLogger struct { + mu sync.Mutex + buffer []logEntry + maxEntries int + currentLevel Level +} + +// NewBufferedLogger creates a new buffered logger that will buffer up to maxEntries. +func NewBufferedLogger(maxEntries int) *BufferedLogger { + if maxEntries <= 0 { + maxEntries = 1000 + } + return &BufferedLogger{ + buffer: make([]logEntry, 0, maxEntries), + maxEntries: maxEntries, + currentLevel: Info, + } +} + +func (b *BufferedLogger) addToBuffer(level Level, format string, args []any, fields map[string]any) { + message := format + if len(args) > 0 { + message = fmt.Sprintf(format, args...) 
+ } + + entry := logEntry{ + level: level, + message: message, + fields: fields, + time: time.Now(), + } + + b.mu.Lock() + defer b.mu.Unlock() + + if len(b.buffer) >= b.maxEntries { + b.buffer = b.buffer[1:] + } + b.buffer = append(b.buffer, entry) +} + +func (*BufferedLogger) logToTarget(target Logger, entry logEntry) { + fields := make(map[string]any, len(entry.fields)) + maps.Copy(fields, entry.fields) + fields["time"] = entry.time + + logger := target.WithFields(fields) + + switch entry.level { + case Debug: + logger.Debug("%s", entry.message) + case Info: + logger.Info("%s", entry.message) + case Warn: + logger.Warn("%s", entry.message) + case Error: + logger.Error("%s", entry.message) + } +} + +func (b *BufferedLogger) Debug(format string, args ...any) { + b.addToBuffer(Debug, format, args, nil) +} + +func (b *BufferedLogger) Info(format string, args ...any) { + b.addToBuffer(Info, format, args, nil) +} + +func (b *BufferedLogger) Warn(format string, args ...any) { + b.addToBuffer(Warn, format, args, nil) +} + +func (b *BufferedLogger) Error(format string, args ...any) { + b.addToBuffer(Error, format, args, nil) +} + +// WithFields returns a new logger with additional fields. +func (b *BufferedLogger) WithFields(fields map[string]any) Logger { + return &bufferedLoggerWithFields{ + parent: b, + fields: fields, + } +} + +// GetLevel returns the current log level. +func (b *BufferedLogger) GetLevel() Level { + b.mu.Lock() + defer b.mu.Unlock() + return b.currentLevel +} + +// SetLevel sets the log level. +func (b *BufferedLogger) SetLevel(level Level) { + b.mu.Lock() + defer b.mu.Unlock() + b.currentLevel = level +} + +// Close discards all buffered entries without flushing them. +// After calling Close, the BufferedLogger should not be used anymore. +func (b *BufferedLogger) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.buffer = nil +} + +// Flush replays all buffered entries to the target logger. 
+// After calling Flush, the BufferedLogger should not be used anymore. +// The caller should switch to using the target logger directly. +func (b *BufferedLogger) Flush(targetLogger Logger) { + if targetLogger == nil { + return + } + + b.mu.Lock() + targetLogger.SetLevel(b.currentLevel) + entries := b.buffer + b.buffer = nil + b.mu.Unlock() + + for _, entry := range entries { + b.logToTarget(targetLogger, entry) + } +} + +type bufferedLoggerWithFields struct { + parent *BufferedLogger + fields map[string]any +} + +func (b *bufferedLoggerWithFields) Debug(format string, args ...any) { + b.parent.addToBuffer(Debug, format, args, b.fields) +} + +func (b *bufferedLoggerWithFields) Info(format string, args ...any) { + b.parent.addToBuffer(Info, format, args, b.fields) +} + +func (b *bufferedLoggerWithFields) Warn(format string, args ...any) { + b.parent.addToBuffer(Warn, format, args, b.fields) +} + +func (b *bufferedLoggerWithFields) Error(format string, args ...any) { + b.parent.addToBuffer(Error, format, args, b.fields) +} + +func (b *bufferedLoggerWithFields) WithFields(fields map[string]any) Logger { + merged := make(map[string]any, len(b.fields)+len(fields)) + maps.Copy(merged, b.fields) + maps.Copy(merged, fields) + return &bufferedLoggerWithFields{ + parent: b.parent, + fields: merged, + } +} + +func (b *bufferedLoggerWithFields) GetLevel() Level { + return b.parent.GetLevel() +} + +func (b *bufferedLoggerWithFields) SetLevel(level Level) { + b.parent.SetLevel(level) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go b/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go new file mode 100644 index 0000000000..135785994f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go @@ -0,0 +1,470 @@ +package logging + +import ( + "context" + "fmt" + "io" + "log/slog" + "maps" + "net/http" + "time" + + "github.com/sirupsen/logrus" +) + +// Level log level for Logger +type Level uint8 + +const ( + // Error 
error log level + Error Level = iota + // Warn warn log level + Warn + // Info info log level + Info + // Debug debug log level + Debug +) + +// Logger provides interface for OPA logger implementations +type Logger interface { + Debug(fmt string, a ...any) + Info(fmt string, a ...any) + Error(fmt string, a ...any) + Warn(fmt string, a ...any) + + WithFields(map[string]any) Logger + + GetLevel() Level + SetLevel(Level) +} + +// LoggerWithContext is an optional interface that Logger implementations +// can implement to support extracting trace information from a context. +// Use WithContext to call this method on a Logger if it is supported. +type LoggerWithContext interface { + WithContext(context.Context) Logger +} + +// WithContext returns a logger with context information if the logger +// supports it (i.e., implements LoggerWithContext). Otherwise, the +// logger is returned unchanged. +func WithContext(logger Logger, ctx context.Context) Logger { + if lc, ok := logger.(LoggerWithContext); ok { + return lc.WithContext(ctx) + } + return logger +} + +// StandardLogger is the default OPA logger implementation. +type StandardLogger struct { + logger *logrus.Logger + fields map[string]any +} + +// New returns a new standard logger. +func New() *StandardLogger { + return &StandardLogger{ + logger: logrus.New(), + } +} + +// Get returns the standard logger used throughout OPA. +// +// Deprecated. Do not rely on the global logger. +func Get() *StandardLogger { + return &StandardLogger{ + logger: logrus.StandardLogger(), + } +} + +// SetOutput sets the underlying logrus output. +func (l *StandardLogger) SetOutput(w io.Writer) { + l.logger.SetOutput(w) +} + +// SetFormatter sets the underlying logrus formatter. 
+func (l *StandardLogger) SetFormatter(formatter logrus.Formatter) { + l.logger.SetFormatter(formatter) +} + +// WithFields provides additional fields to include in log output +func (l *StandardLogger) WithFields(fields map[string]any) Logger { + cp := *l + cp.fields = make(map[string]any) + maps.Copy(cp.fields, l.fields) + maps.Copy(cp.fields, fields) + return &cp +} + +// getFields returns additional fields of this logger +func (l *StandardLogger) getFields() map[string]any { + return l.fields +} + +// SetLevel sets the standard logger level. +func (l *StandardLogger) SetLevel(level Level) { + var logrusLevel logrus.Level + switch level { + case Error: // set logging level report Warn or higher (includes Error) + logrusLevel = logrus.WarnLevel + case Warn: + logrusLevel = logrus.WarnLevel + case Info: + logrusLevel = logrus.InfoLevel + case Debug: + logrusLevel = logrus.DebugLevel + default: + l.Warn("unknown log level %v", level) + logrusLevel = logrus.InfoLevel + } + + l.logger.SetLevel(logrusLevel) +} + +// GetLevel returns the standard logger level. +func (l *StandardLogger) GetLevel() Level { + logrusLevel := l.logger.GetLevel() + + var level Level + switch logrusLevel { + case logrus.WarnLevel: + level = Error + case logrus.InfoLevel: + level = Info + case logrus.DebugLevel: + level = Debug + default: + l.Warn("unknown log level %v", logrusLevel) + level = Info + } + + return level +} + +// Debug logs at debug level +func (l *StandardLogger) Debug(fmt string, a ...any) { + if len(a) == 0 { + l.logger.WithFields(l.getFields()).Debug(fmt) + return + } + l.logger.WithFields(l.getFields()).Debugf(fmt, a...) +} + +// Info logs at info level +func (l *StandardLogger) Info(fmt string, a ...any) { + if len(a) == 0 { + l.logger.WithFields(l.getFields()).Info(fmt) + return + } + l.logger.WithFields(l.getFields()).Infof(fmt, a...) 
+} + +// Error logs at error level +func (l *StandardLogger) Error(fmt string, a ...any) { + if len(a) == 0 { + l.logger.WithFields(l.getFields()).Error(fmt) + return + } + l.logger.WithFields(l.getFields()).Errorf(fmt, a...) +} + +// Warn logs at warn level +func (l *StandardLogger) Warn(fmt string, a ...any) { + if len(a) == 0 { + l.logger.WithFields(l.getFields()).Warn(fmt) + return + } + l.logger.WithFields(l.getFields()).Warnf(fmt, a...) +} + +// NoOpLogger logging implementation that does nothing +type NoOpLogger struct { + level Level + fields map[string]any +} + +// NewNoOpLogger instantiates new NoOpLogger +func NewNoOpLogger() *NoOpLogger { + return &NoOpLogger{ + level: Info, + } +} + +// WithFields provides additional fields to include in log output. +// Implemented here primarily to be able to switch between implementations without loss of data. +func (l *NoOpLogger) WithFields(fields map[string]any) Logger { + cp := *l + cp.fields = fields + return &cp +} + +// Debug noop +func (*NoOpLogger) Debug(string, ...any) {} + +// Info noop +func (*NoOpLogger) Info(string, ...any) {} + +// Error noop +func (*NoOpLogger) Error(string, ...any) {} + +// Warn noop +func (*NoOpLogger) Warn(string, ...any) {} + +// SetLevel set log level +func (l *NoOpLogger) SetLevel(level Level) { + l.level = level +} + +// GetLevel get log level +func (l *NoOpLogger) GetLevel() Level { + return l.level +} + +type requestContextKey string + +const reqCtxKey = requestContextKey("request-context-key") + +// RequestContext represents the request context used to store data +// related to the request that could be used on logs. +type RequestContext struct { + ClientAddr string + ReqID uint64 + ReqMethod string + ReqPath string + HTTPRequestContext HTTPRequestContext +} + +type HTTPRequestContext struct { + Header http.Header +} + +// Fields adapts the RequestContext fields to logrus.Fields. 
+func (rctx RequestContext) Fields() logrus.Fields { + return logrus.Fields{ + "client_addr": rctx.ClientAddr, + "req_id": rctx.ReqID, + "req_method": rctx.ReqMethod, + "req_path": rctx.ReqPath, + } +} + +// NewContext returns a copy of parent with an associated RequestContext. +func NewContext(parent context.Context, val *RequestContext) context.Context { + return context.WithValue(parent, reqCtxKey, val) +} + +// FromContext returns the RequestContext associated with ctx, if any. +func FromContext(ctx context.Context) (*RequestContext, bool) { + requestContext, ok := ctx.Value(reqCtxKey).(*RequestContext) + return requestContext, ok +} + +const httpReqCtxKey = requestContextKey("http-request-context-key") + +func WithHTTPRequestContext(parent context.Context, val *HTTPRequestContext) context.Context { + return context.WithValue(parent, httpReqCtxKey, val) +} + +func HTTPRequestContextFromContext(ctx context.Context) (*HTTPRequestContext, bool) { + requestContext, ok := ctx.Value(httpReqCtxKey).(*HTTPRequestContext) + return requestContext, ok +} + +const decisionCtxKey = requestContextKey("decision_id") + +func WithDecisionID(parent context.Context, id string) context.Context { + return context.WithValue(parent, decisionCtxKey, id) +} + +func DecisionIDFromContext(ctx context.Context) (string, bool) { + s, ok := ctx.Value(decisionCtxKey).(string) + return s, ok +} + +const batchDecisionCtxKey = requestContextKey("batch_decision_id") + +func WithBatchDecisionID(parent context.Context, id string) context.Context { + return context.WithValue(parent, batchDecisionCtxKey, id) +} + +func BatchDecisionIDFromContext(ctx context.Context) (string, bool) { + s, ok := ctx.Value(batchDecisionCtxKey).(string) + return s, ok +} + +// SlogHandler adapts a Logger to slog.Handler interface +type SlogHandler struct { + logger Logger +} + +// NewSlogHandler creates an slog.Handler from a Logger +func NewSlogHandler(logger Logger) slog.Handler { + return &SlogHandler{logger: logger} 
+} + +func (h *SlogHandler) Enabled(_ context.Context, level slog.Level) bool { + lvl := h.logger.GetLevel() + switch level { + case slog.LevelDebug: + return lvl >= Debug + case slog.LevelInfo: + return lvl >= Info + case slog.LevelWarn: + return lvl >= Warn + case slog.LevelError: + return lvl >= Error + } + return true +} + +func (h *SlogHandler) Handle(ctx context.Context, record slog.Record) error { + attrs := make(map[string]any) + record.Attrs(func(a slog.Attr) bool { + attrs[a.Key] = a.Value.Any() + return true + }) + + logger := h.logger + if len(attrs) > 0 { + logger = logger.WithFields(attrs) + } + if ctx != nil { + logger = WithContext(logger, ctx) + } + + msg := record.Message + switch record.Level { + case slog.LevelDebug: + logger.Debug(msg) + case slog.LevelInfo: + logger.Info(msg) + case slog.LevelWarn: + logger.Warn(msg) + case slog.LevelError: + logger.Error(msg) + default: + logger.Info(msg) + } + return nil +} + +func (h *SlogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + fields := make(map[string]any, len(attrs)) + for _, a := range attrs { + fields[a.Key] = a.Value.Any() + } + return &SlogHandler{ + logger: h.logger.WithFields(fields), + } +} + +func (h *SlogHandler) WithGroup(name string) slog.Handler { + return h +} + +// loggerFromSlogHandler wraps an slog.Handler to implement the Logger interface +type loggerFromSlogHandler struct { + handler slog.Handler + level Level + fields map[string]any + ctx context.Context +} + +var _ LoggerWithContext = (*loggerFromSlogHandler)(nil) + +// NewLoggerFromSlogHandler creates a Logger from an slog.Handler +func NewLoggerFromSlogHandler(handler slog.Handler, level Level) Logger { + return &loggerFromSlogHandler{ + handler: handler, + level: level, + fields: make(map[string]any), + ctx: context.Background(), + } +} + +func (l *loggerFromSlogHandler) log(level slog.Level, format string, args ...any) { + if !l.handler.Enabled(l.ctx, level) { + return + } + msg := format + if len(args) > 0 { + msg 
= fmt.Sprintf(format, args...) + } + + attrs := make([]slog.Attr, 0, len(l.fields)) + for k, v := range l.fields { + var attr slog.Attr + switch val := v.(type) { + case string: + attr = slog.String(k, val) + case int: + attr = slog.Int(k, val) + case int64: + attr = slog.Int64(k, val) + case uint64: + attr = slog.Uint64(k, val) + case float64: + attr = slog.Float64(k, val) + case bool: + attr = slog.Bool(k, val) + case time.Time: + attr = slog.Time(k, val) + case time.Duration: + attr = slog.Duration(k, val) + default: + attr = slog.Any(k, v) + } + attrs = append(attrs, attr) + } + + record := slog.NewRecord(time.Now(), level, msg, 0) + record.AddAttrs(attrs...) + + _ = l.handler.Handle(l.ctx, record) +} + +func (l *loggerFromSlogHandler) Debug(format string, args ...any) { + l.log(slog.LevelDebug, format, args...) +} + +func (l *loggerFromSlogHandler) Info(format string, args ...any) { + l.log(slog.LevelInfo, format, args...) +} + +func (l *loggerFromSlogHandler) Warn(format string, args ...any) { + l.log(slog.LevelWarn, format, args...) +} + +func (l *loggerFromSlogHandler) Error(format string, args ...any) { + l.log(slog.LevelError, format, args...) 
+} + +func (l *loggerFromSlogHandler) WithFields(fields map[string]any) Logger { + merged := make(map[string]any, len(l.fields)+len(fields)) + maps.Copy(merged, l.fields) + maps.Copy(merged, fields) + return &loggerFromSlogHandler{ + handler: l.handler, + level: l.level, + fields: merged, + ctx: l.ctx, + } +} + +func (l *loggerFromSlogHandler) WithContext(ctx context.Context) Logger { + return &loggerFromSlogHandler{ + handler: l.handler, + level: l.level, + fields: l.fields, + ctx: ctx, + } +} + +func (l *loggerFromSlogHandler) GetLevel() Level { + return l.level +} + +func (l *loggerFromSlogHandler) SetLevel(level Level) { + l.level = level +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go new file mode 100644 index 0000000000..481f27337e --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go @@ -0,0 +1,370 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package metrics contains helpers for performance metric management inside the policy engine. +package metrics + +import ( + "encoding/json" + "fmt" + "slices" + "strings" + "sync" + "sync/atomic" + "time" + + go_metrics "github.com/rcrowley/go-metrics" +) + +// Well-known metric names. 
+const ( + BundleRequest = "bundle_request" + ServerHandler = "server_handler" + ServerQueryCacheHit = "server_query_cache_hit" + SDKDecisionEval = "sdk_decision_eval" + RegoQueryCompile = "rego_query_compile" + RegoQueryEval = "rego_query_eval" + RegoQueryParse = "rego_query_parse" + RegoModuleParse = "rego_module_parse" + RegoDataParse = "rego_data_parse" + RegoModuleCompile = "rego_module_compile" + RegoPartialEval = "rego_partial_eval" + RegoInputParse = "rego_input_parse" + RegoLoadFiles = "rego_load_files" + RegoLoadBundles = "rego_load_bundles" + RegoExternalResolve = "rego_external_resolve" + CompilePrepPartial = "compile_prep_partial" + CompileEvalConstraints = "compile_eval_constraints" + CompileTranslateQueries = "compile_translate_queries" + CompileExtractAnnotationsUnknowns = "compile_extract_annotations_unknowns" + CompileExtractAnnotationsMask = "compile_extract_annotations_mask" + CompileEvalMaskRule = "compile_eval_mask_rule" +) + +// Info contains attributes describing the underlying metrics provider. +type Info struct { + Name string `json:"name"` // name is a unique human-readable identifier for the provider. +} + +// Metrics defines the interface for a collection of performance metrics in the +// policy engine. +type Metrics interface { + Info() Info + Timer(name string) Timer + Histogram(name string) Histogram + Counter(name string) Counter + All() map[string]any + Clear() + json.Marshaler +} + +type TimerMetrics interface { + Timers() map[string]any +} + +type metrics struct { + mtx sync.Mutex + timers map[string]Timer + histograms map[string]Histogram + counters map[string]Counter +} + +// New returns a new Metrics object. +func New() Metrics { + return &metrics{ + timers: map[string]Timer{}, + histograms: map[string]Histogram{}, + counters: map[string]Counter{}, + } +} + +// NoOp returns a Metrics implementation that does nothing and costs nothing. +// Used when metrics are expected, but not of interest. 
+func NoOp() Metrics { + return noOpMetricsInstance +} + +type metric struct { + Key string + Value any +} + +func (*metrics) Info() Info { + return Info{ + Name: "", + } +} + +func (m *metrics) String() string { + all := m.All() + sorted := make([]metric, 0, len(all)) + + for key, value := range all { + sorted = append(sorted, metric{ + Key: key, + Value: value, + }) + } + + slices.SortFunc(sorted, func(a, b metric) int { + return strings.Compare(a.Key, b.Key) + }) + + buf := make([]string, len(sorted)) + for i := range sorted { + buf[i] = fmt.Sprintf("%v:%v", sorted[i].Key, sorted[i].Value) + } + + return strings.Join(buf, " ") +} + +func (m *metrics) MarshalJSON() ([]byte, error) { + return json.Marshal(m.All()) +} + +func (m *metrics) Timer(name string) Timer { + m.mtx.Lock() + defer m.mtx.Unlock() + t, ok := m.timers[name] + if !ok { + t = &timer{} + m.timers[name] = t + } + return t +} + +func (m *metrics) Histogram(name string) Histogram { + m.mtx.Lock() + defer m.mtx.Unlock() + h, ok := m.histograms[name] + if !ok { + h = newHistogram() + m.histograms[name] = h + } + return h +} + +func (m *metrics) Counter(name string) Counter { + m.mtx.Lock() + defer m.mtx.Unlock() + c, ok := m.counters[name] + if !ok { + zero := counter{} + c = &zero + m.counters[name] = c + } + return c +} + +func (m *metrics) All() map[string]any { + m.mtx.Lock() + defer m.mtx.Unlock() + result := make(map[string]any, len(m.timers)+len(m.histograms)+len(m.counters)) + for name, timer := range m.timers { + result[m.formatKey(name, timer)] = timer.Value() + } + for name, hist := range m.histograms { + result[m.formatKey(name, hist)] = hist.Value() + } + for name, cntr := range m.counters { + result[m.formatKey(name, cntr)] = cntr.Value() + } + return result +} + +func (m *metrics) Timers() map[string]any { + m.mtx.Lock() + defer m.mtx.Unlock() + ts := make(map[string]any, len(m.timers)) + for n, t := range m.timers { + ts[m.formatKey(n, t)] = t.Value() + } + return ts +} + +func (m 
*metrics) Clear() { + m.mtx.Lock() + defer m.mtx.Unlock() + m.timers = map[string]Timer{} + m.histograms = map[string]Histogram{} + m.counters = map[string]Counter{} +} + +func (*metrics) formatKey(name string, metrics any) string { + switch metrics.(type) { + case Timer: + return "timer_" + name + "_ns" + case Histogram: + return "histogram_" + name + case Counter: + return "counter_" + name + default: + return name + } +} + +// Timer defines the interface for a restartable timer that accumulates elapsed +// time. +type Timer interface { + Value() any + Int64() int64 + // Start or resume a timer's time tracking. + Start() + // Stop a timer, and accumulate the delta (in nanoseconds) since it was last + // started. + Stop() int64 +} + +type timer struct { + mtx sync.Mutex + start time.Time + value int64 +} + +func (t *timer) Start() { + t.mtx.Lock() + t.start = time.Now() + t.mtx.Unlock() +} + +func (t *timer) Stop() int64 { + t.mtx.Lock() + defer t.mtx.Unlock() + + var delta int64 + if !t.start.IsZero() { + // Add the delta to the accumulated time value so far. + delta = time.Since(t.start).Nanoseconds() + t.value += delta + t.start = time.Time{} // Reset the start time to zero. + } + + return delta +} + +func (t *timer) Value() any { + return t.Int64() +} + +func (t *timer) Int64() int64 { + t.mtx.Lock() + defer t.mtx.Unlock() + return t.value +} + +// Histogram defines the interface for a histogram with hardcoded percentiles. +type Histogram interface { + Value() any + Update(int64) +} + +type histogram struct { + hist go_metrics.Histogram // is thread-safe because of the underlying ExpDecaySample +} + +func newHistogram() Histogram { + // NOTE(tsandall): the reservoir size and alpha factor are taken from + // https://github.com/rcrowley/go-metrics. They may need to be tweaked in + // the future. 
+ sample := go_metrics.NewExpDecaySample(1028, 0.015) + hist := go_metrics.NewHistogram(sample) + return &histogram{hist} +} + +func (h *histogram) Update(v int64) { + h.hist.Update(v) +} + +func (h *histogram) Value() any { + values := make(map[string]any, 12) + snap := h.hist.Snapshot() + percentiles := snap.Percentiles([]float64{ + 0.5, + 0.75, + 0.9, + 0.95, + 0.99, + 0.999, + 0.9999, + }) + values["count"] = snap.Count() + values["min"] = snap.Min() + values["max"] = snap.Max() + values["mean"] = snap.Mean() + values["stddev"] = snap.StdDev() + values["median"] = percentiles[0] + values["75%"] = percentiles[1] + values["90%"] = percentiles[2] + values["95%"] = percentiles[3] + values["99%"] = percentiles[4] + values["99.9%"] = percentiles[5] + values["99.99%"] = percentiles[6] + return values +} + +// Counter defines the interface for a monotonic increasing counter. +type Counter interface { + Value() any + Incr() + Add(n uint64) +} + +type counter struct { + c uint64 +} + +func (c *counter) Incr() { + atomic.AddUint64(&c.c, 1) +} + +func (c *counter) Add(n uint64) { + atomic.AddUint64(&c.c, n) +} + +func (c *counter) Value() any { + return atomic.LoadUint64(&c.c) +} + +func Statistics(num ...int64) any { + t := newHistogram() + for _, n := range num { + t.Update(n) + } + return t.Value() +} + +type noOpMetrics struct{} +type noOpTimer struct{} +type noOpHistogram struct{} +type noOpCounter struct{} + +var ( + noOpMetricsInstance = &noOpMetrics{} + noOpTimerInstance = &noOpTimer{} + noOpHistogramInstance = &noOpHistogram{} + noOpCounterInstance = &noOpCounter{} +) + +func (*noOpMetrics) Info() Info { return Info{Name: ""} } +func (*noOpMetrics) Timer(name string) Timer { return noOpTimerInstance } +func (*noOpMetrics) Histogram(name string) Histogram { return noOpHistogramInstance } +func (*noOpMetrics) Counter(name string) Counter { return noOpCounterInstance } +func (*noOpMetrics) All() map[string]any { return nil } +func (*noOpMetrics) Clear() {} +func 
(*noOpMetrics) MarshalJSON() ([]byte, error) { + return []byte(`{"name": ""}`), nil +} + +func (*noOpTimer) Start() {} +func (*noOpTimer) Stop() int64 { return 0 } +func (*noOpTimer) Value() any { return 0 } +func (*noOpTimer) Int64() int64 { return 0 } + +func (*noOpHistogram) Update(v int64) {} +func (*noOpHistogram) Value() any { return nil } + +func (*noOpCounter) Incr() {} +func (*noOpCounter) Add(_ uint64) {} +func (*noOpCounter) Value() any { return 0 } +func (*noOpCounter) Int64() int64 { return 0 } diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go b/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go new file mode 100644 index 0000000000..dcc5e2679d --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go @@ -0,0 +1,24 @@ +package rego + +// HaltError is an error type to return from a custom function implementation +// that will abort the evaluation process (analogous to topdown.Halt). +type HaltError struct { + err error +} + +// Error delegates to the wrapped error +func (h *HaltError) Error() string { + return h.err.Error() +} + +// NewHaltError wraps an error such that the evaluation process will stop +// when it occurs. +func NewHaltError(err error) error { + return &HaltError{err: err} +} + +// ErrorDetails interface is satisfied by an error that provides further +// details. +type ErrorDetails interface { + Lines() []string +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go b/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go new file mode 100644 index 0000000000..0043321acf --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go @@ -0,0 +1,49 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package rego + +import ( + "context" + "sync" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" +) + +var targetPlugins = map[string]TargetPlugin{} +var pluginMtx sync.Mutex + +type TargetPlugin interface { + IsTarget(string) bool + PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error) +} + +type TargetPluginEval interface { + Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error) +} + +func (*Rego) targetPlugin(tgt string) TargetPlugin { + for _, p := range targetPlugins { + if p.IsTarget(tgt) { + return p + } + } + return nil +} + +func RegisterPlugin(name string, p TargetPlugin) { + pluginMtx.Lock() + defer pluginMtx.Unlock() + if _, ok := targetPlugins[name]; ok { + panic("plugin already registered " + name) + } + targetPlugins[name] = p +} + +func ResetTargetPlugins() { + pluginMtx.Lock() + defer pluginMtx.Unlock() + targetPlugins = map[string]TargetPlugin{} +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go b/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go new file mode 100644 index 0000000000..a69dba1bb8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go @@ -0,0 +1,3035 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package rego exposes high level APIs for evaluating Rego policies. 
+package rego + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "maps" + "strings" + "time" + + bundleUtils "github.com/open-policy-agent/opa/internal/bundle" + "github.com/open-policy-agent/opa/internal/compiler/wasm" + "github.com/open-policy-agent/opa/internal/future" + "github.com/open-policy-agent/opa/internal/planner" + "github.com/open-policy-agent/opa/internal/rego/opa" + "github.com/open-policy-agent/opa/internal/wasm/encoding" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/ir" + "github.com/open-policy-agent/opa/v1/loader" + "github.com/open-policy-agent/opa/v1/loader/filter" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/inmem" + "github.com/open-policy-agent/opa/v1/topdown" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + defaultPartialNamespace = "partial" + wasmVarPrefix = "^" + + targetWasm = "wasm" + targetRego = "rego" +) + +// CompileResult represents the result of compiling a Rego query, zero or more +// Rego modules, and arbitrary contextual data into an executable. +type CompileResult struct { + Bytes []byte `json:"bytes"` +} + +// PartialQueries contains the queries and support modules produced by partial +// evaluation. +type PartialQueries struct { + Queries []ast.Body `json:"queries,omitempty"` + Support []*ast.Module `json:"modules,omitempty"` +} + +// PartialResult represents the result of partial evaluation. The result can be +// used to generate a new query that can be run when inputs are known. 
+type PartialResult struct { + compiler *ast.Compiler + store storage.Store + body ast.Body + builtinDecls map[string]*ast.Builtin + builtinFuncs map[string]*topdown.Builtin +} + +// Rego returns an object that can be evaluated to produce a query result. +func (pr PartialResult) Rego(options ...func(*Rego)) *Rego { + options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body)) + r := New(options...) + + // Propagate any custom builtins. + maps.Copy(r.builtinDecls, pr.builtinDecls) + maps.Copy(r.builtinFuncs, pr.builtinFuncs) + return r +} + +// preparedQuery is a wrapper around a Rego object which has pre-processed +// state stored on it. Once prepared there are a more limited number of actions +// that can be taken with it. It will, however, be able to evaluate faster since +// it will not have to re-parse or compile as much. +type preparedQuery struct { + r *Rego + cfg *PrepareConfig +} + +// EvalContext defines the set of options allowed to be set at evaluation +// time. Any other options will need to be set on a new Rego object. 
+type EvalContext struct { + hasInput bool + time time.Time + seed io.Reader + rawInput *any + parsedInput ast.Value + metrics metrics.Metrics + txn storage.Transaction + instrument bool + instrumentation *topdown.Instrumentation + partialNamespace string + queryTracers []topdown.QueryTracer + compiledQuery compiledQuery + unknowns []string + disableInlining []ast.Ref + nondeterministicBuiltins bool + parsedUnknowns []*ast.Term + indexing bool + earlyExit bool + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + ndBuiltinCache builtins.NDBCache + resolvers []refResolver + httpRoundTripper topdown.CustomizeRoundTripper + sortSets bool + copyMaps bool + printHook print.Hook + capabilities *ast.Capabilities + strictBuiltinErrors bool + virtualCache topdown.VirtualCache + baseCache topdown.BaseCache + tracing tracing.Options + externalCancel topdown.Cancel // Note(philip): If non-nil, the cancellation is handled outside of this package. +} + +func (e *EvalContext) RawInput() *any { + return e.rawInput +} + +func (e *EvalContext) ParsedInput() ast.Value { + return e.parsedInput +} + +func (e *EvalContext) Time() time.Time { + return e.time +} + +func (e *EvalContext) Seed() io.Reader { + return e.seed +} + +func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache { + return e.interQueryBuiltinCache +} + +func (e *EvalContext) InterQueryBuiltinValueCache() cache.InterQueryValueCache { + return e.interQueryBuiltinValueCache +} + +func (e *EvalContext) PrintHook() print.Hook { + return e.printHook +} + +func (e *EvalContext) Metrics() metrics.Metrics { + return e.metrics +} + +func (e *EvalContext) StrictBuiltinErrors() bool { + return e.strictBuiltinErrors +} + +func (e *EvalContext) NDBCache() builtins.NDBCache { + return e.ndBuiltinCache +} + +func (e *EvalContext) CompiledQuery() ast.Body { + return e.compiledQuery.query +} + +func (e *EvalContext) Capabilities() *ast.Capabilities { + return 
e.capabilities +} + +func (e *EvalContext) Transaction() storage.Transaction { + return e.txn +} + +func (e *EvalContext) TracingOpts() tracing.Options { + return e.tracing +} + +func (e *EvalContext) ExternalCancel() topdown.Cancel { + return e.externalCancel +} + +func (e *EvalContext) QueryTracers() []topdown.QueryTracer { + return e.queryTracers +} + +// EvalOption defines a function to set an option on an EvalConfig +type EvalOption func(*EvalContext) + +// EvalInput configures the input for a Prepared Query's evaluation +func EvalInput(input any) EvalOption { + return func(e *EvalContext) { + e.rawInput = &input + e.hasInput = true + } +} + +// EvalParsedInput configures the input for a Prepared Query's evaluation +func EvalParsedInput(input ast.Value) EvalOption { + return func(e *EvalContext) { + e.parsedInput = input + e.hasInput = true + } +} + +// EvalMetrics configures the metrics for a Prepared Query's evaluation +func EvalMetrics(metric metrics.Metrics) EvalOption { + return func(e *EvalContext) { + e.metrics = metric + } +} + +// EvalTransaction configures the Transaction for a Prepared Query's evaluation +func EvalTransaction(txn storage.Transaction) EvalOption { + return func(e *EvalContext) { + e.txn = txn + } +} + +// EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation +func EvalInstrument(instrument bool) EvalOption { + return func(e *EvalContext) { + e.instrument = instrument + } +} + +// EvalTracer configures a tracer for a Prepared Query's evaluation +// +// Deprecated: Use EvalQueryTracer instead. 
+func EvalTracer(tracer topdown.Tracer) EvalOption { + return func(e *EvalContext) { + if tracer != nil { + e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer)) + } + } +} + +// EvalQueryTracer configures a tracer for a Prepared Query's evaluation +func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption { + return func(e *EvalContext) { + if tracer != nil { + e.queryTracers = append(e.queryTracers, tracer) + } + } +} + +// EvalPartialNamespace returns an argument that sets the namespace to use for +// partial evaluation results. The namespace must be a valid package path +// component. +func EvalPartialNamespace(ns string) EvalOption { + return func(e *EvalContext) { + e.partialNamespace = ns + } +} + +// EvalUnknowns returns an argument that sets the values to treat as +// unknown during partial evaluation. +func EvalUnknowns(unknowns []string) EvalOption { + return func(e *EvalContext) { + e.unknowns = unknowns + } +} + +// EvalDisableInlining returns an argument that adds a set of paths to exclude from +// partial evaluation inlining. +func EvalDisableInlining(paths []ast.Ref) EvalOption { + return func(e *EvalContext) { + e.disableInlining = paths + } +} + +// EvalParsedUnknowns returns an argument that sets the values to treat +// as unknown during partial evaluation. +func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption { + return func(e *EvalContext) { + e.parsedUnknowns = unknowns + } +} + +// EvalRuleIndexing will disable indexing optimizations for the +// evaluation. This should only be used when tracing in debug mode. +func EvalRuleIndexing(enabled bool) EvalOption { + return func(e *EvalContext) { + e.indexing = enabled + } +} + +// EvalEarlyExit will disable 'early exit' optimizations for the +// evaluation. This should only be used when tracing in debug mode. 
+func EvalEarlyExit(enabled bool) EvalOption { + return func(e *EvalContext) { + e.earlyExit = enabled + } +} + +// EvalTime sets the wall clock time to use during policy evaluation. +// time.now_ns() calls will return this value. +func EvalTime(x time.Time) EvalOption { + return func(e *EvalContext) { + e.time = x + } +} + +// EvalSeed sets a reader that will seed randomization required by built-in functions. +// If a seed is not provided crypto/rand.Reader is used. +func EvalSeed(r io.Reader) EvalOption { + return func(e *EvalContext) { + e.seed = r + } +} + +// EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize +// during evaluation. +func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption { + return func(e *EvalContext) { + e.interQueryBuiltinCache = c + } +} + +// EvalInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize +// during evaluation. +func EvalInterQueryBuiltinValueCache(c cache.InterQueryValueCache) EvalOption { + return func(e *EvalContext) { + e.interQueryBuiltinValueCache = c + } +} + +// EvalNDBuiltinCache sets the non-deterministic builtin cache that built-in functions can +// use during evaluation. +func EvalNDBuiltinCache(c builtins.NDBCache) EvalOption { + return func(e *EvalContext) { + e.ndBuiltinCache = c + } +} + +// EvalResolver sets a Resolver for a specified ref path for this evaluation. +func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption { + return func(e *EvalContext) { + e.resolvers = append(e.resolvers, refResolver{ref, r}) + } +} + +// EvalHTTPRoundTripper allows customizing the http.RoundTripper for this evaluation. +func EvalHTTPRoundTripper(t topdown.CustomizeRoundTripper) EvalOption { + return func(e *EvalContext) { + e.httpRoundTripper = t + } +} + +// EvalSortSets causes the evaluator to sort sets before returning them as JSON arrays. 
+func EvalSortSets(yes bool) EvalOption { + return func(e *EvalContext) { + e.sortSets = yes + } +} + +// EvalCopyMaps causes the evaluator to copy `map[string]any`s before returning them. +func EvalCopyMaps(yes bool) EvalOption { + return func(e *EvalContext) { + e.copyMaps = yes + } +} + +// EvalPrintHook sets the object to use for handling print statement outputs. +func EvalPrintHook(ph print.Hook) EvalOption { + return func(e *EvalContext) { + e.printHook = ph + } +} + +// EvalVirtualCache sets the topdown.VirtualCache to use for evaluation. +// This is optional, and if not set, the default cache is used. +func EvalVirtualCache(vc topdown.VirtualCache) EvalOption { + return func(e *EvalContext) { + e.virtualCache = vc + } +} + +// EvalBaseCache sets the topdown.BaseCache to use for evaluation. +// This is optional, and if not set, the default cache is used. +func EvalBaseCache(bc topdown.BaseCache) EvalOption { + return func(e *EvalContext) { + e.baseCache = bc + } +} + +// EvalNondeterministicBuiltins causes non-deterministic builtins to be evalued +// during partial evaluation. This is needed to pull in external data, or validate +// a JWT, during PE, so that the result informs what queries are returned. +func EvalNondeterministicBuiltins(yes bool) EvalOption { + return func(e *EvalContext) { + e.nondeterministicBuiltins = yes + } +} + +// EvalExternalCancel sets an external topdown.Cancel for the interpreter to use +// for cancellation. This is useful for batch-evaluation of many rego queries. 
+func EvalExternalCancel(ec topdown.Cancel) EvalOption { + return func(e *EvalContext) { + e.externalCancel = ec + } +} + +func (pq preparedQuery) Modules() map[string]*ast.Module { + mods := make(map[string]*ast.Module) + + maps.Copy(mods, pq.r.parsedModules) + + for _, b := range pq.r.bundles { + for _, mod := range b.Modules { + mods[mod.Path] = mod.Parsed + } + } + + return mods +} + +// newEvalContext creates a new EvalContext overlaying any EvalOptions over top +// the Rego object on the preparedQuery. The returned function should be called +// once the evaluation is complete to close any transactions that might have +// been opened. +func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) { + ectx := &EvalContext{ + hasInput: false, + rawInput: nil, + parsedInput: nil, + metrics: nil, + txn: nil, + instrument: false, + instrumentation: nil, + partialNamespace: pq.r.partialNamespace, + queryTracers: nil, + unknowns: pq.r.unknowns, + parsedUnknowns: pq.r.parsedUnknowns, + nondeterministicBuiltins: pq.r.nondeterministicBuiltins, + compiledQuery: compiledQuery{}, + indexing: true, + earlyExit: true, + resolvers: pq.r.resolvers, + printHook: pq.r.printHook, + capabilities: pq.r.capabilities, + strictBuiltinErrors: pq.r.strictBuiltinErrors, + tracing: pq.r.distributedTracingOpts, + } + + for _, o := range options { + o(ectx) + } + + if ectx.metrics == nil { + ectx.metrics = metrics.New() + } + + if ectx.instrument { + ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics) + } + + // Default to an empty "finish" function + finishFunc := func(context.Context) {} + + var err error + ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining) + if err != nil { + return nil, finishFunc, err + } + + if ectx.txn == nil { + ectx.txn, err = pq.r.store.NewTransaction(ctx) + if err != nil { + return nil, finishFunc, err + } + finishFunc = func(ctx context.Context) { + 
pq.r.store.Abort(ctx, ectx.txn) + } + } + + // If we didn't get an input specified in the Eval options + // then fall back to the Rego object's input fields. + if !ectx.hasInput { + ectx.rawInput = pq.r.rawInput + ectx.parsedInput = pq.r.parsedInput + } + + if ectx.parsedInput == nil { + if ectx.rawInput == nil { + // Fall back to the original Rego objects input if none was specified + // Note that it could still be nil + ectx.rawInput = pq.r.rawInput + } + + if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target + pq.r.target != targetWasm { + ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics) + if err != nil { + return nil, finishFunc, err + } + } + } + + return ectx, finishFunc, nil +} + +// PreparedEvalQuery holds the prepared Rego state that has been pre-processed +// for subsequent evaluations. +type PreparedEvalQuery struct { + preparedQuery +} + +// Eval evaluates this PartialResult's Rego object with additional eval options +// and returns a ResultSet. +// If options are provided they will override the original Rego options respective value. +// The original Rego object transaction will *not* be re-used. A new transaction will be opened +// if one is not provided with an EvalOption. +func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) { + ectx, finish, err := pq.newEvalContext(ctx, options) + if err != nil { + return nil, err + } + defer finish(ctx) + + ectx.compiledQuery = pq.r.compiledQueries[evalQueryType] + + return pq.r.eval(ctx, ectx) +} + +// PreparedPartialQuery holds the prepared Rego state that has been pre-processed +// for partial evaluations. +type PreparedPartialQuery struct { + preparedQuery +} + +// Partial runs partial evaluation on the prepared query and returns the result. +// The original Rego object transaction will *not* be re-used. A new transaction will be opened +// if one is not provided with an EvalOption. 
+func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) { + ectx, finish, err := pq.newEvalContext(ctx, options) + if err != nil { + return nil, err + } + defer finish(ctx) + + ectx.compiledQuery = pq.r.compiledQueries[partialQueryType] + + return pq.r.partial(ctx, ectx) +} + +// Errors represents a collection of errors returned when evaluating Rego. +type Errors []error + +func (errs Errors) Error() string { + if len(errs) == 0 { + return "no error" + } + if len(errs) == 1 { + return fmt.Sprintf("1 error occurred: %v", errs[0].Error()) + } + buf := []string{fmt.Sprintf("%v errors occurred", len(errs))} + for _, err := range errs { + buf = append(buf, err.Error()) + } + return strings.Join(buf, "\n") +} + +var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective") + +// IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by +// this package to indicate that partial evaluation was ineffective. +func IsPartialEvaluationNotEffectiveErr(err error) bool { + errs, ok := err.(Errors) + if !ok { + return false + } + return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective +} + +type compiledQuery struct { + query ast.Body + compiler ast.QueryCompiler +} + +type queryType int + +// Define a query type for each of the top level Rego +// API's that compile queries differently. +const ( + evalQueryType queryType = iota + partialResultQueryType + partialQueryType + compileQueryType +) + +type loadPaths struct { + paths []string + filter loader.Filter +} + +// Rego constructs a query and can be evaluated to obtain results. 
+type Rego struct { + query string + parsedQuery ast.Body + compiledQueries map[queryType]compiledQuery + pkg string + parsedPackage *ast.Package + imports []string + parsedImports []*ast.Import + rawInput *any + parsedInput ast.Value + unknowns []string + parsedUnknowns []*ast.Term + disableInlining []string + shallowInlining bool + nondeterministicBuiltins bool + skipPartialNamespace bool + partialNamespace string + modules []rawModule + parsedModules map[string]*ast.Module + compiler *ast.Compiler + store storage.Store + ownStore bool + ownStoreReadAst bool + txn storage.Transaction + metrics metrics.Metrics + queryTracers []topdown.QueryTracer + tracebuf *topdown.BufferTracer + trace bool + instrumentation *topdown.Instrumentation + instrument bool + capture map[*ast.Expr]ast.Var // map exprs to generated capture vars + termVarID int + dump io.Writer + runtime *ast.Term + time time.Time + seed io.Reader + capabilities *ast.Capabilities + builtinDecls map[string]*ast.Builtin + builtinFuncs map[string]*topdown.Builtin + unsafeBuiltins map[string]struct{} + loadPaths loadPaths + bundlePaths []string + bundles map[string]*bundle.Bundle + skipBundleVerification bool + bundleActivationPlugin string + enableBundleLazyLoadingMode bool + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + ndBuiltinCache builtins.NDBCache + strictBuiltinErrors bool + builtinErrorList *[]topdown.Error + resolvers []refResolver + schemaSet *ast.SchemaSet + target string // target type (wasm, rego, etc.) 
+ opa opa.EvalEngine + generateJSON func(*ast.Term, *EvalContext) (any, error) + printHook print.Hook + enablePrintStatements bool + distributedTracingOpts tracing.Options + strict bool + targetPrepState TargetPluginEval + regoVersion ast.RegoVersion + compilerHook func(*ast.Compiler) + evalMode *ast.CompilerEvalMode + filter filter.LoaderFilter +} + +func (r *Rego) RegoVersion() ast.RegoVersion { + return r.regoVersion +} + +// Function represents a built-in function that is callable in Rego. +type Function struct { + Name string + Description string + Decl *types.Function + Memoize bool + Nondeterministic bool +} + +// BuiltinContext contains additional attributes from the evaluator that +// built-in functions can use, e.g., the request context.Context, caches, etc. +type BuiltinContext = topdown.BuiltinContext + +type ( + // Builtin1 defines a built-in function that accepts 1 argument. + Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error) + + // Builtin2 defines a built-in function that accepts 2 arguments. + Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error) + + // Builtin3 defines a built-in function that accepts 3 argument. + Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error) + + // Builtin4 defines a built-in function that accepts 4 argument. + Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error) + + // BuiltinDyn defines a built-in function that accepts a list of arguments. + BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error) +) + +// RegisterBuiltin1 adds a built-in function globally inside the OPA runtime. 
+func RegisterBuiltin1(decl *Function, impl Builtin1) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltin2 adds a built-in function globally inside the OPA runtime. +func RegisterBuiltin2(decl *Function, impl Builtin2) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltin3 adds a built-in function globally inside the OPA runtime. +func RegisterBuiltin3(decl *Function, impl Builtin3) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltin4 adds a built-in function globally inside the OPA runtime. 
+func RegisterBuiltin4(decl *Function, impl Builtin4) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime. +func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function1 returns an option that adds a built-in function to the Rego object. +func Function1(decl *Function, f Builtin1) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function2 returns an option that adds a built-in function to the Rego object. 
+func Function2(decl *Function, f Builtin2) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function3 returns an option that adds a built-in function to the Rego object. +func Function3(decl *Function, f Builtin3) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function4 returns an option that adds a built-in function to the Rego object. +func Function4(decl *Function, f Builtin4) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// FunctionDyn returns an option that adds a built-in function to the Rego object. +func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// FunctionDecl returns an option that adds a custom-built-in function +// __declaration__. NO implementation is provided. This is used for +// non-interpreter execution envs (e.g., Wasm). 
+func FunctionDecl(decl *Function) func(*Rego) { + return newDecl(decl) +} + +func newDecl(decl *Function) func(*Rego) { + return func(r *Rego) { + r.builtinDecls[decl.Name] = &ast.Builtin{ + Name: decl.Name, + Decl: decl.Decl, + } + } +} + +type memo struct { + term *ast.Term + err error +} + +type memokey string + +func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) { + if !decl.Memoize { + return ifEmpty() + } + + // NOTE(tsandall): we assume memoization is applied to infrequent built-in + // calls that do things like fetch data from remote locations. As such, + // converting the terms to strings is acceptable for now. + var b strings.Builder + if _, err := b.WriteString(decl.Name); err != nil { + return nil, err + } + + // The term slice _may_ include an output term depending on how the caller + // referred to the built-in function. Only use the arguments as the cache + // key. Unification ensures we don't get false positive matches. + for i := range decl.Decl.Arity() { + if _, err := b.WriteString(terms[i].String()); err != nil { + return nil, err + } + } + + key := memokey(b.String()) + hit, ok := bctx.Cache.Get(key) + var m memo + if ok { + m = hit.(memo) + } else { + m.term, m.err = ifEmpty() + bctx.Cache.Put(key, m) + } + + return m.term, m.err +} + +// Dump returns an argument that sets the writer to dump debugging information to. +func Dump(w io.Writer) func(r *Rego) { + return func(r *Rego) { + r.dump = w + } +} + +// Query returns an argument that sets the Rego query. +func Query(q string) func(r *Rego) { + return func(r *Rego) { + r.query = q + } +} + +// ParsedQuery returns an argument that sets the Rego query. +func ParsedQuery(q ast.Body) func(r *Rego) { + return func(r *Rego) { + r.parsedQuery = q + } +} + +// Package returns an argument that sets the Rego package on the query's +// context. 
+func Package(p string) func(r *Rego) { + return func(r *Rego) { + r.pkg = p + } +} + +// ParsedPackage returns an argument that sets the Rego package on the query's +// context. +func ParsedPackage(pkg *ast.Package) func(r *Rego) { + return func(r *Rego) { + r.parsedPackage = pkg + } +} + +// Imports returns an argument that adds a Rego import to the query's context. +func Imports(p []string) func(r *Rego) { + return func(r *Rego) { + r.imports = append(r.imports, p...) + } +} + +// ParsedImports returns an argument that adds Rego imports to the query's +// context. +func ParsedImports(imp []*ast.Import) func(r *Rego) { + return func(r *Rego) { + r.parsedImports = append(r.parsedImports, imp...) + } +} + +// Input returns an argument that sets the Rego input document. Input should be +// a native Go value representing the input document. +func Input(x any) func(r *Rego) { + return func(r *Rego) { + r.rawInput = &x + } +} + +// ParsedInput returns an argument that sets the Rego input document. +func ParsedInput(x ast.Value) func(r *Rego) { + return func(r *Rego) { + r.parsedInput = x + } +} + +// Unknowns returns an argument that sets the values to treat as unknown during +// partial evaluation. +func Unknowns(unknowns []string) func(r *Rego) { + return func(r *Rego) { + r.unknowns = unknowns + } +} + +// ParsedUnknowns returns an argument that sets the values to treat as unknown +// during partial evaluation. +func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) { + return func(r *Rego) { + r.parsedUnknowns = unknowns + } +} + +// DisableInlining adds a set of paths to exclude from partial evaluation inlining. +func DisableInlining(paths []string) func(r *Rego) { + return func(r *Rego) { + r.disableInlining = paths + } +} + +// NondeterministicBuiltins causes non-deterministic builtins to be evalued during +// partial evaluation. This is needed to pull in external data, or validate a JWT, +// during PE, so that the result informs what queries are returned. 
+func NondeterministicBuiltins(yes bool) func(r *Rego) { + return func(r *Rego) { + r.nondeterministicBuiltins = yes + } +} + +// ShallowInlining prevents rules that depend on unknown values from being inlined. +// Rules that only depend on known values are inlined. +func ShallowInlining(yes bool) func(r *Rego) { + return func(r *Rego) { + r.shallowInlining = yes + } +} + +// SkipPartialNamespace disables namespacing of partial evalution results for support +// rules generated from policy. Synthetic support rules are still namespaced. +func SkipPartialNamespace(yes bool) func(r *Rego) { + return func(r *Rego) { + r.skipPartialNamespace = yes + } +} + +// PartialNamespace returns an argument that sets the namespace to use for +// partial evaluation results. The namespace must be a valid package path +// component. +func PartialNamespace(ns string) func(r *Rego) { + return func(r *Rego) { + r.partialNamespace = ns + } +} + +// Module returns an argument that adds a Rego module. +func Module(filename, input string) func(r *Rego) { + return func(r *Rego) { + r.modules = append(r.modules, rawModule{ + filename: filename, + module: input, + }) + } +} + +// ParsedModule returns an argument that adds a parsed Rego module. If a string +// module with the same filename name is added, it will override the parsed +// module. +func ParsedModule(module *ast.Module) func(*Rego) { + return func(r *Rego) { + var filename string + if module.Package.Location != nil { + filename = module.Package.Location.File + } else { + filename = fmt.Sprintf("module_%p.rego", module) + } + r.parsedModules[filename] = module + } +} + +// Load returns an argument that adds a filesystem path to load data +// and Rego modules from. Any file with a *.rego, *.yaml, or *.json +// extension will be loaded. The path can be either a directory or file, +// directories are loaded recursively. The optional ignore string patterns +// can be used to filter which files are used. 
// The Load option can only be used once.
// Note: Loading files will require a write transaction on the store.
func Load(paths []string, filter loader.Filter) func(r *Rego) {
	return func(r *Rego) {
		r.loadPaths = loadPaths{paths, filter}
	}
}

// LoadBundle returns an argument that adds a filesystem path to load
// a bundle from. The path can be a compressed bundle file or a directory
// to be loaded as a bundle.
// Note: Loading bundles will require a write transaction on the store.
func LoadBundle(path string) func(r *Rego) {
	return func(r *Rego) {
		r.bundlePaths = append(r.bundlePaths, path)
	}
}

// WithFilter returns an argument that sets the loader filter applied when
// bundles registered via LoadBundle are read from disk.
func WithFilter(f filter.LoaderFilter) func(r *Rego) {
	return func(r *Rego) {
		r.filter = f
	}
}

// ParsedBundle returns an argument that adds a bundle to be loaded.
func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) {
	return func(r *Rego) {
		r.bundles[name] = b
	}
}

// Compiler returns an argument that sets the Rego compiler.
func Compiler(c *ast.Compiler) func(r *Rego) {
	return func(r *Rego) {
		r.compiler = c
	}
}

// Store returns an argument that sets the policy engine's data storage layer.
//
// If using the Load, LoadBundle, or ParsedBundle options then a transaction
// must also be provided via the Transaction() option. After loading files
// or bundles the transaction should be aborted or committed.
func Store(s storage.Store) func(r *Rego) {
	return func(r *Rego) {
		r.store = s
	}
}

// Data returns an argument that sets the Rego data document. Data should be
// a map representing the data document. This is a simpler alternative to
// using Store with inmem.NewFromObject for cases where an in-memory store
// with static data is sufficient.
func Data(x map[string]any) func(r *Rego) {
	return func(r *Rego) {
		r.store = inmem.NewFromObject(x)
	}
}

// StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values.
//
// Only applicable when no store has been set on the Rego object through the Store option.
func StoreReadAST(enabled bool) func(r *Rego) {
	return func(r *Rego) {
		r.ownStoreReadAst = enabled
	}
}

// Transaction returns an argument that sets the transaction to use for storage
// layer operations.
//
// Requires the store associated with the transaction to be provided via the
// Store() option. If using Load(), LoadBundle(), or ParsedBundle() options
// the transaction will likely require write params.
func Transaction(txn storage.Transaction) func(r *Rego) {
	return func(r *Rego) {
		r.txn = txn
	}
}

// Metrics returns an argument that sets the metrics collection.
func Metrics(m metrics.Metrics) func(r *Rego) {
	return func(r *Rego) {
		r.metrics = m
	}
}

// Instrument returns an argument that enables instrumentation for diagnosing
// performance issues.
func Instrument(yes bool) func(r *Rego) {
	return func(r *Rego) {
		r.instrument = yes
	}
}

// Trace returns an argument that enables tracing on r.
func Trace(yes bool) func(r *Rego) {
	return func(r *Rego) {
		r.trace = yes
	}
}

// Tracer returns an argument that adds a query tracer to r.
//
// Deprecated: Use QueryTracer instead.
func Tracer(t topdown.Tracer) func(r *Rego) {
	return func(r *Rego) {
		if t != nil {
			r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t))
		}
	}
}

// QueryTracer returns an argument that adds a query tracer to r.
func QueryTracer(t topdown.QueryTracer) func(r *Rego) {
	return func(r *Rego) {
		if t != nil {
			r.queryTracers = append(r.queryTracers, t)
		}
	}
}

// Runtime returns an argument that sets the runtime data to provide to the
// evaluation engine.
func Runtime(term *ast.Term) func(r *Rego) {
	return func(r *Rego) {
		r.runtime = term
	}
}

// Time sets the wall clock time to use during policy evaluation. Prepared queries
// do not inherit this parameter. Use EvalTime to set the wall clock time when
// executing a prepared query.
func Time(x time.Time) func(r *Rego) {
	return func(r *Rego) {
		r.time = x
	}
}

// Seed sets a reader that will seed randomization required by built-in functions.
// If a seed is not provided crypto/rand.Reader is used.
func Seed(r io.Reader) func(*Rego) {
	return func(e *Rego) {
		e.seed = r
	}
}

// PrintTrace is a helper function to write a human-readable version of the
// trace to the writer w.
func PrintTrace(w io.Writer, r *Rego) {
	if r == nil || r.tracebuf == nil {
		return
	}
	topdown.PrettyTrace(w, *r.tracebuf)
}

// PrintTraceWithLocation is a helper function to write a human-readable version of the
// trace to the writer w.
func PrintTraceWithLocation(w io.Writer, r *Rego) {
	if r == nil || r.tracebuf == nil {
		return
	}
	topdown.PrettyTraceWithLocation(w, *r.tracebuf)
}

// UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow.
// This option is ignored for module compilation if the caller supplies the
// compiler. This option is always honored for query compilation. Provide an
// empty (non-nil) map to disable checks on queries.
func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) {
	return func(r *Rego) {
		r.unsafeBuiltins = unsafeBuiltins
	}
}

// SkipBundleVerification skips verification of a signed bundle.
func SkipBundleVerification(yes bool) func(r *Rego) {
	return func(r *Rego) {
		r.skipBundleVerification = yes
	}
}

// BundleActivatorPlugin sets the name of the activator plugin used to load bundles into the store.
func BundleActivatorPlugin(name string) func(r *Rego) {
	return func(r *Rego) {
		r.bundleActivationPlugin = name
	}
}

// BundleLazyLoadingMode sets the bundle loading mode. If true, bundles will be
// read in lazy mode. In this mode, data files in the bundle will not be
// deserialized and the check to validate that the bundle data does not contain
// paths outside the bundle's roots will not be performed while reading the bundle.
func BundleLazyLoadingMode(yes bool) func(r *Rego) {
	return func(r *Rego) {
		r.enableBundleLazyLoadingMode = yes
	}
}

// InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize
// during evaluation.
func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) {
	return func(r *Rego) {
		r.interQueryBuiltinCache = c
	}
}

// InterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize
// during evaluation.
func InterQueryBuiltinValueCache(c cache.InterQueryValueCache) func(r *Rego) {
	return func(r *Rego) {
		r.interQueryBuiltinValueCache = c
	}
}

// NDBuiltinCache sets the non-deterministic builtins cache.
func NDBuiltinCache(c builtins.NDBCache) func(r *Rego) {
	return func(r *Rego) {
		r.ndBuiltinCache = c
	}
}

// StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors.
func StrictBuiltinErrors(yes bool) func(r *Rego) {
	return func(r *Rego) {
		r.strictBuiltinErrors = yes
	}
}

// BuiltinErrorList supplies an error slice to store built-in function errors.
func BuiltinErrorList(list *[]topdown.Error) func(r *Rego) {
	return func(r *Rego) {
		r.builtinErrorList = list
	}
}

// Resolver sets a Resolver for a specified ref path.
func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) {
	return func(rego *Rego) {
		rego.resolvers = append(rego.resolvers, refResolver{ref, r})
	}
}

// Schemas sets the schemaSet
func Schemas(x *ast.SchemaSet) func(r *Rego) {
	return func(r *Rego) {
		r.schemaSet = x
	}
}

// Capabilities configures the underlying compiler's capabilities.
// This option is ignored for module compilation if the caller supplies the
// compiler.
func Capabilities(c *ast.Capabilities) func(r *Rego) {
	return func(r *Rego) {
		r.capabilities = c
	}
}

// Target sets the runtime to exercise.
func Target(t string) func(r *Rego) {
	return func(r *Rego) {
		r.target = t
	}
}

// GenerateJSON sets the AST to JSON converter for the results.
func GenerateJSON(f func(*ast.Term, *EvalContext) (any, error)) func(r *Rego) {
	return func(r *Rego) {
		r.generateJSON = f
	}
}

// PrintHook sets the object to use for handling print statement outputs.
func PrintHook(h print.Hook) func(r *Rego) {
	return func(r *Rego) {
		r.printHook = h
	}
}

// DistributedTracingOpts sets the options to be used by distributed tracing.
func DistributedTracingOpts(tr tracing.Options) func(r *Rego) {
	return func(r *Rego) {
		r.distributedTracingOpts = tr
	}
}

// EnablePrintStatements enables print() calls. If this option is not provided,
// print() calls will be erased from the policy. This option only applies to
// queries and policies that are passed as raw strings, i.e., this function will not
// have any effect if the caller supplies the ast.Compiler instance.
func EnablePrintStatements(yes bool) func(r *Rego) {
	return func(r *Rego) {
		r.enablePrintStatements = yes
	}
}

// Strict enables or disables strict-mode in the compiler
func Strict(yes bool) func(r *Rego) {
	return func(r *Rego) {
		r.strict = yes
	}
}

// SetRegoVersion sets the Rego version used when parsing and compiling
// modules and the query on this Rego object.
func SetRegoVersion(version ast.RegoVersion) func(r *Rego) {
	return func(r *Rego) {
		r.regoVersion = version
	}
}

// CompilerHook sets a hook function that will be called after the compiler is initialized.
// This is only called if the compiler has not been provided already.
func CompilerHook(hook func(*ast.Compiler)) func(r *Rego) {
	return func(r *Rego) {
		r.compilerHook = hook
	}
}

// EvalMode lets you override the evaluation mode.
func EvalMode(mode ast.CompilerEvalMode) func(r *Rego) {
	return func(r *Rego) {
		r.evalMode = &mode
	}
}

// New returns a new Rego object.
func New(options ...func(r *Rego)) *Rego {

	// Start from empty maps so option functions (ParsedBundle, Function*, etc.)
	// can write into them without nil checks.
	r := &Rego{
		parsedModules:   map[string]*ast.Module{},
		capture:         map[*ast.Expr]ast.Var{},
		compiledQueries: map[queryType]compiledQuery{},
		builtinDecls:    map[string]*ast.Builtin{},
		builtinFuncs:    map[string]*topdown.Builtin{},
		bundles:         map[string]*bundle.Bundle{},
	}

	for _, option := range options {
		option(r)
	}

	callHook := r.compiler == nil // call hook only if we created the compiler here

	if r.compiler == nil {
		//nolint:staticcheck
		r.compiler = ast.NewCompiler().
			WithUnsafeBuiltins(r.unsafeBuiltins).
			WithBuiltins(r.builtinDecls).
			WithDebug(r.dump).
			WithSchemas(r.schemaSet).
			WithCapabilities(r.capabilities).
			WithEnablePrintStatements(r.enablePrintStatements).
			WithStrict(r.strict).
			WithUseTypeCheckAnnotations(true)

		// topdown could be target "" or "rego", but both could be overridden by
		// a target plugin (checked below)
		if r.target == targetWasm {
			r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR)
		}

		if r.regoVersion != ast.RegoUndefined {
			r.compiler = r.compiler.WithDefaultRegoVersion(r.regoVersion)
		}
	}

	if r.store == nil {
		if bundle.HasExtension() {
			r.store = bundle.BundleExtStore()
		} else {
			r.store = inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(r.ownStoreReadAst))
		}
		r.ownStore = true
	} else {
		r.ownStore = false
	}

	if r.metrics == nil {
		r.metrics = metrics.New()
	}

	if r.instrument {
		r.instrumentation = topdown.NewInstrumentation(r.metrics)
		r.compiler.WithMetrics(r.metrics)
	}

	if r.trace {
		r.tracebuf = topdown.NewBufferTracer()
		r.queryTracers = append(r.queryTracers, r.tracebuf)
	}

	if r.partialNamespace == "" {
		r.partialNamespace = defaultPartialNamespace
	}

	if r.generateJSON == nil {
		r.generateJSON = generateJSON
	}

	// A registered target plugin forces IR evaluation mode.
	if t := r.targetPlugin(r.target); t != nil {
		r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR)
	}

	if r.evalMode != nil {
		r.compiler = r.compiler.WithEvalMode(*r.evalMode)
	}

	if r.compilerHook != nil && callHook {
		r.compilerHook(r.compiler)
	}

	return r
}

// Eval evaluates this Rego object and returns a ResultSet.
func (r *Rego) Eval(ctx context.Context) (ResultSet, error) {
	var err error
	var txnClose transactionCloser
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return nil, err
	}

	pq, err := r.PrepareForEval(ctx)
	if err != nil {
		_ = txnClose(ctx, err) // Ignore error
		return nil, err
	}

	evalArgs := []EvalOption{
		EvalTransaction(r.txn),
		EvalMetrics(r.metrics),
		EvalInstrument(r.instrument),
		EvalTime(r.time),
		EvalInterQueryBuiltinCache(r.interQueryBuiltinCache),
		EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache),
		EvalSeed(r.seed),
	}

	if r.ndBuiltinCache != nil {
		evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache))
	}

	for _, qt := range r.queryTracers {
		evalArgs = append(evalArgs, EvalQueryTracer(qt))
	}

	for i := range r.resolvers {
		evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r))
	}

	rs, err := pq.Eval(ctx, evalArgs...)
	txnErr := txnClose(ctx, err) // Always call closer
	if err == nil {
		err = txnErr
	}
	return rs, err
}

// PartialEval has been deprecated and renamed to PartialResult.
//
// Deprecated: Use PartialResult instead.
func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) {
	return r.PartialResult(ctx)
}

// PartialResult partially evaluates this Rego object and returns a PartialResult.
func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) {
	var err error
	var txnClose transactionCloser
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return PartialResult{}, err
	}

	pq, err := r.PrepareForEval(ctx, WithPartialEval())
	txnErr := txnClose(ctx, err) // Always call closer
	if err != nil {
		return PartialResult{}, err
	}
	if txnErr != nil {
		return PartialResult{}, txnErr
	}

	// Carry over the compiler/store/query state produced by partial evaluation
	// so the caller can build new Rego objects from it.
	pr := PartialResult{
		compiler:     pq.r.compiler,
		store:        pq.r.store,
		body:         pq.r.parsedQuery,
		builtinDecls: pq.r.builtinDecls,
		builtinFuncs: pq.r.builtinFuncs,
	}

	return pr, nil
}

// Partial runs partial evaluation on r and returns the result.
func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) {
	var err error
	var txnClose transactionCloser
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return nil, err
	}

	pq, err := r.PrepareForPartial(ctx)
	if err != nil {
		_ = txnClose(ctx, err) // Ignore error
		return nil, err
	}

	evalArgs := []EvalOption{
		EvalTransaction(r.txn),
		EvalMetrics(r.metrics),
		EvalInstrument(r.instrument),
		EvalInterQueryBuiltinCache(r.interQueryBuiltinCache),
		EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache),
	}

	if r.ndBuiltinCache != nil {
		evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache))
	}

	for _, t := range r.queryTracers {
		evalArgs = append(evalArgs, EvalQueryTracer(t))
	}

	for i := range r.resolvers {
		evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r))
	}

	pqs, err := pq.Partial(ctx, evalArgs...)
	txnErr := txnClose(ctx, err) // Always call closer
	if err == nil {
		err = txnErr
	}
	return pqs, err
}

// CompileOption defines a function to set options on Compile calls.
type CompileOption func(*CompileContext)

// CompileContext contains options for Compile calls.
+type CompileContext struct { + partial bool +} + +// CompilePartial defines an option to control whether partial evaluation is run +// before the query is planned and compiled. +func CompilePartial(yes bool) CompileOption { + return func(cfg *CompileContext) { + cfg.partial = yes + } +} + +// Compile returns a compiled policy query. +func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) { + var cfg CompileContext + + for _, opt := range opts { + opt(&cfg) + } + + var queries []ast.Body + modules := make([]*ast.Module, 0, len(r.compiler.Modules)) + + if cfg.partial { + + pq, err := r.Partial(ctx) + if err != nil { + return nil, err + } + if r.dump != nil { + if len(pq.Queries) != 0 { + msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries)) + fmt.Fprintln(r.dump, msg) + fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) + for i := range pq.Queries { + fmt.Println(pq.Queries[i]) + } + fmt.Fprintln(r.dump) + } + if len(pq.Support) != 0 { + msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support)) + fmt.Fprintln(r.dump, msg) + fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) + for i := range pq.Support { + fmt.Println(pq.Support[i]) + } + fmt.Fprintln(r.dump) + } + } + + queries = pq.Queries + modules = pq.Support + + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + } else { + var err error + // If creating a new transaction it should be closed before calling the + // planner to avoid holding open the transaction longer than needed. + // + // TODO(tsandall): in future, planner could make use of store, in which + // case this will need to change. 
+ var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return nil, err + } + + err = r.prepare(ctx, compileQueryType, nil) + txnErr := txnClose(ctx, err) // Always call closer + if err != nil { + return nil, err + } + if txnErr != nil { + return nil, err + } + + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + + queries = []ast.Body{r.compiledQueries[compileQueryType].query} + } + + if tgt := r.targetPlugin(r.target); tgt != nil { + return nil, errors.New("unsupported for rego target plugins") + } + + return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here +} + +func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { + policy, err := r.planQuery(queries, qType) + if err != nil { + return nil, err + } + + m, err := wasm.New().WithPolicy(policy).Compile() + if err != nil { + return nil, err + } + + var out bytes.Buffer + if err := encoding.WriteModule(&out, m); err != nil { + return nil, err + } + + return &CompileResult{ + Bytes: out.Bytes(), + }, nil +} + +// PrepareOption defines a function to set an option to control +// the behavior of the Prepare call. +type PrepareOption func(*PrepareConfig) + +// PrepareConfig holds settings to control the behavior of the +// Prepare call. +type PrepareConfig struct { + doPartialEval bool + disableInlining *[]string + builtinFuncs map[string]*topdown.Builtin +} + +// WithPartialEval configures an option for PrepareForEval +// which will have it perform partial evaluation while preparing +// the query (similar to rego.Rego#PartialResult) +func WithPartialEval() PrepareOption { + return func(p *PrepareConfig) { + p.doPartialEval = true + } +} + +// WithNoInline adds a set of paths to exclude from partial evaluation inlining. 
func WithNoInline(paths []string) PrepareOption {
	return func(p *PrepareConfig) {
		p.disableInlining = &paths
	}
}

// WithBuiltinFuncs carries the rego.Function{1,2,3} per-query function definitions
// to the target plugins.
func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption {
	return func(p *PrepareConfig) {
		if p.builtinFuncs == nil {
			p.builtinFuncs = maps.Clone(bis)
		} else {
			maps.Copy(p.builtinFuncs, bis)
		}
	}
}

// BuiltinFuncs allows retrieving the builtin funcs set via PrepareOption
// WithBuiltinFuncs.
func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin {
	return p.builtinFuncs
}

// PrepareForEval will parse inputs, modules, and query arguments in preparation
// of evaluating them.
func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) {
	if !r.hasQuery() {
		return PreparedEvalQuery{}, errors.New("cannot evaluate empty query")
	}

	pCfg := &PrepareConfig{}
	for _, o := range opts {
		o(pCfg)
	}

	var err error
	var txnClose transactionCloser
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return PreparedEvalQuery{}, err
	}

	// If the caller wanted to do partial evaluation as part of preparation
	// do it now and use the new Rego object.
	if pCfg.doPartialEval {

		pr, err := r.partialResult(ctx, pCfg)
		if err != nil {
			_ = txnClose(ctx, err) // Ignore error
			return PreparedEvalQuery{}, err
		}

		// Prepare the new query using the result of partial evaluation
		pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx)
		txnErr := txnClose(ctx, err)
		if err != nil {
			return pq, err
		}
		return pq, txnErr
	}

	err = r.prepare(ctx, evalQueryType, []extraStage{
		{
			after: "ResolveRefs",
			stage: ast.QueryCompilerStageDefinition{
				Name:       "RewriteToCaptureValue",
				MetricName: "query_compile_stage_rewrite_to_capture_value",
				Stage:      r.rewriteQueryToCaptureValue,
			},
		},
	})
	if err != nil {
		_ = txnClose(ctx, err) // Ignore error
		return PreparedEvalQuery{}, err
	}

	switch r.target {
	case targetWasm: // TODO(sr): make wasm a target plugin, too

		if r.hasWasmModule() {
			_ = txnClose(ctx, err) // Ignore error
			return PreparedEvalQuery{}, errors.New("wasm target not supported")
		}

		var modules []*ast.Module
		for _, module := range r.compiler.Modules {
			modules = append(modules, module)
		}

		queries := []ast.Body{r.compiledQueries[evalQueryType].query}

		e, err := opa.LookupEngine(targetWasm)
		if err != nil {
			return PreparedEvalQuery{}, err
		}

		// nolint: staticcheck // SA4006 false positive
		cr, err := r.compileWasm(modules, queries, evalQueryType)
		if err != nil {
			_ = txnClose(ctx, err) // Ignore error
			return PreparedEvalQuery{}, err
		}

		// nolint: staticcheck // SA4006 false positive
		data, err := r.store.Read(ctx, r.txn, storage.RootPath)
		if err != nil {
			_ = txnClose(ctx, err) // Ignore error
			return PreparedEvalQuery{}, err
		}

		// Instantiate the wasm engine with the compiled policy and a snapshot
		// of the data document; kept on r for use at eval time.
		o, err := e.New().WithPolicyBytes(cr.Bytes).WithDataJSON(data).Init()
		if err != nil {
			_ = txnClose(ctx, err) // Ignore error
			return PreparedEvalQuery{}, err
		}
		r.opa = o

	case targetRego: // do nothing, don't lookup default plugin
	default: // either a specific plugin target, or one that is default
		if tgt := r.targetPlugin(r.target); tgt != nil {
			queries := []ast.Body{r.compiledQueries[evalQueryType].query}
			pol, err := r.planQuery(queries, evalQueryType)
			if err != nil {
				return PreparedEvalQuery{}, err
			}
			// always add the builtins provided via rego.FunctionN options
			opts = append(opts, WithBuiltinFuncs(r.builtinFuncs))
			r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...)
			if err != nil {
				return PreparedEvalQuery{}, err
			}
		}
	}

	txnErr := txnClose(ctx, err) // Always call closer
	if txnErr != nil {
		return PreparedEvalQuery{}, txnErr
	}

	return PreparedEvalQuery{preparedQuery{r, pCfg}}, err
}

// PrepareForPartial will parse inputs, modules, and query arguments in preparation
// of partially evaluating them.
func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) {
	if !r.hasQuery() {
		return PreparedPartialQuery{}, errors.New("cannot evaluate empty query")
	}

	pCfg := &PrepareConfig{}
	for _, o := range opts {
		o(pCfg)
	}

	var err error
	var txnClose transactionCloser
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return PreparedPartialQuery{}, err
	}

	err = r.prepare(ctx, partialQueryType, []extraStage{
		{
			after: "CheckSafety",
			stage: ast.QueryCompilerStageDefinition{
				Name:       "RewriteEquals",
				MetricName: "query_compile_stage_rewrite_equals",
				Stage:      r.rewriteEqualsForPartialQueryCompile,
			},
		},
	})
	txnErr := txnClose(ctx, err) // Always call closer
	if err != nil {
		return PreparedPartialQuery{}, err
	}
	if txnErr != nil {
		return PreparedPartialQuery{}, txnErr
	}

	return PreparedPartialQuery{preparedQuery{r, pCfg}}, err
}

// prepare parses the input, loads files and bundles, compiles modules, and
// finally compiles and caches the query for qType.
func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error {
	var err error

	r.parsedInput, err = r.parseInput()
	if err != nil {
		return err
	}

	err = r.loadFiles(ctx, r.txn, r.metrics)
	if err != nil {
		return err
	}

	err = r.loadBundles(ctx, r.txn, r.metrics)
	if err != nil {
		return err
	}

	err = r.parseModules(ctx, r.txn, r.metrics)
	if err != nil {
		return err
	}

	// Compile the modules *before* the query, else functions
	// defined in the module won't be found...
	err = r.compileModules(ctx, r.txn, r.metrics)
	if err != nil {
		return err
	}

	imports, err := r.prepareImports()
	if err != nil {
		return err
	}

	// Only future/rego imports influence query parsing.
	queryImports := []*ast.Import{}
	for _, imp := range imports {
		path := imp.Path.Value.(ast.Ref)
		if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) {
			queryImports = append(queryImports, imp)
		}
	}

	r.parsedQuery, err = r.parseQuery(queryImports, r.metrics)
	if err != nil {
		return err
	}

	err = r.compileAndCacheQuery(qType, r.parsedQuery, imports, r.metrics, extras)
	if err != nil {
		return err
	}

	return nil
}

// parseModules parses policies already saved in the store plus modules supplied
// on the Rego object, collecting parse errors into a single Errors value.
func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
	if len(r.modules) == 0 {
		return nil
	}

	ids, err := r.store.ListPolicies(ctx, txn)
	if err != nil {
		return err
	}

	m.Timer(metrics.RegoModuleParse).Start()
	defer m.Timer(metrics.RegoModuleParse).Stop()
	var errs Errors

	popts := ast.ParserOptions{
		RegoVersion:  r.regoVersion,
		Capabilities: r.capabilities,
	}

	// Parse any modules that are saved to the store, but only if
	// another compile step is going to occur (ie. we have parsed modules
	// that need to be compiled).
	for _, id := range ids {
		// if it is already on the compiler we're using
		// then don't bother to re-parse it from source
		if _, haveMod := r.compiler.Modules[id]; haveMod {
			continue
		}

		bs, err := r.store.GetPolicy(ctx, txn, id)
		if err != nil {
			return err
		}

		parsed, err := ast.ParseModuleWithOpts(id, string(bs), popts)
		if err != nil {
			errs = append(errs, err)
		}

		r.parsedModules[id] = parsed
	}

	// Parse any passed in as arguments to the Rego object
	for _, module := range r.modules {
		p, err := module.ParseWithOpts(popts)
		if err != nil {
			switch errorWithType := err.(type) {
			case ast.Errors:
				for _, e := range errorWithType {
					errs = append(errs, e)
				}
			default:
				errs = append(errs, errorWithType)
			}
		}
		r.parsedModules[module.filename] = p
	}

	if len(errs) > 0 {
		return errs
	}

	return nil
}

// loadFiles loads the paths registered via the Load() option, storing parsed
// modules on r and writing loaded data documents into the store.
func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
	if len(r.loadPaths.paths) == 0 {
		return nil
	}

	m.Timer(metrics.RegoLoadFiles).Start()
	defer m.Timer(metrics.RegoLoadFiles).Stop()

	result, err := loader.NewFileLoader().
		WithMetrics(m).
		WithProcessAnnotation(true).
		WithBundleLazyLoadingMode(bundle.HasExtension()).
		WithRegoVersion(r.regoVersion).
		WithCapabilities(r.capabilities).
		Filtered(r.loadPaths.paths, r.loadPaths.filter)
	if err != nil {
		return err
	}
	for name, mod := range result.Modules {
		r.parsedModules[name] = mod.Parsed
	}

	if len(result.Documents) > 0 {
		err = r.store.Write(ctx, txn, storage.AddOp, storage.RootPath, result.Documents)
		if err != nil {
			return err
		}
	}
	return nil
}

// loadBundles reads the bundle paths registered via LoadBundle() into
// r.bundles; activation into the store happens later in compileModules.
func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error {
	if len(r.bundlePaths) == 0 {
		return nil
	}

	m.Timer(metrics.RegoLoadBundles).Start()
	defer m.Timer(metrics.RegoLoadBundles).Stop()

	for _, path := range r.bundlePaths {
		bndl, err := loader.NewFileLoader().
			WithMetrics(m).
			WithProcessAnnotation(true).
			WithBundleLazyLoadingMode(bundle.HasExtension()).
			WithSkipBundleVerification(r.skipBundleVerification).
			WithRegoVersion(r.regoVersion).
			WithCapabilities(r.capabilities).
			WithFilter(r.filter).
			AsBundle(path)
		if err != nil {
			return fmt.Errorf("loading error: %s", err)
		}
		r.bundles[path] = bndl
	}
	return nil
}

// parseInput returns the already-parsed input if set, otherwise parses the raw
// input value supplied by the caller.
func (r *Rego) parseInput() (ast.Value, error) {
	if r.parsedInput != nil {
		return r.parsedInput, nil
	}
	return r.parseRawInput(r.rawInput, r.metrics)
}

// parseRawInput converts a raw Go value into an ast.Value, round-tripping
// through JSON first so native slice types become []any.
func (*Rego) parseRawInput(rawInput *any, m metrics.Metrics) (ast.Value, error) {
	var input ast.Value

	if rawInput == nil {
		return input, nil
	}

	m.Timer(metrics.RegoInputParse).Start()
	defer m.Timer(metrics.RegoInputParse).Stop()

	rawPtr := util.Reference(rawInput)

	// roundtrip through json: this turns slices (e.g. []string, []bool) into
	// []any, the only array type ast.InterfaceToValue can work with
	if err := util.RoundTrip(rawPtr); err != nil {
		return nil, err
	}

	return ast.InterfaceToValue(*rawPtr)
}

// parseQuery parses r.query (unless a pre-parsed query was supplied), applying
// parser options derived from the query's future/rego imports.
func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) {
	if r.parsedQuery != nil {
		return r.parsedQuery, nil
	}

	m.Timer(metrics.RegoQueryParse).Start()
	defer m.Timer(metrics.RegoQueryParse).Stop()

	popts, err := future.ParserOptionsFromFutureImports(queryImports)
	if err != nil {
		return nil, err
	}
	popts.RegoVersion = r.regoVersion
	popts, err = parserOptionsFromRegoVersionImport(queryImports, popts)
	if err != nil {
		return nil, err
	}
	popts.SkipRules = true
	popts.Capabilities = r.capabilities

	return ast.ParseBodyWithOpts(r.query, popts)
}

// parserOptionsFromRegoVersionImport upgrades the parser options to Rego v1
// when an import of rego.v1 is present.
func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) {
	for _, imp := range imports {
		path := imp.Path.Value.(ast.Ref)
		if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 {
			popts.RegoVersion = ast.RegoV1
			return popts, nil
		}
	}

	return popts, nil
}

// compileModules activates bundles and compiles parsed modules in one pass,
// then loads wasm resolvers from the store when none were supplied explicitly.
func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
	// Only compile again if there are new modules.
	if len(r.bundles) > 0 || len(r.parsedModules) > 0 {

		// The bundle.Activate call will activate any bundles passed in
		// (ie compile + handle data store changes), and include any of
		// the additional modules passed in. If no bundles are provided
		// it will only compile the passed in modules.
		// Use this as the single-point of compiling everything only a
		// single time.
		opts := &bundle.ActivateOpts{
			Ctx:           ctx,
			Store:         r.store,
			Txn:           txn,
			Compiler:      r.compilerForTxn(ctx, r.store, txn),
			Metrics:       m,
			Bundles:       r.bundles,
			ExtraModules:  r.parsedModules,
			ParserOptions: ast.ParserOptions{RegoVersion: r.regoVersion},
		}
		err := bundle.Activate(opts)
		if err != nil {
			return err
		}
	}

	// Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided.
	if len(r.resolvers) == 0 {
		resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles)
		if err != nil {
			return err
		}

		for _, rslvr := range resolvers {
			for _, ep := range rslvr.Entrypoints() {
				r.resolvers = append(r.resolvers, refResolver{ep, rslvr})
			}
		}
	}
	return nil
}

// compileAndCacheQuery compiles query for qType unless a compiled form is
// already cached, and stores the result for reuse.
func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) error {
	m.Timer(metrics.RegoQueryCompile).Start()
	defer m.Timer(metrics.RegoQueryCompile).Stop()

	cachedQuery, ok := r.compiledQueries[qType]
	if ok && cachedQuery.query != nil && cachedQuery.compiler != nil {
		return nil
	}

	qc, compiled, err := r.compileQuery(query, imports, m, extras)
	if err != nil {
		return err
	}

	// cache the query for future use
	r.compiledQueries[qType] = compiledQuery{
		query:    compiled,
		compiler: qc,
	}
	return nil
}

// prepareImports combines pre-parsed imports with the string imports supplied
// via options.
func (r *Rego) prepareImports() ([]*ast.Import, error) {
	imports := r.parsedImports

	if len(r.imports) > 0 {
		s := make([]string, len(r.imports))
		for i := range r.imports {
			s[i] = fmt.Sprintf("import %v", r.imports[i])
		}
		parsed, err := ast.ParseImports(strings.Join(s, "\n"))
		if err != nil {
			return nil, err
		}
		imports = append(imports, parsed...)
	}
	return imports, nil
}

// compileQuery compiles query within the configured package/imports context,
// applying any extra query compiler stages.
func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) {
	var pkg *ast.Package

	if r.pkg != "" {
		var err error
		pkg, err = ast.ParsePackage("package " + r.pkg)
		if err != nil {
			return nil, nil, err
		}
	} else {
		pkg = r.parsedPackage
	}

	qctx := ast.NewQueryContext().
		WithPackage(pkg).
		WithImports(imports)

	qc := r.compiler.QueryCompiler().
		WithContext(qctx).
		WithUnsafeBuiltins(r.unsafeBuiltins).
		WithEnablePrintStatements(r.enablePrintStatements).
		WithStrict(false)

	for _, extra := range extras {
		qc = qc.WithStageAfterID(extra.after, extra.stage)
	}

	compiled, err := qc.Compile(query)

	return qc, compiled, err
}

// eval dispatches evaluation to a target plugin, the wasm engine, or topdown,
// depending on how this Rego object was prepared.
func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) {
	switch {
	case r.targetPrepState != nil: // target plugin flow
		var val ast.Value
		if r.runtime != nil {
			val = r.runtime.Value
		}
		s, err := r.targetPrepState.Eval(ctx, ectx, val)
		if err != nil {
			return nil, err
		}
		return r.valueToQueryResult(s, ectx)
	case r.target == targetWasm:
		return r.evalWasm(ctx, ectx)
	case r.target == targetRego: // continue
	}

	q := topdown.NewQuery(ectx.compiledQuery.query).
		WithQueryCompiler(ectx.compiledQuery.compiler).
		WithCompiler(r.compiler).
		WithStore(r.store).
		WithTransaction(ectx.txn).
		WithBuiltins(r.builtinFuncs).
		WithMetrics(ectx.metrics).
		WithInstrumentation(ectx.instrumentation).
		WithRuntime(r.runtime).
		WithIndexing(ectx.indexing).
		WithEarlyExit(ectx.earlyExit).
		WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache).
		WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache).
		WithStrictBuiltinErrors(r.strictBuiltinErrors).
		WithBuiltinErrorList(r.builtinErrorList).
		WithSeed(ectx.seed).
		WithPrintHook(ectx.printHook).
		WithDistributedTracingOpts(r.distributedTracingOpts).
		WithVirtualCache(ectx.virtualCache).
		WithBaseCache(ectx.baseCache)

	if !ectx.time.IsZero() {
		q = q.WithTime(ectx.time)
	}

	if ectx.ndBuiltinCache != nil {
		q = q.WithNDBuiltinCache(ectx.ndBuiltinCache)
	}

	for i := range ectx.queryTracers {
		q = q.WithQueryTracer(ectx.queryTracers[i])
	}

	if ectx.parsedInput != nil {
		q = q.WithInput(ast.NewTerm(ectx.parsedInput))
	}

	if ectx.httpRoundTripper != nil {
		q = q.WithHTTPRoundTripper(ectx.httpRoundTripper)
	}

	for i := range ectx.resolvers {
		q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r)
	}

	// Cancel query if context is cancelled or deadline is reached.
	if ectx.externalCancel == nil {
		// Create a one-off goroutine to handle cancellation for this query.
		c := topdown.NewCancel()
		q = q.WithCancel(c)
		exit := make(chan struct{})
		defer close(exit)
		go waitForDone(ctx, exit, func() {
			c.Cancel()
		})
	} else {
		// Query cancellation is being handled elsewhere.
		q = q.WithCancel(ectx.externalCancel)
	}

	var rs ResultSet
	err := q.Iter(ctx, func(qr topdown.QueryResult) error {
		result, err := r.generateResult(qr, ectx)
		if err != nil {
			return err
		}
		rs = append(rs, result)
		return nil
	})
	if err != nil {
		return nil, err
	}

	if len(rs) == 0 {
		return nil, nil
	}

	return rs, nil
}

// evalWasm evaluates the prepared wasm module (r.opa) against the eval context
// and converts its result back into a ResultSet.
func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, error) {
	input := ectx.rawInput
	if ectx.parsedInput != nil {
		i := any(ectx.parsedInput)
		input = &i
	}
	result, err := r.opa.Eval(ctx, opa.EvalOpts{
		Metrics:                r.metrics,
		Input:                  input,
		Time:                   ectx.time,
		Seed:                   ectx.seed,
		InterQueryBuiltinCache: ectx.interQueryBuiltinCache,
		NDBuiltinCache:         ectx.ndBuiltinCache,
		PrintHook:              ectx.printHook,
		Capabilities:           ectx.capabilities,
	})
	if err != nil {
		return nil, err
	}

	parsed, err := ast.ParseTerm(string(result.Result))
	if err != nil {
		return nil, err
	}

	return r.valueToQueryResult(parsed.Value, ectx)
}

// valueToQueryResult converts a result set value (as produced by the wasm
// engine or a target plugin) into a ResultSet.
func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) {
	resultSet, ok := res.(ast.Set)
	if !ok {
		return nil, errors.New("illegal result type")
	}

	if resultSet.Len() == 0 {
		return nil, nil
	}

	var rs ResultSet
	err := resultSet.Iter(func(term *ast.Term) error {
		obj, ok := term.Value.(ast.Object)
		if !ok {
			return errors.New("illegal result type")
		}
		qr := topdown.QueryResult{}
		obj.Foreach(func(k, v *ast.Term) {
			kvt := ast.VarTerm(string(k.Value.(ast.String)))
			qr[kvt.Value.(ast.Var)] = v
		})
		result, err := r.generateResult(qr, ectx)
		if err != nil {
			return err
		}
		rs = append(rs, result)
		return nil
	})

	return rs, err
}

// generateResult converts a single topdown query result into a Result,
// populating variable bindings and per-expression values.
func (r *Rego) generateResult(qr topdown.QueryResult, ectx *EvalContext) (Result, error) {
	rewritten := ectx.compiledQuery.compiler.RewrittenVars()

	result := newResult()
	for k, term := range qr {
		if rw, ok := rewritten[k]; ok {
			k = rw
		}
		// Skip internal/compiler-generated variables.
		if isTermVar(k) || isTermWasmVar(k) || k.IsGenerated() || k.IsWildcard() {
			continue
		}

		v, err := r.generateJSON(term, ectx)
		if err != nil {
			return result, err
		}

		result.Bindings[string(k)] = v
	}

	for _, expr := range ectx.compiledQuery.query {
		if expr.Generated {
			continue
		}

		if k, ok := r.capture[expr]; ok {
			v, err := r.generateJSON(qr[k], ectx)
			if err != nil {
				return result, err
			}
			result.Expressions = append(result.Expressions, newExpressionValue(expr, v))
		} else {
			result.Expressions = append(result.Expressions, newExpressionValue(expr, true))
		}

	}
	return result, nil
}

// partialResult runs partial evaluation and installs the resulting support
// modules into the compiler, returning a PartialResult over them.
func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) {
	err := r.prepare(ctx, partialResultQueryType, []extraStage{
		{
			after: "ResolveRefs",
			stage: ast.QueryCompilerStageDefinition{
				Name:       "RewriteForPartialEval",
				MetricName: "query_compile_stage_rewrite_for_partial_eval",
				Stage:      r.rewriteQueryForPartialEval,
			},
		},
	})
	if err != nil {
		return PartialResult{}, err
	}

	ectx := &EvalContext{
		parsedInput:              r.parsedInput,
		metrics:                  r.metrics,
		txn:                      r.txn,
		partialNamespace:         r.partialNamespace,
		queryTracers:             r.queryTracers,
		compiledQuery:            r.compiledQueries[partialResultQueryType],
		instrumentation:          r.instrumentation,
		indexing:                 true,
		resolvers:                r.resolvers,
		capabilities:             r.capabilities,
		strictBuiltinErrors:      r.strictBuiltinErrors,
		nondeterministicBuiltins: r.nondeterministicBuiltins,
	}

	disableInlining := r.disableInlining

	if pCfg.disableInlining != nil {
		disableInlining = *pCfg.disableInlining
	}

	ectx.disableInlining, err = parseStringsToRefs(disableInlining)
	if err != nil {
		return PartialResult{}, err
	}

	pq, err := r.partial(ctx, ectx)
	if err != nil {
		return PartialResult{}, err
	}

	// Construct module for queries.
	id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace)

	module, err := ast.ParseModuleWithOpts(id, "package "+ectx.partialNamespace,
		ast.ParserOptions{RegoVersion: r.regoVersion})
	if err != nil {
		return PartialResult{}, errors.New("bad partial namespace")
	}

	module.Rules = make([]*ast.Rule, len(pq.Queries))
	for i, body := range pq.Queries {
		rule := &ast.Rule{
			Head:   ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard),
			Body:   body,
			Module: module,
		}
		module.Rules[i] = rule
		if checkPartialResultForRecursiveRefs(body, module.Package.Path.Extend(rule.Head.Reference.GroundPrefix())) {
			return PartialResult{}, Errors{errPartialEvaluationNotEffective}
		}
	}

	// Update compiler with partial evaluation output.
	r.compiler.Modules[id] = module
	for i, module := range pq.Support {
		r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module
	}

	r.metrics.Timer(metrics.RegoModuleCompile).Start()
	r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules)
	r.metrics.Timer(metrics.RegoModuleCompile).Stop()

	if r.compiler.Failed() {
		return PartialResult{}, r.compiler.Errors
	}

	result := PartialResult{
		compiler:     r.compiler,
		store:        r.store,
		body:         ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)),
		builtinDecls: r.builtinDecls,
		builtinFuncs: r.builtinFuncs,
	}

	return result, nil
}

// partial runs partial evaluation with the configured unknowns.
func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) {
	var unknowns []*ast.Term

	switch {
	case ectx.parsedUnknowns != nil:
		unknowns = ectx.parsedUnknowns
	case ectx.unknowns != nil:
		unknowns = make([]*ast.Term, len(ectx.unknowns))
		for i := range ectx.unknowns {
			var err error
			unknowns[i], err = ast.ParseTerm(ectx.unknowns[i])
			if err != nil {
				return nil, err
			}
		}
	default:
		// Use input document as unknown if caller has not specified any.
+ unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)} + } + + q := topdown.NewQuery(ectx.compiledQuery.query). + WithQueryCompiler(ectx.compiledQuery.compiler). + WithCompiler(r.compiler). + WithStore(r.store). + WithTransaction(ectx.txn). + WithBuiltins(r.builtinFuncs). + WithMetrics(ectx.metrics). + WithInstrumentation(ectx.instrumentation). + WithUnknowns(unknowns). + WithDisableInlining(ectx.disableInlining). + WithNondeterministicBuiltins(ectx.nondeterministicBuiltins). + WithRuntime(r.runtime). + WithIndexing(ectx.indexing). + WithEarlyExit(ectx.earlyExit). + WithPartialNamespace(ectx.partialNamespace). + WithSkipPartialNamespace(r.skipPartialNamespace). + WithShallowInlining(r.shallowInlining). + WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). + WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). + WithStrictBuiltinErrors(ectx.strictBuiltinErrors). + WithSeed(ectx.seed). + WithPrintHook(ectx.printHook) + + if !ectx.time.IsZero() { + q = q.WithTime(ectx.time) + } + + if ectx.ndBuiltinCache != nil { + q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) + } + + for i := range ectx.queryTracers { + q = q.WithQueryTracer(ectx.queryTracers[i]) + } + + if ectx.parsedInput != nil { + q = q.WithInput(ast.NewTerm(ectx.parsedInput)) + } + + for i := range ectx.resolvers { + q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) + } + + // Cancel query if context is cancelled or deadline is reached. + if ectx.externalCancel == nil { + // Create a one-off goroutine to handle cancellation for this query. + c := topdown.NewCancel() + q = q.WithCancel(c) + exit := make(chan struct{}) + defer close(exit) + go waitForDone(ctx, exit, func() { + c.Cancel() + }) + } else { + // Query cancellation is being handled elsewhere. 
+ q = q.WithCancel(ectx.externalCancel) + } + + queries, support, err := q.PartialRun(ctx) + if err != nil { + return nil, err + } + + // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules. + if r.regoVersion == ast.RegoV0 && + (r.capabilities == nil || + r.capabilities.ContainsFeature(ast.FeatureRegoV1Import) || + r.capabilities.ContainsFeature(ast.FeatureRegoV1)) { + + for i, mod := range support { + // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that + // conflict with future keywords. + applyRegoVersion := true + + ast.WalkRules(mod, func(r *ast.Rule) bool { + name := r.Head.Name + if name == "" && len(r.Head.Reference) > 0 { + name = r.Head.Reference[0].Value.(ast.Var) + } + if ast.IsFutureKeywordForRegoVersion(name.String(), ast.RegoV0) { + applyRegoVersion = false + return true + } + return false + }) + + if applyRegoVersion { + ast.WalkVars(mod, func(v ast.Var) bool { + if ast.IsFutureKeywordForRegoVersion(v.String(), ast.RegoV0) { + applyRegoVersion = false + return true + } + return false + }) + } + + if applyRegoVersion { + support[i].SetRegoVersion(ast.RegoV0CompatV1) + } else { + support[i].SetRegoVersion(r.regoVersion) + } + } + } else { + // If the target rego-version is not v0, then we apply the target rego-version to the support modules. + for i := range support { + support[i].SetRegoVersion(r.regoVersion) + } + } + + pq := &PartialQueries{ + Queries: queries, + Support: support, + } + + return pq, nil +} + +func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + checkCapture := iteration(query) || len(query) > 1 + + for _, expr := range query { + + if expr.Negated { + continue + } + + if expr.IsAssignment() || expr.IsEquality() { + continue + } + + var capture *ast.Term + + // If the expression can be evaluated as a function, rewrite it to + // capture the return value. 
E.g., neq(1,2) becomes neq(1,2,x) but + // plus(1,2,x) does not get rewritten. + switch terms := expr.Terms.(type) { + case *ast.Term: + capture = r.generateTermVar() + expr.Terms = ast.Equality.Expr(terms, capture).Terms + r.capture[expr] = capture.Value.(ast.Var) + case []*ast.Term: + tpe := r.compiler.TypeEnv.GetByValue(terms[0].Value) + if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 { + capture = r.generateTermVar() + expr.Terms = append(terms, capture) + r.capture[expr] = capture.Value.(ast.Var) + } + } + + if capture != nil && checkCapture { + cpy := expr.Copy() + cpy.Terms = capture + cpy.Generated = true + cpy.With = nil + query.Append(cpy) + } + } + + return query, nil +} + +func (*Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + if len(query) != 1 { + return nil, errors.New("partial evaluation requires single ref (not multiple expressions)") + } + + term, ok := query[0].Terms.(*ast.Term) + if !ok { + return nil, errors.New("partial evaluation requires ref (not expression)") + } + + ref, ok := term.Value.(ast.Ref) + if !ok { + return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.ValueName(term.Value)) + } + + if !ref.IsGround() { + return nil, errors.New("partial evaluation requires ground ref") + } + + return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil +} + +// rewriteEqualsForPartialQueryCompile will rewrite == to = in queries. Normally +// this wouldn't be done, except for handling queries with the `Partial` API +// where rewriting them can substantially simplify the result, and it is unlikely +// that the caller would need expression values. 
+func (*Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + doubleEq := ast.Equal.Ref() + unifyOp := ast.Equality.Ref() + ast.WalkExprs(query, func(x *ast.Expr) bool { + if x.IsCall() { + operator := x.Operator() + if operator.Equal(doubleEq) && len(x.Operands()) == 2 { + x.SetOperator(ast.NewTerm(unifyOp)) + } + } + return false + }) + return query, nil +} + +func (r *Rego) generateTermVar() *ast.Term { + r.termVarID++ + prefix := ast.WildcardPrefix + if p := r.targetPlugin(r.target); p != nil { + prefix = wasmVarPrefix + } else if r.target == targetWasm { + prefix = wasmVarPrefix + } + return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID)) +} + +func (r Rego) hasQuery() bool { + return len(r.query) != 0 || len(r.parsedQuery) != 0 +} + +func (r Rego) hasWasmModule() bool { + for _, b := range r.bundles { + if len(b.WasmModules) > 0 { + return true + } + } + return false +} + +type transactionCloser func(ctx context.Context, err error) error + +// getTxn will conditionally create a read or write transaction suitable for +// the configured Rego object. The returned function should be used to close the txn +// regardless of status. +func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) { + noopCloser := func(_ context.Context, _ error) error { + return nil // no-op default + } + + if r.txn != nil { + // Externally provided txn + return r.txn, noopCloser, nil + } + + // Create a new transaction.. + params := storage.TransactionParams{} + + // Bundles and data paths may require writing data files or manifests to storage + if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 { + + // If we were given a store we will *not* write to it, only do that on one + // which was created automatically on behalf of the user. 
+ if !r.ownStore { + return nil, noopCloser, errors.New("unable to start write transaction when store was provided") + } + + params.Write = true + } + + txn, err := r.store.NewTransaction(ctx, params) + if err != nil { + return nil, noopCloser, err + } + + // Setup a closer function that will abort or commit as needed. + closer := func(ctx context.Context, txnErr error) error { + var err error + + if txnErr == nil && params.Write { + err = r.store.Commit(ctx, txn) + } else { + r.store.Abort(ctx, txn) + } + + // Clear the auto created transaction now that it is closed. + r.txn = nil + + return err + } + + return txn, closer, nil +} + +func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler { + // Update the compiler to have a valid path conflict check + // for the current context and transaction. + return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn)) +} + +func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool { + var stop bool + ast.WalkRefs(body, func(x ast.Ref) bool { + if !stop { + if path.HasPrefix(x) { + stop = true + } + } + return stop + }) + return stop +} + +func isTermVar(v ast.Var) bool { + return strings.HasPrefix(string(v), ast.WildcardPrefix+"term") +} + +func isTermWasmVar(v ast.Var) bool { + return strings.HasPrefix(string(v), wasmVarPrefix+"term") +} + +func waitForDone(ctx context.Context, exit chan struct{}, f func()) { + select { + case <-exit: + return + case <-ctx.Done(): + f() + return + } +} + +type rawModule struct { + filename string + module string +} + +func (m rawModule) Parse() (*ast.Module, error) { + return ast.ParseModule(m.filename, m.module) +} + +func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) { + return ast.ParseModuleWithOpts(m.filename, m.module, opts) +} + +type extraStage struct { + after ast.StageID + stage ast.QueryCompilerStageDefinition +} + +type refResolver struct { + ref ast.Ref + r 
resolver.Resolver +} + +func iteration(x any) bool { + var stopped bool + + vis := ast.NewGenericVisitor(func(x any) bool { + switch x := x.(type) { + case *ast.Term: + if ast.IsComprehension(x.Value) { + return true + } + case ast.Ref: + if !stopped { + if bi := ast.BuiltinMap[x.String()]; bi != nil { + if bi.Relation { + stopped = true + return stopped + } + } + for i := 1; i < len(x); i++ { + if _, ok := x[i].Value.(ast.Var); ok { + stopped = true + return stopped + } + } + } + return stopped + } + return stopped + }) + + vis.Walk(x) + + return stopped +} + +func parseStringsToRefs(s []string) ([]ast.Ref, error) { + if len(s) == 0 { + return nil, nil + } + + refs := make([]ast.Ref, len(s)) + for i := range refs { + var err error + refs[i], err = ast.ParseRef(s[i]) + if err != nil { + return nil, err + } + } + + return refs, nil +} + +// helper function to finish a built-in function call. If an error occurred, +// wrap the error and return it. Otherwise, invoke the iterator if the result +// was defined. +func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error { + if err != nil { + var e *HaltError + sb := strings.Builder{} + if errors.As(err, &e) { + sb.Grow(len(name) + len(e.Error()) + 2) + sb.WriteString(name) + sb.WriteString(": ") + sb.WriteString(e.Error()) + tdErr := &topdown.Error{ + Code: topdown.BuiltinErr, + Message: sb.String(), + Location: bctx.Location, + } + return topdown.Halt{Err: tdErr.Wrap(e)} + } + sb.Grow(len(name) + len(err.Error()) + 2) + sb.WriteString(name) + sb.WriteString(": ") + sb.WriteString(err.Error()) + tdErr := &topdown.Error{ + Code: topdown.BuiltinErr, + Message: sb.String(), + Location: bctx.Location, + } + return tdErr.Wrap(err) + } + if result == nil { + return nil + } + return iter(result) +} + +// helper function to return an option that sets a custom built-in function. 
+func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) { + return func(r *Rego) { + r.builtinDecls[decl.Name] = &ast.Builtin{ + Name: decl.Name, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + } + r.builtinFuncs[decl.Name] = &topdown.Builtin{ + Decl: r.builtinDecls[decl.Name], + Func: f, + } + } +} + +func generateJSON(term *ast.Term, ectx *EvalContext) (any, error) { + return ast.JSONWithOpt(term.Value, + ast.JSONOpt{ + SortSets: ectx.sortSets, + CopyMaps: ectx.copyMaps, + }) +} + +func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) { + modules := make([]*ast.Module, 0, len(r.compiler.Modules)) + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + + decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap)) + maps.Copy(decls, ast.BuiltinMap) + maps.Copy(decls, r.builtinDecls) + + const queryName = "eval" // NOTE(tsandall): the query name is arbitrary + + p := planner.New(). + WithQueries([]planner.QuerySet{ + { + Name: queryName, + Queries: queries, + RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(), + }, + }). + WithModules(modules). + WithBuiltinDecls(decls). + WithDebug(r.dump) + + policy, err := p.Plan() + if err != nil { + return nil, err + } + if r.dump != nil { + fmt.Fprintln(r.dump, "PLAN:") + fmt.Fprintln(r.dump, "-----") + err = ir.Pretty(r.dump, policy) + if err != nil { + return nil, err + } + fmt.Fprintln(r.dump) + } + return policy, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go b/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go new file mode 100644 index 0000000000..f1618799eb --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go @@ -0,0 +1,102 @@ +package rego + +import ( + "fmt" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// ResultSet represents a collection of output from Rego evaluation. 
An empty +// result set represents an undefined query. +type ResultSet []Result + +// Vars represents a collection of variable bindings. The keys are the variable +// names and the values are the binding values. +type Vars map[string]any + +// WithoutWildcards returns a copy of v with wildcard variables removed. +func (v Vars) WithoutWildcards() Vars { + n := Vars{} + for k, v := range v { + if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() { + continue + } + n[k] = v + } + return n +} + +// Result defines the output of Rego evaluation. +type Result struct { + Expressions []*ExpressionValue `json:"expressions"` + Bindings Vars `json:"bindings,omitempty"` +} + +func newResult() Result { + return Result{ + Bindings: Vars{}, + } +} + +// Location defines a position in a Rego query or module. +type Location struct { + Row int `json:"row"` + Col int `json:"col"` +} + +// ExpressionValue defines the value of an expression in a Rego query. +type ExpressionValue struct { + Value any `json:"value"` + Text string `json:"text"` + Location *Location `json:"location"` +} + +func newExpressionValue(expr *ast.Expr, value any) *ExpressionValue { + result := &ExpressionValue{ + Value: value, + } + if expr.Location != nil { + result.Text = string(expr.Location.Text) + result.Location = &Location{ + Row: expr.Location.Row, + Col: expr.Location.Col, + } + } + return result +} + +func (ev *ExpressionValue) String() string { + return fmt.Sprint(ev.Value) +} + +// Allowed is a helper method that'll return true if all of these conditions hold: +// - the result set only has one element +// - there is only one expression in the result set's only element +// - that expression has the value `true` +// - there are no bindings. +// +// If bindings are present, this will yield `false`: it would be a pitfall to +// return `true` for a query like `data.authz.allow = x`, which always has result +// set element with value true, but could also have a binding `x: false`. 
+func (rs ResultSet) Allowed() bool { + x, _ := ResultValue[bool](rs) + return x +} + +// ResultValue is a helper function that'll return a value of type T if all of +// these conditions hold: +// - the result set only has one element +// - there is only one expression in the result set's only element +// - that expression has type T +// - there are no bindings. +func ResultValue[T any](rs ResultSet) (T, bool) { + var zero T + if len(rs) == 1 && len(rs[0].Bindings) == 0 { + if exprs := rs[0].Expressions; len(exprs) == 1 { + if v, ok := exprs[0].Value.(T); ok { + return v, true + } + } + } + return zero, false +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go b/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go new file mode 100644 index 0000000000..1f04d21c01 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go @@ -0,0 +1,29 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package resolver + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" +) + +// Resolver defines an external value resolver for OPA evaluations. +type Resolver interface { + Eval(context.Context, Input) (Result, error) +} + +// Input as provided to a Resolver instance when evaluating. +type Input struct { + Ref ast.Ref + Input *ast.Term + Metrics metrics.Metrics +} + +// Result of resolving a ref. 
+type Result struct { + Value ast.Value +} diff --git a/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go similarity index 87% rename from vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go rename to vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go index 9c13879dc3..884e4ca7cc 100644 --- a/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go +++ b/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go @@ -6,17 +6,18 @@ package wasm import ( "context" + "errors" "fmt" "strconv" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/rego/opa" - "github.com/open-policy-agent/opa/resolver" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/resolver" ) // New creates a new Resolver instance which is using the Wasm module // policy for the given entrypoint ref. -func New(entrypoints []ast.Ref, policy []byte, data interface{}) (*Resolver, error) { +func New(entrypoints []ast.Ref, policy []byte, data any) (*Resolver, error) { e, err := opa.LookupEngine("wasm") if err != nil { return nil, err @@ -96,9 +97,9 @@ func (r *Resolver) Eval(ctx context.Context, input resolver.Input) (resolver.Res return resolver.Result{}, fmt.Errorf("internal error: invalid entrypoint id %s", numValue) } - var in *interface{} + var in *any if input.Input != nil { - var str interface{} = []byte(input.Input.String()) + var str any = []byte(input.Input.String()) in = &str } @@ -121,12 +122,12 @@ func (r *Resolver) Eval(ctx context.Context, input resolver.Input) (resolver.Res } // SetData will update the external data for the Wasm instance. -func (r *Resolver) SetData(ctx context.Context, data interface{}) error { +func (r *Resolver) SetData(ctx context.Context, data any) error { return r.o.SetData(ctx, data) } // SetDataPath will set the provided data on the wasm instance at the specified path. 
-func (r *Resolver) SetDataPath(ctx context.Context, path []string, data interface{}) error { +func (r *Resolver) SetDataPath(ctx context.Context, path []string, data any) error { return r.o.SetDataPath(ctx, path, data) } @@ -144,7 +145,7 @@ func getResult(evalResult *opa.Result) (ast.Value, error) { resultSet, ok := parsed.Value.(ast.Set) if !ok { - return nil, fmt.Errorf("illegal result type") + return nil, errors.New("illegal result type") } if resultSet.Len() == 0 { @@ -152,14 +153,14 @@ func getResult(evalResult *opa.Result) (ast.Value, error) { } if resultSet.Len() > 1 { - return nil, fmt.Errorf("illegal result type") + return nil, errors.New("illegal result type") } var obj ast.Object err = resultSet.Iter(func(term *ast.Term) error { obj, ok = term.Value.(ast.Object) if !ok || obj.Len() != 1 { - return fmt.Errorf("illegal result type") + return errors.New("illegal result type") } return nil }) @@ -167,7 +168,7 @@ func getResult(evalResult *opa.Result) (ast.Value, error) { return nil, err } - result := obj.Get(ast.StringTerm("result")) + result := obj.Get(ast.InternedTerm("result")) return result.Value, nil } diff --git a/vendor/github.com/open-policy-agent/opa/schemas/authorizationPolicy.json b/vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json similarity index 100% rename from vendor/github.com/open-policy-agent/opa/schemas/authorizationPolicy.json rename to vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json diff --git a/vendor/github.com/open-policy-agent/opa/schemas/schemas.go b/vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/schemas/schemas.go rename to vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go b/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go new file mode 100644 index 0000000000..6fa2f86d98 --- /dev/null +++ 
b/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go @@ -0,0 +1,6 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package storage exposes the policy engine's storage layer. +package storage diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go b/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go new file mode 100644 index 0000000000..a3d1c00737 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go @@ -0,0 +1,121 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "fmt" +) + +const ( + // InternalErr indicates an unknown, internal error has occurred. + InternalErr = "storage_internal_error" + + // NotFoundErr indicates the path used in the storage operation does not + // locate a document. + NotFoundErr = "storage_not_found_error" + + // WriteConflictErr indicates a write on the path enocuntered a conflicting + // value inside the transaction. + WriteConflictErr = "storage_write_conflict_error" + + // InvalidPatchErr indicates an invalid patch/write was issued. The patch + // was rejected. + InvalidPatchErr = "storage_invalid_patch_error" + + // InvalidTransactionErr indicates an invalid operation was performed + // inside of the transaction. + InvalidTransactionErr = "storage_invalid_txn_error" + + // TriggersNotSupportedErr indicates the caller attempted to register a + // trigger against a store that does not support them. + TriggersNotSupportedErr = "storage_triggers_not_supported_error" + + // WritesNotSupportedErr indicate the caller attempted to perform a write + // against a store that does not support them. 
+ WritesNotSupportedErr = "storage_writes_not_supported_error" + + // PolicyNotSupportedErr indicate the caller attempted to perform a policy + // management operation against a store that does not support them. + PolicyNotSupportedErr = "storage_policy_not_supported_error" +) + +// Error is the error type returned by the storage layer. +type Error struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (err *Error) Error() string { + if err.Message != "" { + return fmt.Sprintf("%v: %v", err.Code, err.Message) + } + return err.Code +} + +// IsNotFound returns true if this error is a NotFoundErr. +func IsNotFound(err error) bool { + if err, ok := err.(*Error); ok { + return err.Code == NotFoundErr + } + return false +} + +// IsWriteConflictError returns true if this error a WriteConflictErr. +func IsWriteConflictError(err error) bool { + switch err := err.(type) { + case *Error: + return err.Code == WriteConflictErr + } + return false +} + +// IsInvalidPatch returns true if this error is a InvalidPatchErr. +func IsInvalidPatch(err error) bool { + switch err := err.(type) { + case *Error: + return err.Code == InvalidPatchErr + } + return false +} + +// IsInvalidTransaction returns true if this error is a InvalidTransactionErr. +func IsInvalidTransaction(err error) bool { + switch err := err.(type) { + case *Error: + return err.Code == InvalidTransactionErr + } + return false +} + +// IsIndexingNotSupported is a stub for backwards-compatibility. +// +// Deprecated: We no longer return IndexingNotSupported errors, so it is +// unnecessary to check for them. 
+func IsIndexingNotSupported(error) bool { return false } + +func writeConflictError(path Path) *Error { + return &Error{ + Code: WriteConflictErr, + Message: path.String(), + } +} + +func triggersNotSupportedError() *Error { + return &Error{ + Code: TriggersNotSupportedErr, + } +} + +func writesNotSupportedError() *Error { + return &Error{ + Code: WritesNotSupportedErr, + } +} + +func policyNotSupportedError() *Error { + return &Error{ + Code: PolicyNotSupportedErr, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go similarity index 85% rename from vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go rename to vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go index 5a8a6743fa..40f18ab0de 100644 --- a/vendor/github.com/open-policy-agent/opa/storage/inmem/ast.go +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go @@ -8,10 +8,10 @@ import ( "fmt" "strconv" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/internal/errors" - "github.com/open-policy-agent/opa/storage/internal/ptr" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" + "github.com/open-policy-agent/opa/v1/storage/internal/ptr" ) type updateAST struct { @@ -28,7 +28,7 @@ func (u *updateAST) Remove() bool { return u.remove } -func (u *updateAST) Set(v interface{}) { +func (u *updateAST) Set(v any) { if v, ok := v.(ast.Value); ok { u.value = v } else { @@ -36,7 +36,7 @@ func (u *updateAST) Set(v interface{}) { } } -func (u *updateAST) Value() interface{} { +func (u *updateAST) Value() any { return u.value } @@ -46,7 +46,7 @@ func (u *updateAST) Relative(path storage.Path) dataUpdate { return &cpy } -func (u *updateAST) Apply(v interface{}) interface{} { +func (u *updateAST) Apply(v any) any { 
if len(u.path) == 0 { return u.value } @@ -72,11 +72,10 @@ func (u *updateAST) Apply(v interface{}) interface{} { return newV } -func newUpdateAST(data interface{}, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { - +func newUpdateAST(data any, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { switch data.(type) { case ast.Null, ast.Boolean, ast.Number, ast.String: - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } switch data := data.(type) { @@ -94,15 +93,13 @@ func newUpdateAST(data interface{}, op storage.PatchOp, path storage.Path, idx i } func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { - if idx == len(path)-1 { if path[idx] == "-" || path[idx] == strconv.Itoa(data.Len()) { if op != storage.AddOp { - return nil, invalidPatchError("%v: invalid patch path", path) + return nil, errors.NewInvalidPatchError("%v: invalid patch path", path) } - cpy := data.Copy() - cpy = cpy.Append(ast.NewTerm(value)) + cpy := data.Append(ast.NewTerm(value)) return &updateAST{path[:len(path)-1], false, cpy}, nil } @@ -114,7 +111,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i switch op { case storage.AddOp: var results []*ast.Term - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { if i == pos { results = append(results, ast.NewTerm(value)) } @@ -125,7 +122,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i case storage.RemoveOp: var results []*ast.Term - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { if i != pos { results = append(results, data.Elem(i)) } @@ -134,7 +131,7 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i default: var results []*ast.Term - for i := 0; i < data.Len(); i++ { + for i := range data.Len() { if i == pos { results = append(results, 
ast.NewTerm(value)) } else { @@ -155,14 +152,14 @@ func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, i } func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { - key := ast.StringTerm(path[idx]) + key := ast.InternedTerm(path[idx]) val := data.Get(key) if idx == len(path)-1 { switch op { case storage.ReplaceOp, storage.RemoveOp: if val == nil { - return nil, errors.NewNotFoundError(path) + return nil, errors.NotFoundErr } } return &updateAST{path, op == storage.RemoveOp, value}, nil @@ -172,14 +169,7 @@ func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path, return newUpdateAST(val.Value, op, path, idx+1, value) } - return nil, errors.NewNotFoundError(path) -} - -func interfaceToValue(v interface{}) (ast.Value, error) { - if v, ok := v.(ast.Value); ok { - return v, nil - } - return ast.InterfaceToValue(v) + return nil, errors.NotFoundErr } // setInAst updates the value in the AST at the given path with the given value. @@ -201,7 +191,7 @@ func setInAst(data ast.Value, path storage.Path, value ast.Value) (ast.Value, er } func setInAstObject(obj ast.Object, path storage.Path, value ast.Value) (ast.Value, error) { - key := ast.StringTerm(path[0]) + key := ast.InternedTerm(path[0]) if len(path) == 1 { obj.Insert(key, ast.NewTerm(value)) @@ -257,7 +247,7 @@ func removeInAst(value ast.Value, path storage.Path) (ast.Value, error) { } func removeInAstObject(obj ast.Object, path storage.Path) (ast.Value, error) { - key := ast.StringTerm(path[0]) + key := ast.InternedTerm(path[0]) if len(path) == 1 { var items [][2]*ast.Term @@ -296,7 +286,7 @@ func removeInAstArray(arr *ast.Array, path storage.Path) (ast.Value, error) { if len(path) == 1 { var elems []*ast.Term // Note: possibly expensive operation for large data. 
- for i := 0; i < arr.Len(); i++ { + for i := range arr.Len() { if i == idx { continue } diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go new file mode 100644 index 0000000000..9fa145a051 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go @@ -0,0 +1,463 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package inmem implements an in-memory version of the policy engine's storage +// layer. +// +// The in-memory store is used as the default storage layer implementation. The +// in-memory store supports multi-reader/single-writer concurrency with +// rollback. +// +// Callers should assume the in-memory store does not make copies of written +// data. Once data is written to the in-memory store, it should not be modified +// (outside of calling Store.Write). Furthermore, data read from the in-memory +// store should be treated as read-only. +package inmem + +import ( + "context" + "fmt" + "io" + "path/filepath" + "strings" + "sync" + "sync/atomic" + + "github.com/open-policy-agent/opa/internal/merge" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" + "github.com/open-policy-agent/opa/v1/util" +) + +// New returns an empty in-memory store. +func New() storage.Store { + return NewWithOpts() +} + +// NewWithOpts returns an empty in-memory store, with extra options passed. 
+func NewWithOpts(opts ...Opt) storage.Store { + s := &store{ + triggers: map[*handle]storage.TriggerConfig{}, + policies: map[string][]byte{}, + roundTripOnWrite: true, + returnASTValuesOnRead: false, + } + + for _, opt := range opts { + opt(s) + } + + if s.returnASTValuesOnRead { + s.data = ast.NewObject() + s.roundTripOnWrite = false + } else { + s.data = map[string]any{} + } + + return s +} + +// NewFromObject returns a new in-memory store from the supplied data object. +func NewFromObject(data map[string]any) storage.Store { + return NewFromObjectWithOpts(data) +} + +// NewFromObjectWithOpts returns a new in-memory store from the supplied data object, with the +// options passed. +func NewFromObjectWithOpts(data map[string]any, opts ...Opt) storage.Store { + db := NewWithOpts(opts...) + ctx := context.Background() + txn, err := db.NewTransaction(ctx, storage.WriteParams) + if err != nil { + panic(err) + } + if err := db.Write(ctx, txn, storage.AddOp, storage.RootPath, data); err != nil { + panic(err) + } + if err := db.Commit(ctx, txn); err != nil { + panic(err) + } + return db +} + +// NewFromReader returns a new in-memory store from a reader that produces a +// JSON serialized object. This function is for test purposes. +func NewFromReader(r io.Reader) storage.Store { + return NewFromReaderWithOpts(r) +} + +// NewFromReader returns a new in-memory store from a reader that produces a +// JSON serialized object, with extra options. This function is for test purposes. +func NewFromReaderWithOpts(r io.Reader, opts ...Opt) storage.Store { + var data map[string]any + if err := util.NewJSONDecoder(r).Decode(&data); err != nil { + panic(err) + } + return NewFromObjectWithOpts(data, opts...) 
+} + +type store struct { + rmu sync.RWMutex // reader-writer lock + wmu sync.Mutex // writer lock + xid uint64 // last generated transaction id + data any // raw or AST data + policies map[string][]byte // raw policies + triggers map[*handle]storage.TriggerConfig // registered triggers + + // roundTripOnWrite, if true, means that every call to Write round trips the + // data through JSON before adding the data to the store. Defaults to true. + roundTripOnWrite bool + + // returnASTValuesOnRead, if true, means that the store will eagerly convert data to AST values, + // and return them on Read. + // FIXME: naming(?) + returnASTValuesOnRead bool +} + +type handle struct { + db *store +} + +func (db *store) NewTransaction(_ context.Context, params ...storage.TransactionParams) (storage.Transaction, error) { + txn := &transaction{ + xid: atomic.AddUint64(&db.xid, uint64(1)), + db: db, + } + + if len(params) > 0 { + txn.write = params[0].Write + txn.context = params[0].Context + } + + if txn.write { + db.wmu.Lock() + } else { + db.rmu.RLock() + } + + return txn, nil +} + +// Truncate implements the storage.Store interface. This method must be called within a transaction. 
+func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params storage.TransactionParams, it storage.Iterator) error { + var update *storage.Update + var err error + + underlying, err := db.underlying(txn) + if err != nil { + return err + } + + mergedData := map[string]any{} + + for { + if update, err = it.Next(); err != nil { + break + } + + if update.IsPolicy { + err = underlying.UpsertPolicy(strings.TrimLeft(update.Path.String(), "/"), update.Value) + if err != nil { + return err + } + } else { + var value any + if err = util.Unmarshal(update.Value, &value); err != nil { + return err + } + + var key []string + dirpath := strings.TrimLeft(update.Path.String(), "/") + if len(dirpath) > 0 { + key = strings.Split(dirpath, "/") + } + + if value != nil { + obj, err := mktree(key, value) + if err != nil { + return err + } + + merged, ok := merge.InterfaceMaps(mergedData, obj) + if !ok { + return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) + } + mergedData = merged + } + } + } + + // err is known not to be nil at this point, as it getting assigned + // a non-nil value is the only way the loop above can exit. + if err != io.EOF { + return err + } + + // For backwards compatibility, check if `RootOverwrite` was configured. 
+ if params.RootOverwrite { + return underlying.Write(storage.AddOp, storage.RootPath, mergedData) + } + + for _, root := range params.BasePaths { + newPath, ok := storage.ParsePathEscaped("/" + root) + if !ok { + return fmt.Errorf("storage path invalid: %v", newPath) + } + + if value, ok := lookup(newPath, mergedData); ok { + if len(newPath) > 0 { + if err := storage.MakeDir(ctx, db, txn, newPath[:len(newPath)-1]); err != nil { + return err + } + } + if err := underlying.Write(storage.AddOp, newPath, value); err != nil { + return err + } + } + } + return nil +} + +func (db *store) Commit(ctx context.Context, txn storage.Transaction) error { + underlying, err := db.underlying(txn) + if err != nil { + return err + } + if underlying.write { + db.rmu.Lock() + event := underlying.Commit() + db.runOnCommitTriggers(ctx, txn, event) + // Mark the transaction stale after executing triggers, so they can + // perform store operations if needed. + underlying.stale = true + db.rmu.Unlock() + db.wmu.Unlock() + } else { + db.rmu.RUnlock() + } + return nil +} + +func (db *store) Abort(_ context.Context, txn storage.Transaction) { + underlying, err := db.underlying(txn) + if err != nil { + panic(err) + } + underlying.stale = true + if underlying.write { + db.wmu.Unlock() + } else { + db.rmu.RUnlock() + } +} + +func (db *store) ListPolicies(_ context.Context, txn storage.Transaction) ([]string, error) { + underlying, err := db.underlying(txn) + if err != nil { + return nil, err + } + return underlying.ListPolicies(), nil +} + +func (db *store) GetPolicy(_ context.Context, txn storage.Transaction, id string) ([]byte, error) { + underlying, err := db.underlying(txn) + if err != nil { + return nil, err + } + return underlying.GetPolicy(id) +} + +func (db *store) UpsertPolicy(_ context.Context, txn storage.Transaction, id string, bs []byte) error { + underlying, err := db.underlying(txn) + if err != nil { + return err + } + return underlying.UpsertPolicy(id, bs) +} + +func (db *store) 
DeletePolicy(_ context.Context, txn storage.Transaction, id string) error { + underlying, err := db.underlying(txn) + if err != nil { + return err + } + if _, err := underlying.GetPolicy(id); err != nil { + return err + } + return underlying.DeletePolicy(id) +} + +func (db *store) Register(_ context.Context, txn storage.Transaction, config storage.TriggerConfig) (storage.TriggerHandle, error) { + underlying, err := db.underlying(txn) + if err != nil { + return nil, err + } + if !underlying.write { + return nil, &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "triggers must be registered with a write transaction", + } + } + h := &handle{db} + db.triggers[h] = config + return h, nil +} + +func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.Path) (any, error) { + underlying, err := db.underlying(txn) + if err != nil { + return nil, err + } + + return underlying.Read(path) +} + +func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value any) error { + underlying, err := db.underlying(txn) + if err != nil { + return err + } + + if db.returnASTValuesOnRead || !util.NeedsRoundTrip(value) { + // Fast path when value is nil, bool, string or json.Number. 
+ return underlying.Write(op, path, value) + } + + val := util.Reference(value) + if db.roundTripOnWrite { + if err := util.RoundTrip(val); err != nil { + return err + } + } + + return underlying.Write(op, path, *val) +} + +func (h *handle) Unregister(_ context.Context, txn storage.Transaction) { + underlying, err := h.db.underlying(txn) + if err != nil { + panic(err) + } + if !underlying.write { + panic(&storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "triggers must be unregistered with a write transaction", + }) + } + delete(h.db.triggers, h) +} + +func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { + // While it's unlikely, the API allows one trigger to be configured to want + // data conversion, and another that doesn't. So let's handle that properly. + var wantsDataConversion bool + if db.returnASTValuesOnRead && len(event.Data) > 0 { + for _, t := range db.triggers { + if !t.SkipDataConversion { + wantsDataConversion = true + break + } + } + } + + var converted storage.TriggerEvent + if wantsDataConversion { + converted = storage.TriggerEvent{ + Policy: event.Policy, + Data: make([]storage.DataEvent, 0, len(event.Data)), + Context: event.Context, + } + + for _, dataEvent := range event.Data { + if astData, ok := dataEvent.Data.(ast.Value); ok { + jsn, err := ast.ValueToInterface(astData, illegalResolver{}) + if err != nil { + panic(err) + } + converted.Data = append(converted.Data, storage.DataEvent{ + Path: dataEvent.Path, + Data: jsn, + Removed: dataEvent.Removed, + }) + } + } + } + + for _, t := range db.triggers { + if wantsDataConversion && !t.SkipDataConversion { + t.OnCommit(ctx, txn, converted) + } else { + t.OnCommit(ctx, txn, event) + } + } +} + +type illegalResolver struct{} + +func (illegalResolver) Resolve(ref ast.Ref) (any, error) { + return nil, fmt.Errorf("illegal value: %v", ref) +} + +func (db *store) underlying(txn storage.Transaction) (*transaction, error) { + 
underlying, ok := txn.(*transaction) + if !ok { + return nil, &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: fmt.Sprintf("unexpected transaction type %T", txn), + } + } + if underlying.db != db { + return nil, &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "unknown transaction", + } + } + if underlying.stale { + return nil, &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "stale transaction", + } + } + return underlying, nil +} + +func mktree(path []string, value any) (map[string]any, error) { + if len(path) == 0 { + // For 0 length path the value is the full tree. + obj, ok := value.(map[string]any) + if !ok { + return nil, errors.RootMustBeObjectErr + } + return obj, nil + } + + dir := map[string]any{} + for i := len(path) - 1; i > 0; i-- { + dir[path[i]] = value + value = dir + dir = map[string]any{} + } + dir[path[0]] = value + + return dir, nil +} + +func lookup(path storage.Path, data map[string]any) (any, bool) { + if len(path) == 0 { + return data, true + } + for i := range len(path) - 1 { + value, ok := data[path[i]] + if !ok { + return nil, false + } + obj, ok := value.(map[string]any) + if !ok { + return nil, false + } + data = obj + } + value, ok := data[path[len(path)-1]] + return value, ok +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go new file mode 100644 index 0000000000..2239fc73a3 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go @@ -0,0 +1,37 @@ +package inmem + +// An Opt modifies store at instantiation. +type Opt func(*store) + +// OptRoundTripOnWrite sets whether incoming objects written to store are +// round-tripped through JSON to ensure they are serializable to JSON. +// +// Callers should disable this if they can guarantee all objects passed to +// Write() are serializable to JSON. 
Failing to do so may result in undefined +// behavior, including panics. +// +// Usually, when only storing objects in the inmem store that have been read +// via encoding/json, this is safe to disable, and comes with an improvement +// in performance and memory use. +// +// If setting to false, callers should deep-copy any objects passed to Write() +// unless they can guarantee the objects will not be mutated after being written, +// and that mutations happening to the objects after they have been passed into +// Write() don't affect their logic. +func OptRoundTripOnWrite(enabled bool) Opt { + return func(s *store) { + s.roundTripOnWrite = enabled + } +} + +// OptReturnASTValuesOnRead sets whether data values added to the store should be +// eagerly converted to AST values, which are then returned on read. +// +// When enabled, this feature does not sanity check data before converting it to AST values, +// which may result in panics if the data is not valid. Callers should ensure that passed data +// can be serialized to AST values; otherwise, it's recommended to also enable OptRoundTripOnWrite. +func OptReturnASTValuesOnRead(enabled bool) Opt { + return func(s *store) { + s.returnASTValuesOnRead = enabled + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go new file mode 100644 index 0000000000..e76bccd013 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go @@ -0,0 +1,539 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package inmem + +import ( + "container/list" + "encoding/json" + "slices" + "strconv" + + "github.com/open-policy-agent/opa/internal/deepcopy" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" + "github.com/open-policy-agent/opa/v1/storage/internal/ptr" +) + +// transaction implements the low-level read/write operations on the in-memory +// store and contains the state required for pending transactions. +// +// For write transactions, the struct contains a logical set of updates +// performed by write operations in the transaction. Each write operation +// compacts the set such that two updates never overlap: +// +// - If new update path is a prefix of existing update path, existing update is +// removed, new update is added. +// +// - If existing update path is a prefix of new update path, existing update is +// modified. +// +// - Otherwise, new update is added. +// +// Read transactions do not require any special handling and simply passthrough +// to the underlying store. Read transactions do not support upgrade. +type transaction struct { + db *store + updates *list.List + context *storage.Context + policies map[string]policyUpdate + xid uint64 + write bool + stale bool +} + +type policyUpdate struct { + value []byte + remove bool +} + +func (txn *transaction) ID() uint64 { + return txn.xid +} + +func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value any) error { + if !txn.write { + return &storage.Error{Code: storage.InvalidTransactionErr, Message: "data write during read transaction"} + } + + if txn.updates == nil { + txn.updates = list.New() + } + + if len(path) == 0 { + return txn.updateRoot(op, value) + } + + for curr := txn.updates.Front(); curr != nil; { + update := curr.Value.(dataUpdate) + + // Check if new update masks existing update exactly. 
In this case, the + // existing update can be removed and no other updates have to be + // visited (because no two updates overlap.) + if update.Path().Equal(path) { + if update.Remove() { + if op != storage.AddOp { + return errors.NotFoundErr + } + } + // If the last update has the same path and value, we have nothing to do. + if txn.db.returnASTValuesOnRead { + if astValue, ok := update.Value().(ast.Value); ok { + if equalsValue(value, astValue) { + return nil + } + } + } else if comparableEquals(update.Value(), value) { + return nil + } + + txn.updates.Remove(curr) + break + } + + // Check if new update masks existing update. In this case, the + // existing update has to be removed but other updates may overlap, so + // we must continue. + if update.Path().HasPrefix(path) { + remove := curr + curr = curr.Next() + txn.updates.Remove(remove) + continue + } + + // Check if new update modifies existing update. In this case, the + // existing update is mutated. + if path.HasPrefix(update.Path()) { + if update.Remove() { + return errors.NotFoundErr + } + suffix := path[len(update.Path()):] + newUpdate, err := txn.db.newUpdate(update.Value(), op, suffix, 0, value) + if err != nil { + return err + } + update.Set(newUpdate.Apply(update.Value())) + return nil + } + + curr = curr.Next() + } + + update, err := txn.db.newUpdate(txn.db.data, op, path, 0, value) + if err != nil { + return err + } + + txn.updates.PushFront(update) + return nil +} + +func comparableEquals(a, b any) bool { + switch a := a.(type) { + case nil: + return b == nil + case bool: + if vb, ok := b.(bool); ok { + return vb == a + } + case string: + if vs, ok := b.(string); ok { + return vs == a + } + case json.Number: + if vn, ok := b.(json.Number); ok { + return vn == a + } + } + return false +} + +func (txn *transaction) updateRoot(op storage.PatchOp, value any) error { + if op == storage.RemoveOp { + return errors.RootCannotBeRemovedErr + } + + var update any + if txn.db.returnASTValuesOnRead { + 
valueAST, err := ast.InterfaceToValue(value) + if err != nil { + return err + } + if _, ok := valueAST.(ast.Object); !ok { + return errors.RootMustBeObjectErr + } + + update = &updateAST{ + path: storage.RootPath, + remove: false, + value: valueAST, + } + } else { + if _, ok := value.(map[string]any); !ok { + return errors.RootMustBeObjectErr + } + + update = &updateRaw{ + path: storage.RootPath, + remove: false, + value: value, + } + } + + txn.updates.Init() + txn.updates.PushFront(update) + + return nil +} + +func (txn *transaction) Commit() (result storage.TriggerEvent) { + result.Context = txn.context + + if txn.updates != nil { + if len(txn.db.triggers) > 0 { + result.Data = slices.Grow(result.Data, txn.updates.Len()) + } + + for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { + action := curr.Value.(dataUpdate) + txn.db.data = action.Apply(txn.db.data) + + if len(txn.db.triggers) > 0 { + result.Data = append(result.Data, storage.DataEvent{ + Path: action.Path(), + Data: action.Value(), + Removed: action.Remove(), + }) + } + } + } + + if len(txn.policies) > 0 && len(txn.db.triggers) > 0 { + result.Policy = slices.Grow(result.Policy, len(txn.policies)) + } + + for id, upd := range txn.policies { + if upd.remove { + delete(txn.db.policies, id) + } else { + txn.db.policies[id] = upd.value + } + + if len(txn.db.triggers) > 0 { + result.Policy = append(result.Policy, storage.PolicyEvent{ + ID: id, + Data: upd.value, + Removed: upd.remove, + }) + } + } + return result +} + +func pointer(v any, path storage.Path) (any, error) { + if v, ok := v.(ast.Value); ok { + return ptr.ValuePtr(v, path) + } + return ptr.Ptr(v, path) +} + +func deepcpy(v any) any { + if v, ok := v.(ast.Value); ok { + var cpy ast.Value + + switch data := v.(type) { + case ast.Object: + cpy = data.Copy() + case *ast.Array: + cpy = data.Copy() + } + + return cpy + } + return deepcopy.DeepCopy(v) +} + +func (txn *transaction) Read(path storage.Path) (any, error) { + if !txn.write || 
txn.updates == nil { + return pointer(txn.db.data, path) + } + + var merge []dataUpdate + + for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { + + upd := curr.Value.(dataUpdate) + + if path.HasPrefix(upd.Path()) { + if upd.Remove() { + return nil, errors.NotFoundErr + } + return pointer(upd.Value(), path[len(upd.Path()):]) + } + + if upd.Path().HasPrefix(path) { + merge = append(merge, upd) + } + } + + data, err := pointer(txn.db.data, path) + + if err != nil { + return nil, err + } + + if len(merge) == 0 { + return data, nil + } + + cpy := deepcpy(data) + + for _, update := range merge { + cpy = update.Relative(path).Apply(cpy) + } + + return cpy, nil +} + +func (txn *transaction) ListPolicies() (ids []string) { + for id := range txn.db.policies { + if _, ok := txn.policies[id]; !ok { + ids = append(ids, id) + } + } + for id, update := range txn.policies { + if !update.remove { + ids = append(ids, id) + } + } + return ids +} + +func (txn *transaction) GetPolicy(id string) ([]byte, error) { + if txn.policies != nil { + if update, ok := txn.policies[id]; ok { + if !update.remove { + return update.value, nil + } + return nil, errors.NewNotFoundErrorf("policy id %q", id) + } + } + if exist, ok := txn.db.policies[id]; ok { + return exist, nil + } + return nil, errors.NewNotFoundErrorf("policy id %q", id) +} + +func (txn *transaction) UpsertPolicy(id string, bs []byte) error { + return txn.updatePolicy(id, policyUpdate{bs, false}) +} + +func (txn *transaction) DeletePolicy(id string) error { + return txn.updatePolicy(id, policyUpdate{nil, true}) +} + +func (txn *transaction) updatePolicy(id string, update policyUpdate) error { + if !txn.write { + return &storage.Error{Code: storage.InvalidTransactionErr, Message: "policy write during read transaction"} + } + + if txn.policies == nil { + txn.policies = map[string]policyUpdate{id: update} + } else { + txn.policies[id] = update + } + + return nil +} + +type dataUpdate interface { + Path() storage.Path + 
Remove() bool + Apply(any) any + Relative(path storage.Path) dataUpdate + Set(any) + Value() any +} + +// update contains state associated with an update to be applied to the +// in-memory data store. +type updateRaw struct { + path storage.Path // data path modified by update + remove bool // indicates whether update removes the value at path + value any // value to add/replace at path (ignored if remove is true) +} + +func equalsValue(a any, v ast.Value) bool { + if a, ok := a.(ast.Value); ok { + return a.Compare(v) == 0 + } + switch a := a.(type) { + case nil: + return v == ast.NullValue + case bool: + if vb, ok := v.(ast.Boolean); ok { + return bool(vb) == a + } + case string: + if vs, ok := v.(ast.String); ok { + return string(vs) == a + } + } + + return false +} + +func (db *store) newUpdate(data any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) { + if db.returnASTValuesOnRead { + astData, err := ast.InterfaceToValue(data) + if err != nil { + return nil, err + } + astValue, err := ast.InterfaceToValue(value) + if err != nil { + return nil, err + } + return newUpdateAST(astData, op, path, idx, astValue) + } + return newUpdateRaw(data, op, path, idx, value) +} + +func newUpdateRaw(data any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) { + switch data.(type) { + case nil, bool, json.Number, string: + return nil, errors.NotFoundErr + } + + switch data := data.(type) { + case map[string]any: + return newUpdateObject(data, op, path, idx, value) + + case []any: + return newUpdateArray(data, op, path, idx, value) + } + + return nil, &storage.Error{ + Code: storage.InternalErr, + Message: "invalid data value encountered", + } +} + +func newUpdateArray(data []any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) { + if idx == len(path)-1 { + if path[idx] == "-" || path[idx] == strconv.Itoa(len(data)) { + if op != storage.AddOp { + return nil, 
errors.NewInvalidPatchError("%v: invalid patch path", path) + } + cpy := make([]any, len(data)+1) + copy(cpy, data) + cpy[len(data)] = value + return &updateRaw{path[:len(path)-1], false, cpy}, nil + } + + pos, err := ptr.ValidateArrayIndex(data, path[idx], path) + if err != nil { + return nil, err + } + + switch op { + case storage.AddOp: + cpy := make([]any, len(data)+1) + copy(cpy[:pos], data[:pos]) + copy(cpy[pos+1:], data[pos:]) + cpy[pos] = value + return &updateRaw{path[:len(path)-1], false, cpy}, nil + + case storage.RemoveOp: + cpy := make([]any, len(data)-1) + copy(cpy[:pos], data[:pos]) + copy(cpy[pos:], data[pos+1:]) + return &updateRaw{path[:len(path)-1], false, cpy}, nil + + default: + cpy := make([]any, len(data)) + copy(cpy, data) + cpy[pos] = value + return &updateRaw{path[:len(path)-1], false, cpy}, nil + } + } + + pos, err := ptr.ValidateArrayIndex(data, path[idx], path) + if err != nil { + return nil, err + } + + return newUpdateRaw(data[pos], op, path, idx+1, value) +} + +func newUpdateObject(data map[string]any, op storage.PatchOp, path storage.Path, idx int, value any) (dataUpdate, error) { + + if idx == len(path)-1 { + switch op { + case storage.ReplaceOp, storage.RemoveOp: + if _, ok := data[path[idx]]; !ok { + return nil, errors.NotFoundErr + } + } + return &updateRaw{path, op == storage.RemoveOp, value}, nil + } + + if data, ok := data[path[idx]]; ok { + return newUpdateRaw(data, op, path, idx+1, value) + } + + return nil, errors.NotFoundErr +} + +func (u *updateRaw) Remove() bool { + return u.remove +} + +func (u *updateRaw) Path() storage.Path { + return u.path +} + +func (u *updateRaw) Apply(data any) any { + if len(u.path) == 0 { + return u.value + } + parent, err := ptr.Ptr(data, u.path[:len(u.path)-1]) + if err != nil { + panic(err) + } + key := u.path[len(u.path)-1] + if u.remove { + obj := parent.(map[string]any) + delete(obj, key) + return data + } + switch parent := parent.(type) { + case map[string]any: + if parent == nil { + 
parent = make(map[string]any, 1) + } + parent[key] = u.value + case []any: + idx, err := strconv.Atoi(key) + if err != nil { + panic(err) + } + parent[idx] = u.value + } + return data +} + +func (u *updateRaw) Set(v any) { + u.value = v +} + +func (u *updateRaw) Value() any { + return u.value +} + +func (u *updateRaw) Relative(path storage.Path) dataUpdate { + cpy := *u + cpy.path = cpy.path[len(path):] + return &cpy +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go b/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go new file mode 100644 index 0000000000..cb2e811dc4 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go @@ -0,0 +1,264 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/metrics" +) + +// Transaction defines the interface that identifies a consistent snapshot over +// the policy engine's storage layer. +type Transaction interface { + ID() uint64 +} + +// Store defines the interface for the storage layer's backend. +type Store interface { + Trigger + Policy + + // NewTransaction is called create a new transaction in the store. + NewTransaction(context.Context, ...TransactionParams) (Transaction, error) + + // Read is called to fetch a document referred to by path. + Read(context.Context, Transaction, Path) (any, error) + + // Write is called to modify a document referred to by path. + Write(context.Context, Transaction, PatchOp, Path, any) error + + // Commit is called to finish the transaction. If Commit returns an error, the + // transaction must be automatically aborted by the Store implementation. 
+ Commit(context.Context, Transaction) error + + // Truncate is called to make a copy of the underlying store, write documents in the new store + // by creating multiple transactions in the new store as needed and finally swapping + // over to the new storage instance. This method must be called within a transaction on the original store. + Truncate(context.Context, Transaction, TransactionParams, Iterator) error + + // Abort is called to cancel the transaction. + Abort(context.Context, Transaction) +} + +// MakeDirer defines the interface a Store could realize to override the +// generic MakeDir functionality in storage.MakeDir +type MakeDirer interface { + MakeDir(context.Context, Transaction, Path) error +} + +// NonEmptyer allows a store implemention to override NonEmpty()) +type NonEmptyer interface { + NonEmpty(context.Context, Transaction) func([]string) (bool, error) +} + +// Closer is an optional interface that storage implementations can implement +// to perform cleanup operations when the store is being shut down. +// If a Store implements this interface, Close will be called during +// graceful shutdown of the OPA runtime. +type Closer interface { + Close(context.Context) error +} + +// TransactionParams describes a new transaction. +type TransactionParams struct { + + // BasePaths indicates the top-level paths where write operations will be performed in this transaction. + BasePaths []string + + // RootOverwrite is deprecated. Use BasePaths instead. + RootOverwrite bool + + // Write indicates if this transaction will perform any write operations. + Write bool + + // Context contains key/value pairs passed to triggers. + Context *Context +} + +// Context is a simple container for key/value pairs. +type Context struct { + values map[any]any +} + +// NewContext returns a new context object. +func NewContext() *Context { + return &Context{ + values: map[any]any{}, + } +} + +// Get returns the key value in the context. 
+func (ctx *Context) Get(key any) any { + if ctx == nil { + return nil + } + return ctx.values[key] +} + +// Put adds a key/value pair to the context. +func (ctx *Context) Put(key, value any) { + ctx.values[key] = value +} + +var metricsKey = struct{}{} + +// WithMetrics allows passing metrics via the Context. +// It puts the metrics object in the ctx, and returns the same +// ctx (not a copy) for convenience. +func (ctx *Context) WithMetrics(m metrics.Metrics) *Context { + ctx.values[metricsKey] = m + return ctx +} + +// Metrics() allows using a Context's metrics. Returns nil if metrics +// were not attached to the Context. +func (ctx *Context) Metrics() metrics.Metrics { + if m, ok := ctx.values[metricsKey]; ok { + if met, ok := m.(metrics.Metrics); ok { + return met + } + } + return nil +} + +// WriteParams specifies the TransactionParams for a write transaction. +var WriteParams = TransactionParams{ + Write: true, +} + +// PatchOp is the enumeration of supposed modifications. +type PatchOp int + +// Patch supports add, remove, and replace operations. +const ( + AddOp PatchOp = iota + RemoveOp = iota + ReplaceOp = iota +) + +// WritesNotSupported provides a default implementation of the write +// interface which may be used if the backend does not support writes. +type WritesNotSupported struct{} + +func (WritesNotSupported) Write(context.Context, Transaction, PatchOp, Path, any) error { + return writesNotSupportedError() +} + +// Policy defines the interface for policy module storage. +type Policy interface { + ListPolicies(context.Context, Transaction) ([]string, error) + GetPolicy(context.Context, Transaction, string) ([]byte, error) + UpsertPolicy(context.Context, Transaction, string, []byte) error + DeletePolicy(context.Context, Transaction, string) error +} + +// PolicyNotSupported provides a default implementation of the policy interface +// which may be used if the backend does not support policy storage. 
+type PolicyNotSupported struct{} + +// ListPolicies always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) { + return nil, policyNotSupportedError() +} + +// GetPolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) { + return nil, policyNotSupportedError() +} + +// UpsertPolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error { + return policyNotSupportedError() +} + +// DeletePolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error { + return policyNotSupportedError() +} + +// PolicyEvent describes a change to a policy. +type PolicyEvent struct { + ID string + Data []byte + Removed bool +} + +// DataEvent describes a change to a base data document. +type DataEvent struct { + Path Path + Data any + Removed bool +} + +// TriggerEvent describes the changes that caused the trigger to be invoked. +type TriggerEvent struct { + Policy []PolicyEvent + Data []DataEvent + Context *Context +} + +// IsZero returns true if the TriggerEvent indicates no changes occurred. This +// function is primarily for test purposes. +func (e TriggerEvent) IsZero() bool { + return !e.PolicyChanged() && !e.DataChanged() +} + +// PolicyChanged returns true if the trigger was caused by a policy change. +func (e TriggerEvent) PolicyChanged() bool { + return len(e.Policy) > 0 +} + +// DataChanged returns true if the trigger was caused by a data change. +func (e TriggerEvent) DataChanged() bool { + return len(e.Data) > 0 +} + +// TriggerConfig contains the trigger registration configuration. 
+type TriggerConfig struct { + // SkipDataConversion when set to true, avoids converting data passed to + // trigger functions from the store to Go types, and instead passes the + // original representation (e.g., ast.Value). + SkipDataConversion bool + + // OnCommit is invoked when a transaction is successfully committed. The + // callback is invoked with a handle to the write transaction that + // successfully committed before other clients see the changes. + OnCommit func(context.Context, Transaction, TriggerEvent) +} + +// Trigger defines the interface that stores implement to register for change +// notifications when the store is changed. +type Trigger interface { + Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) +} + +// TriggersNotSupported provides default implementations of the Trigger +// interface which may be used if the backend does not support triggers. +type TriggersNotSupported struct{} + +// Register always returns an error indicating triggers are not supported. +func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) { + return nil, triggersNotSupportedError() +} + +// TriggerHandle defines the interface that can be used to unregister triggers that have +// been registered on a Store. +type TriggerHandle interface { + Unregister(context.Context, Transaction) +} + +// Iterator defines the interface that can be used to read files from a directory starting with +// files at the base of the directory, then sub-directories etc. 
+type Iterator interface { + Next() (*Update, error) +} + +// Update contains information about a file +type Update struct { + Path Path + Value []byte + IsPolicy bool +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go new file mode 100644 index 0000000000..a478b9f257 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go @@ -0,0 +1,54 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package errors contains reusable error-related code for the storage layer. +package errors + +import ( + "fmt" + + "github.com/open-policy-agent/opa/v1/storage" +) + +const ( + ArrayIndexTypeMsg = "array index must be integer" + DoesNotExistMsg = "document does not exist" + OutOfRangeMsg = "array index out of range" + RootMustBeObjectMsg = "root must be object" + RootCannotBeRemovedMsg = "root cannot be removed" +) + +var ( + NotFoundErr = &storage.Error{Code: storage.NotFoundErr, Message: DoesNotExistMsg} + RootMustBeObjectErr = &storage.Error{Code: storage.InvalidPatchErr, Message: RootMustBeObjectMsg} + RootCannotBeRemovedErr = &storage.Error{Code: storage.InvalidPatchErr, Message: RootCannotBeRemovedMsg} +) + +func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error { + return &storage.Error{ + Code: storage.NotFoundErr, + Message: path.String() + ": " + hint, + } +} + +func NewNotFoundErrorf(f string, a ...any) *storage.Error { + return &storage.Error{ + Code: storage.NotFoundErr, + Message: fmt.Sprintf(f, a...), + } +} + +func NewWriteConflictError(p storage.Path) *storage.Error { + return &storage.Error{ + Code: storage.WriteConflictErr, + Message: p.String(), + } +} + +func NewInvalidPatchError(f string, a ...any) *storage.Error { + return &storage.Error{ + Code: 
storage.InvalidPatchErr, + Message: fmt.Sprintf(f, a...), + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go new file mode 100644 index 0000000000..bef39ebf49 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go @@ -0,0 +1,141 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package ptr provides utilities for pointer operations using storage layer paths. +package ptr + +import ( + "strconv" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" +) + +func Ptr(data any, path storage.Path) (any, error) { + node := data + for i := range path { + key := path[i] + switch curr := node.(type) { + case map[string]any: + var ok bool + if node, ok = curr[key]; !ok { + return nil, errors.NotFoundErr + } + case []any: + pos, err := ValidateArrayIndex(curr, key, path) + if err != nil { + return nil, err + } + node = curr[pos] + default: + return nil, errors.NotFoundErr + } + } + + return node, nil +} + +func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) { + var keyTerm *ast.Term + + defer func() { + if keyTerm != nil { + ast.TermPtrPool.Put(keyTerm) + } + }() + + node := data + for i := range path { + key := path[i] + switch curr := node.(type) { + case ast.Object: + // Note(anders): + // This term is only created for the lookup, which is not great — especially + // considering the path likely was converted from a ref, where we had all + // the terms available already! Without chaging the storage API, our options + // for performant lookups are limitied to using interning or a pool. Prefer + // interning when possible, as that is zero alloc. 
Using the pool avoids at + // least allocating a new term for every lookup, but still requires an alloc + // for the string Value. + if ast.HasInternedValue(key) { + if val := curr.Get(ast.InternedTerm(key)); val != nil { + node = val.Value + } else { + return nil, errors.NotFoundErr + } + } else { + if keyTerm == nil { + keyTerm = ast.TermPtrPool.Get() + } + // 1 alloc + keyTerm.Value = ast.String(key) + if val := curr.Get(keyTerm); val != nil { + node = val.Value + } else { + return nil, errors.NotFoundErr + } + } + case *ast.Array: + pos, err := ValidateASTArrayIndex(curr, key, path) + if err != nil { + return nil, err + } + node = curr.Elem(pos).Value + default: + return nil, errors.NotFoundErr + } + } + + return node, nil +} + +func ValidateArrayIndex(arr []any, s string, path storage.Path) (int, error) { + idx, ok := isInt(s) + if !ok { + return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg) + } + return inRange(idx, arr, path) +} + +func ValidateASTArrayIndex(arr *ast.Array, s string, path storage.Path) (int, error) { + idx, ok := isInt(s) + if !ok { + return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg) + } + return inRange(idx, arr, path) +} + +// ValidateArrayIndexForWrite also checks that `s` is a valid way to address an +// array element like `ValidateArrayIndex`, but returns a `resource_conflict` error +// if it is not. 
+func ValidateArrayIndexForWrite(arr []any, s string, i int, path storage.Path) (int, error) { + idx, ok := isInt(s) + if !ok { + return 0, errors.NewWriteConflictError(path[:i-1]) + } + return inRange(idx, arr, path) +} + +func isInt(s string) (int, bool) { + idx, err := strconv.Atoi(s) + return idx, err == nil +} + +func inRange(i int, arr any, path storage.Path) (int, error) { + + var arrLen int + + switch v := arr.(type) { + case []any: + arrLen = len(v) + case *ast.Array: + arrLen = v.Len() + } + + if i < 0 || i >= arrLen { + return 0, errors.NewNotFoundErrorWithHint(path, errors.OutOfRangeMsg) + } + return i, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/path.go b/vendor/github.com/open-policy-agent/opa/v1/storage/path.go new file mode 100644 index 0000000000..16bb3e42c5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/path.go @@ -0,0 +1,142 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "errors" + "fmt" + "net/url" + "slices" + "strconv" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// RootPath refers to the root document in storage. +var RootPath = Path{} + +// Path refers to a document in storage. +type Path []string + +// ParsePath returns a new path for the given str. +func ParsePath(str string) (path Path, ok bool) { + if len(str) == 0 || str[0] != '/' { + return nil, false + } + if len(str) == 1 { + return Path{}, true + } + + return strings.Split(str[1:], "/"), true +} + +// ParsePathEscaped returns a new path for the given escaped str. 
+func ParsePathEscaped(str string) (path Path, ok bool) { + if path, ok = ParsePath(str); ok { + for i := range path { + if segment, err := url.PathUnescape(path[i]); err == nil { + path[i] = segment + } else { + return nil, false + } + } + } + return +} + +// NewPathForRef returns a new path for the given ref. +func NewPathForRef(ref ast.Ref) (path Path, err error) { + if len(ref) == 0 { + return nil, errors.New("empty reference (indicates error in caller)") + } + + if len(ref) == 1 { + return Path{}, nil + } + + path = make(Path, 0, len(ref)-1) + + for _, term := range ref[1:] { + switch v := term.Value.(type) { + case ast.String: + path = append(path, string(v)) + case ast.Number: + path = append(path, v.String()) + case ast.Boolean, ast.Null: + return nil, &Error{ + Code: NotFoundErr, + Message: fmt.Sprintf("%v: does not exist", ref), + } + case *ast.Array, ast.Object, ast.Set: + return nil, fmt.Errorf("composites cannot be base document keys: %v", ref) + default: + return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref) + } + } + + return path, nil +} + +// Compare performs lexigraphical comparison on p and other and returns -1 if p +// is less than other, 0 if p is equal to other, or 1 if p is greater than +// other. +func (p Path) Compare(other Path) (cmp int) { + return slices.Compare(p, other) +} + +// Equal returns true if p is the same as other. +func (p Path) Equal(other Path) bool { + return slices.Equal(p, other) +} + +// HasPrefix returns true if p starts with other. +func (p Path) HasPrefix(other Path) bool { + return len(other) <= len(p) && p[:len(other)].Equal(other) +} + +// Ref returns a ref that represents p rooted at head. 
+func (p Path) Ref(head *ast.Term) (ref ast.Ref) { + ref = make(ast.Ref, len(p)+1) + ref[0] = head + for i := range p { + idx, err := strconv.ParseInt(p[i], 10, 64) + if err == nil { + ref[i+1] = ast.UIntNumberTerm(uint64(idx)) + } else { + ref[i+1] = ast.StringTerm(p[i]) + } + } + return ref +} + +func (p Path) String() string { + if len(p) == 0 { + return "/" + } + + l := 0 + for i := range p { + l += len(p[i]) + 1 + } + + sb := strings.Builder{} + sb.Grow(l) + for i := range p { + sb.WriteByte('/') + sb.WriteString(url.PathEscape(p[i])) + } + return sb.String() +} + +// MustParsePath returns a new Path for s. If s cannot be parsed, this function +// will panic. This is mostly for test purposes. +func MustParsePath(s string) Path { + path, ok := ParsePath(s) + if !ok { + panic(s) + } + return path +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go b/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go new file mode 100644 index 0000000000..38d51be405 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go @@ -0,0 +1,139 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// NewTransactionOrDie is a helper function to create a new transaction. If the +// storage layer cannot create a new transaction, this function will panic. This +// function should only be used for tests. +func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction { + txn, err := store.NewTransaction(ctx, params...) + if err != nil { + panic(err) + } + return txn +} + +// ReadOne is a convenience function to read a single value from the provided Store. It +// will create a new Transaction to perform the read with, and clean up after itself +// should an error occur. 
+func ReadOne(ctx context.Context, store Store, path Path) (any, error) { + txn, err := store.NewTransaction(ctx) + if err != nil { + return nil, err + } + defer store.Abort(ctx, txn) + + return store.Read(ctx, txn, path) +} + +// WriteOne is a convenience function to write a single value to the provided Store. It +// will create a new Transaction to perform the write with, and clean up after itself +// should an error occur. +func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value any) error { + txn, err := store.NewTransaction(ctx, WriteParams) + if err != nil { + return err + } + + if err := store.Write(ctx, txn, op, path, value); err != nil { + store.Abort(ctx, txn) + return err + } + + return store.Commit(ctx, txn) +} + +// MakeDir inserts an empty object at path. If the parent path does not exist, +// MakeDir will create it recursively. +func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error { + + // Allow the Store implementation to deal with this in its own way. + if md, ok := store.(MakeDirer); ok { + return md.MakeDir(ctx, txn, path) + } + + if len(path) == 0 { + return nil + } + + node, err := store.Read(ctx, txn, path) + if err != nil { + if !IsNotFound(err) { + return err + } + + if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { + return err + } + + return store.Write(ctx, txn, AddOp, path, map[string]any{}) + } + + if _, ok := node.(map[string]any); ok { + return nil + } + + if _, ok := node.(ast.Object); ok { + return nil + } + + return writeConflictError(path) +} + +// Txn is a convenience function that executes f inside a new transaction +// opened on the store. If the function returns an error, the transaction is +// aborted and the error is returned. Otherwise, the transaction is committed +// and the result of the commit is returned. 
+func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error { + + txn, err := store.NewTransaction(ctx, params) + if err != nil { + return err + } + + if err := f(txn); err != nil { + store.Abort(ctx, txn) + return err + } + + return store.Commit(ctx, txn) +} + +// NonEmpty returns a function that tests if a path is non-empty. A +// path is non-empty if a Read on the path returns a value or a Read +// on any of the path prefixes returns a non-object value. +func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) { + if md, ok := store.(NonEmptyer); ok { + return md.NonEmpty(ctx, txn) + } + return func(path []string) (bool, error) { + if _, err := store.Read(ctx, txn, Path(path)); err == nil { + return true, nil + } else if !IsNotFound(err) { + return false, err + } + for i := len(path) - 1; i > 0; i-- { + val, err := store.Read(ctx, txn, Path(path[:i])) + if err != nil && !IsNotFound(err) { + return false, err + } else if err == nil { + if _, ok := val.(map[string]any); ok { + return false, nil + } + if _, ok := val.(ast.Object); ok { + return false, nil + } + return true, nil + } + } + return false, nil + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go new file mode 100644 index 0000000000..356e7b38b0 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go @@ -0,0 +1,306 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "math/big" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +func builtinCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch a := operands[0].Value.(type) { + case *ast.Array: + return iter(ast.InternedTerm(a.Len())) + case ast.Object: + return iter(ast.InternedTerm(a.Len())) + case ast.Set: + return iter(ast.InternedTerm(a.Len())) + case ast.String: + return iter(ast.InternedTerm(len([]rune(a)))) + } + return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "object", "set", "string") +} + +func builtinSum(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch a := operands[0].Value.(type) { + case *ast.Array: + // Fast path for arrays of integers + is := 0 + nonInts := a.Until(func(x *ast.Term) bool { + if n, ok := x.Value.(ast.Number); ok { + if i, ok := n.Int(); ok { + is += i + return false + } + } + return true + }) + if !nonInts { + return iter(ast.InternedTerm(is)) + } + + // Non-integer values found, so we need to sum as floats. 
+ sum := big.NewFloat(0) + tmp := new(big.Float) + err := a.Iter(func(x *ast.Term) error { + n, ok := x.Value.(ast.Number) + if !ok { + return builtins.NewOperandElementErr(1, a, x.Value, "number") + } + sum = new(big.Float).Add(sum, builtins.NumberToFloatInto(tmp, n)) + return nil + }) + if err != nil { + return err + } + return iter(ast.NewTerm(builtins.FloatToNumber(sum))) + case ast.Set: + // Fast path for sets of integers + is := 0 + nonInts := a.Until(func(x *ast.Term) bool { + if n, ok := x.Value.(ast.Number); ok { + if i, ok := n.Int(); ok { + is += i + return false + } + } + return true + }) + if !nonInts { + return iter(ast.InternedTerm(is)) + } + + sum := big.NewFloat(0) + tmp := new(big.Float) + err := a.Iter(func(x *ast.Term) error { + n, ok := x.Value.(ast.Number) + if !ok { + return builtins.NewOperandElementErr(1, a, x.Value, "number") + } + sum = new(big.Float).Add(sum, builtins.NumberToFloatInto(tmp, n)) + return nil + }) + if err != nil { + return err + } + return iter(ast.NewTerm(builtins.FloatToNumber(sum))) + } + return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") +} + +func builtinProduct(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch a := operands[0].Value.(type) { + case *ast.Array: + product := big.NewFloat(1) + tmp := new(big.Float) + err := a.Iter(func(x *ast.Term) error { + n, ok := x.Value.(ast.Number) + if !ok { + return builtins.NewOperandElementErr(1, a, x.Value, "number") + } + product = new(big.Float).Mul(product, builtins.NumberToFloatInto(tmp, n)) + return nil + }) + if err != nil { + return err + } + return iter(ast.NewTerm(builtins.FloatToNumber(product))) + case ast.Set: + product := big.NewFloat(1) + tmp := new(big.Float) + err := a.Iter(func(x *ast.Term) error { + n, ok := x.Value.(ast.Number) + if !ok { + return builtins.NewOperandElementErr(1, a, x.Value, "number") + } + product = new(big.Float).Mul(product, builtins.NumberToFloatInto(tmp, n)) + return nil + }) + 
if err != nil { + return err + } + return iter(ast.NewTerm(builtins.FloatToNumber(product))) + } + return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") +} + +func builtinMax(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch a := operands[0].Value.(type) { + case *ast.Array: + if a.Len() == 0 { + return nil + } + max := ast.InternedNullTerm.Value + a.Foreach(func(x *ast.Term) { + if ast.Compare(max, x.Value) <= 0 { + max = x.Value + } + }) + return iter(ast.NewTerm(max)) + case ast.Set: + if a.Len() == 0 { + return nil + } + max, err := a.Reduce(ast.InternedNullTerm, func(max *ast.Term, elem *ast.Term) (*ast.Term, error) { + if ast.Compare(max, elem) <= 0 { + return elem, nil + } + return max, nil + }) + if err != nil { + return err + } + return iter(max) + } + + return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") +} + +func builtinMin(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch a := operands[0].Value.(type) { + case *ast.Array: + if a.Len() == 0 { + return nil + } + min := a.Elem(0).Value + a.Foreach(func(x *ast.Term) { + if ast.Compare(min, x.Value) >= 0 { + min = x.Value + } + }) + return iter(ast.NewTerm(min)) + case ast.Set: + if a.Len() == 0 { + return nil + } + min, err := a.Reduce(ast.InternedNullTerm, func(min *ast.Term, elem *ast.Term) (*ast.Term, error) { + // The null term is considered to be less than any other term, + // so in order for min of a set to make sense, we need to check + // for it. 
+ if min.Value.Compare(ast.InternedNullValue) == 0 { + return elem, nil + } + + if ast.Compare(min, elem) >= 0 { + return elem, nil + } + return min, nil + }) + if err != nil { + return err + } + return iter(min) + } + + return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") +} + +func builtinSort(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch a := operands[0].Value.(type) { + case *ast.Array: + return iter(ast.NewTerm(a.Sorted())) + case ast.Set: + return iter(ast.NewTerm(a.Sorted())) + } + return builtins.NewOperandTypeErr(1, operands[0].Value, "set", "array") +} + +func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch val := operands[0].Value.(type) { + case ast.Set: + res := true + match := ast.InternedTerm(true) + val.Until(func(term *ast.Term) bool { + if !match.Equal(term) { + res = false + return true + } + return false + }) + return iter(ast.InternedTerm(res)) + case *ast.Array: + res := true + match := ast.InternedTerm(true) + val.Until(func(term *ast.Term) bool { + if !match.Equal(term) { + res = false + return true + } + return false + }) + return iter(ast.InternedTerm(res)) + default: + return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set") + } +} + +func builtinAny(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch val := operands[0].Value.(type) { + case ast.Set: + res := val.Len() > 0 && val.Contains(ast.InternedTerm(true)) + return iter(ast.InternedTerm(res)) + case *ast.Array: + res := false + match := ast.InternedTerm(true) + val.Until(func(term *ast.Term) bool { + if match.Equal(term) { + res = true + return true + } + return false + }) + return iter(ast.InternedTerm(res)) + default: + return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set") + } +} + +func builtinMember(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + containee := operands[0] + switch 
c := operands[1].Value.(type) { + case ast.Set: + return iter(ast.InternedTerm(c.Contains(containee))) + case *ast.Array: + for i := range c.Len() { + if c.Elem(i).Value.Compare(containee.Value) == 0 { + return iter(ast.InternedTerm(true)) + } + } + return iter(ast.InternedTerm(false)) + case ast.Object: + return iter(ast.InternedTerm(c.Until(func(_, v *ast.Term) bool { + return v.Value.Compare(containee.Value) == 0 + }))) + } + return iter(ast.InternedTerm(false)) +} + +func builtinMemberWithKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + key, val := operands[0], operands[1] + switch c := operands[2].Value.(type) { + case interface{ Get(*ast.Term) *ast.Term }: + ret := false + if act := c.Get(key); act != nil { + ret = act.Value.Compare(val.Value) == 0 + } + return iter(ast.InternedTerm(ret)) + } + return iter(ast.InternedTerm(false)) +} + +func init() { + RegisterBuiltinFunc(ast.Count.Name, builtinCount) + RegisterBuiltinFunc(ast.Sum.Name, builtinSum) + RegisterBuiltinFunc(ast.Product.Name, builtinProduct) + RegisterBuiltinFunc(ast.Max.Name, builtinMax) + RegisterBuiltinFunc(ast.Min.Name, builtinMin) + RegisterBuiltinFunc(ast.Sort.Name, builtinSort) + RegisterBuiltinFunc(ast.Any.Name, builtinAny) + RegisterBuiltinFunc(ast.All.Name, builtinAll) + RegisterBuiltinFunc(ast.Member.Name, builtinMember) + RegisterBuiltinFunc(ast.MemberWithKey.Name, builtinMemberWithKey) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go similarity index 82% rename from vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go index 3ac703efa3..91190330fa 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go @@ -5,11 +5,11 @@ package topdown import ( - "fmt" + "errors" "math/big" - 
"github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type arithArity1 func(a *big.Float) (*big.Float, error) @@ -67,13 +67,11 @@ func builtinPlus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er y, ok2 := n2.Int() if ok1 && ok2 && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x + y)) + return iter(ast.InternedTerm(x + y)) } - f, err := arithPlus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) - if err != nil { - return err - } + f := new(big.Float).Add(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) + return iter(ast.NewTerm(builtins.FloatToNumber(f))) } @@ -91,39 +89,25 @@ func builtinMultiply(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term y, ok2 := n2.Int() if ok1 && ok2 && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x * y)) + return iter(ast.InternedTerm(x * y)) } - f, err := arithMultiply(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) - if err != nil { - return err - } - return iter(ast.NewTerm(builtins.FloatToNumber(f))) -} + f := new(big.Float).Mul(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) -func arithPlus(a, b *big.Float) (*big.Float, error) { - return new(big.Float).Add(a, b), nil -} - -func arithMinus(a, b *big.Float) (*big.Float, error) { - return new(big.Float).Sub(a, b), nil -} - -func arithMultiply(a, b *big.Float) (*big.Float, error) { - return new(big.Float).Mul(a, b), nil + return iter(ast.NewTerm(builtins.FloatToNumber(f))) } func arithDivide(a, b *big.Float) (*big.Float, error) { i, acc := b.Int64() if acc == big.Exact && i == 0 { - return nil, fmt.Errorf("divide by zero") + return nil, errors.New("divide by zero") } return new(big.Float).Quo(a, b), nil } func arithRem(a, b *big.Int) (*big.Int, error) { if b.Int64() == 0 { - return nil, fmt.Errorf("modulo by zero") + return nil, 
errors.New("modulo by zero") } return new(big.Int).Rem(a, b), nil } @@ -171,13 +155,11 @@ func builtinMinus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e y, oky := n2.Int() if okx && oky && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x - y)) + return iter(ast.InternedTerm(x - y)) } - f, err := arithMinus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) - if err != nil { - return err - } + f := new(big.Float).Sub(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) + return iter(ast.NewTerm(builtins.FloatToNumber(f))) } @@ -185,7 +167,11 @@ func builtinMinus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e s2, ok4 := operands[1].Value.(ast.Set) if ok3 && ok4 { - return iter(ast.NewTerm(s1.Diff(s2))) + diff := s1.Diff(s2) + if diff.Len() == 0 { + return iter(ast.InternedEmptySet) + } + return iter(ast.NewTerm(diff)) } if !ok1 && !ok3 { @@ -210,17 +196,17 @@ func builtinRem(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err if okx && oky && inSmallIntRange(x) && inSmallIntRange(y) { if y == 0 { - return fmt.Errorf("modulo by zero") + return errors.New("modulo by zero") } - return iter(ast.IntNumberTerm(x % y)) + return iter(ast.InternedTerm(x % y)) } op1, err1 := builtins.NumberToInt(n1) op2, err2 := builtins.NumberToInt(n2) if err1 != nil || err2 != nil { - return fmt.Errorf("modulo on floating-point number") + return errors.New("modulo on floating-point number") } i, err := arithRem(op1, op2) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go new file mode 100644 index 0000000000..ca40b7793f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go @@ -0,0 +1,152 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + arrA, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + arrB, err := builtins.ArrayOperand(operands[1].Value, 2) + if err != nil { + return err + } + + if arrA.Len() == 0 { + return iter(operands[1]) + } + if arrB.Len() == 0 { + return iter(operands[0]) + } + + arrC := make([]*ast.Term, arrA.Len()+arrB.Len()) + + i := 0 + arrA.Foreach(func(elemA *ast.Term) { + arrC[i] = elemA + i++ + }) + + arrB.Foreach(func(elemB *ast.Term) { + arrC[i] = elemB + i++ + }) + + return iter(ast.ArrayTerm(arrC...)) +} + +func builtinArrayFlatten(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + arr, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + size := arr.Len() + preAlloc := size + containsArray := false + + for i := range size { + if nested, ok := arr.Elem(i).Value.(*ast.Array); ok { + containsArray = true + preAlloc += nested.Len() - 1 + } + } + + if !containsArray && size == preAlloc { + return iter(operands[0]) // Empty array, or no nested arrays -> nothing to flatten. 
+ } + + flattened := make([]*ast.Term, 0, preAlloc) + for i := range size { + elem := arr.Elem(i) + if nested, ok := elem.Value.(*ast.Array); ok { + for j := range nested.Len() { + flattened = append(flattened, nested.Elem(j)) + } + } else { + flattened = append(flattened, elem) + } + } + + return iter(ast.ArrayTerm(flattened...)) +} + +func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + arr, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + startIndex, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + stopIndex, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return err + } + + l := arr.Len() + + // Clamp stopIndex to avoid out-of-range errors. If negative, clamp to zero. + // Otherwise, clamp to length of array. + if stopIndex < 0 { + stopIndex = 0 + } else if stopIndex > l { + stopIndex = l + } + + // Clamp startIndex to avoid out-of-range errors. If negative, clamp to zero. + // Otherwise, clamp to stopIndex to avoid to avoid cases like arr[1:0]. 
+ if startIndex < 0 { + startIndex = 0 + } else if startIndex > stopIndex { + startIndex = stopIndex + } + + if startIndex == 0 && stopIndex >= l { + return iter(operands[0]) + } + + return iter(ast.NewTerm(arr.Slice(startIndex, stopIndex))) +} + +func builtinArrayReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + arr, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + length := arr.Len() + + if length == 0 { + return iter(ast.InternedEmptyArray) + } + + if length == 1 { + return iter(operands[0]) + } + + reversedArr := make([]*ast.Term, length) + for index := range length { + reversedArr[index] = arr.Elem(length - index - 1) + } + + return iter(ast.ArrayTerm(reversedArr...)) +} + +func init() { + RegisterBuiltinFunc(ast.ArrayConcat.Name, builtinArrayConcat) + RegisterBuiltinFunc(ast.ArrayFlatten.Name, builtinArrayFlatten) + RegisterBuiltinFunc(ast.ArraySlice.Name, builtinArraySlice) + RegisterBuiltinFunc(ast.ArrayReverse.Name, builtinArrayReverse) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/binary.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go similarity index 81% rename from vendor/github.com/open-policy-agent/opa/topdown/binary.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go index b4f9dbd392..05050dbf7d 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/binary.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinBinaryAnd(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -21,7 +21,12 @@ func builtinBinaryAnd(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter return err } - return iter(ast.NewTerm(s1.Intersect(s2))) + 
i := s1.Intersect(s2) + if i.Len() == 0 { + return iter(ast.InternedEmptySet) + } + + return iter(ast.NewTerm(i)) } func builtinBinaryOr(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go new file mode 100644 index 0000000000..809a31676c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go @@ -0,0 +1,473 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "fmt" + "strconv" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" +) + +type undo struct { + k *ast.Term + u *bindings +} + +func (u *undo) Undo() { + if u == nil { + // Allow call on zero value of Undo for ease-of-use. + return + } + if u.u == nil { + // Call on empty unifier undos a no-op unify operation. + return + } + u.u.delete(u.k) +} + +type bindings struct { + id uint64 + values bindingsArrayHashmap + instr *Instrumentation +} + +func newBindings(id uint64, instr *Instrumentation) *bindings { + values := newBindingsArrayHashmap() + return &bindings{id, values, instr} +} + +// newBindingsWithSize creates bindings pre-sized for the expected number of entries. +// This avoids over-allocation when the binding count is known in advance (e.g., function arguments). +// For sizeHint <= maxLinearScan, it uses array mode; for larger hints, it pre-allocates a map. 
+func newBindingsWithSize(id uint64, instr *Instrumentation, sizeHint int) *bindings { + values := newBindingsArrayHashmapWithSize(sizeHint) + return &bindings{id, values, instr} +} + +func (u *bindings) Iter(caller *bindings, iter func(*ast.Term, *ast.Term) error) error { + + var err error + + u.values.Iter(func(k *ast.Term, _ value) bool { + if err != nil { + return true + } + err = iter(k, u.PlugNamespaced(k, caller)) + + return false + }) + + return err +} + +func (u *bindings) Namespace(x ast.Node, caller *bindings) { + vis := namespacingVisitor{ + b: u, + caller: caller, + } + ast.NewGenericVisitor(vis.Visit).Walk(x) +} + +func (u *bindings) Plug(a *ast.Term) *ast.Term { + return u.PlugNamespaced(a, nil) +} + +func (u *bindings) PlugNamespaced(a *ast.Term, caller *bindings) *ast.Term { + if u != nil && u.instr != nil { + u.instr.startTimer(evalOpPlug) + t := u.plugNamespaced(a, caller) + u.instr.stopTimer(evalOpPlug) + return t + } + + return u.plugNamespaced(a, caller) +} + +func (u *bindings) plugNamespaced(a *ast.Term, caller *bindings) *ast.Term { + switch v := a.Value.(type) { + case ast.Var: + b, next := u.apply(a) + if a != b || u != next { + return next.plugNamespaced(b, caller) + } + return u.namespaceVar(b, caller) + case *ast.Array: + if a.IsGround() { + return a + } + cpy := *a + arr := make([]*ast.Term, v.Len()) + for i := range arr { + arr[i] = u.plugNamespaced(v.Elem(i), caller) + } + cpy.Value = ast.NewArray(arr...) 
+ return &cpy + case ast.Object: + if a.IsGround() { + return a + } + cpy := *a + cpy.Value, _ = v.Map(func(k, v *ast.Term) (*ast.Term, *ast.Term, error) { + return u.plugNamespaced(k, caller), u.plugNamespaced(v, caller), nil + }) + return &cpy + case ast.Set: + if a.IsGround() { + return a + } + cpy := *a + cpy.Value, _ = v.Map(func(x *ast.Term) (*ast.Term, error) { + return u.plugNamespaced(x, caller), nil + }) + return &cpy + case ast.Ref: + cpy := *a + ref := make(ast.Ref, len(v)) + for i := range ref { + ref[i] = u.plugNamespaced(v[i], caller) + } + cpy.Value = ref + return &cpy + } + return a +} + +func (u *bindings) bind(a *ast.Term, b *ast.Term, other *bindings, und *undo) { + u.values.Put(a, value{ + u: other, + v: b, + }) + und.k = a + und.u = u +} + +func (u *bindings) apply(a *ast.Term) (*ast.Term, *bindings) { + // Early exit for non-var terms. Only vars are bound in the binding list, + // so the lookup below will always fail for non-var terms. In some cases, + // the lookup may be expensive as it has to hash the term (which for large + // inputs can be costly). 
+ _, ok := a.Value.(ast.Var) + if !ok { + return a, u + } + val, ok := u.get(a) + if !ok { + return a, u + } + return val.u.apply(val.v) +} + +func (u *bindings) delete(v *ast.Term) { + u.values.Delete(v) +} + +func (u *bindings) get(v *ast.Term) (value, bool) { + if u == nil { + return value{}, false + } + return u.values.Get(v) +} + +func (u *bindings) String() string { + if u == nil { + return "()" + } + var buf []string + u.values.Iter(func(a *ast.Term, b value) bool { + buf = append(buf, fmt.Sprintf("%v: %v", a, b)) + return false + }) + return fmt.Sprintf("({%v}, %v)", strings.Join(buf, ", "), u.id) +} + +func (u *bindings) namespaceVar(v *ast.Term, caller *bindings) *ast.Term { + name, ok := v.Value.(ast.Var) + if !ok { + panic("illegal value") + } + if caller != nil && caller != u { + // Root documents (i.e., data, input) should never be namespaced because they + // are globally unique. + if !ast.RootDocumentNames.Contains(v) { + return ast.VarTerm(string(name) + strconv.FormatUint(u.id, 10)) + } + } + return v +} + +type value struct { + u *bindings + v *ast.Term +} + +func (v value) String() string { + return fmt.Sprintf("(%v, %d)", v.v, v.u.id) +} + +func (v value) equal(other *value) bool { + if v.u == other.u { + return v.v.Equal(other.v) + } + return false +} + +type namespacingVisitor struct { + b *bindings + caller *bindings +} + +func (vis namespacingVisitor) Visit(x any) bool { + switch x := x.(type) { + case *ast.ArrayComprehension: + x.Term = vis.namespaceTerm(x.Term) + vis := ast.NewGenericVisitor(vis.Visit) + for _, expr := range x.Body { + vis.Walk(expr) + } + return true + case *ast.SetComprehension: + x.Term = vis.namespaceTerm(x.Term) + vis := ast.NewGenericVisitor(vis.Visit) + for _, expr := range x.Body { + vis.Walk(expr) + } + return true + case *ast.ObjectComprehension: + x.Key = vis.namespaceTerm(x.Key) + x.Value = vis.namespaceTerm(x.Value) + vis := ast.NewGenericVisitor(vis.Visit) + for _, expr := range x.Body { + vis.Walk(expr) + } 
+ return true + case *ast.Expr: + switch terms := x.Terms.(type) { + case []*ast.Term: + for i := 1; i < len(terms); i++ { + terms[i] = vis.namespaceTerm(terms[i]) + } + case *ast.Term: + x.Terms = vis.namespaceTerm(terms) + } + for _, w := range x.With { + w.Target = vis.namespaceTerm(w.Target) + w.Value = vis.namespaceTerm(w.Value) + } + } + return false +} + +func (vis namespacingVisitor) namespaceTerm(a *ast.Term) *ast.Term { + switch v := a.Value.(type) { + case ast.Var: + return vis.b.namespaceVar(a, vis.caller) + case *ast.Array: + if a.IsGround() { + return a + } + cpy := *a + arr := make([]*ast.Term, v.Len()) + for i := range arr { + arr[i] = vis.namespaceTerm(v.Elem(i)) + } + cpy.Value = ast.NewArray(arr...) + return &cpy + case ast.Object: + if a.IsGround() { + return a + } + cpy := *a + cpy.Value, _ = v.Map(func(k, v *ast.Term) (*ast.Term, *ast.Term, error) { + return vis.namespaceTerm(k), vis.namespaceTerm(v), nil + }) + return &cpy + case ast.Set: + if a.IsGround() { + return a + } + cpy := *a + cpy.Value, _ = v.Map(func(x *ast.Term) (*ast.Term, error) { + return vis.namespaceTerm(x), nil + }) + return &cpy + case ast.Ref: + cpy := *a + ref := make(ast.Ref, len(v)) + for i := range ref { + ref[i] = vis.namespaceTerm(v[i]) + } + cpy.Value = ref + return &cpy + } + return a +} + +const maxLinearScan = 16 + +// bindingsArrayHashMap uses a dynamically growing slice with linear scan instead +// of a hash map for smaller # of entries. Hash maps start to +// show off their performance advantage only after 16 keys. +// +// Memory optimization: The slice grows incrementally (2 -> 4 -> 8 -> 16) to avoid +// wasting memory when only a few bindings are used. This is critical for scenarios +// like comprehensions and functions with few arguments that are called thousands of times. +type bindingsArrayHashmap struct { + n int // Entries in the slice. 
+ a []bindingArrayKeyValue + m map[ast.Var]bindingArrayKeyValue +} + +type bindingArrayKeyValue struct { + key *ast.Term + value value +} + +func newBindingsArrayHashmap() bindingsArrayHashmap { + return bindingsArrayHashmap{} +} + +// newBindingsArrayHashmapWithSize creates a bindingsArrayHashmap pre-sized for the expected number of entries. +// This optimization reduces memory waste when the binding count is known in advance. +// +// Size selection strategy: +// - sizeHint == 0: lazy allocation (no pre-allocation) +// - sizeHint <= maxLinearScan: pre-allocate slice with exact capacity to avoid reallocation +// - sizeHint > maxLinearScan: pre-allocate map with exact capacity +// +// Memory impact example: +// - Without hint: dynamic growth 0 -> 2 -> 4 -> 8 -> 16 (saves memory for small counts) +// - With hint=2: pre-allocates slice with capacity 2 (exact fit, no waste) +// - With hint=20: pre-allocates map with capacity 20 (saves array allocation + reallocation) +func newBindingsArrayHashmapWithSize(sizeHint int) bindingsArrayHashmap { + if sizeHint <= 0 { + // For unknown sizes, use default lazy allocation with dynamic growth. + return bindingsArrayHashmap{} + } + + if sizeHint <= maxLinearScan { + // For small known sizes, pre-allocate slice with exact capacity to avoid growth overhead. + return bindingsArrayHashmap{ + a: make([]bindingArrayKeyValue, 0, sizeHint), + } + } + + // For larger sizes, pre-allocate map to avoid array allocation + transition cost. 
+ return bindingsArrayHashmap{ + m: make(map[ast.Var]bindingArrayKeyValue, sizeHint), + } +} + +func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) { + if b.m == nil { + // Check if key already exists and update value + if i := b.find(key); i >= 0 { + b.a[i].value = value + return + } + + // Still room in slice mode (< maxLinearScan) + if b.n < maxLinearScan { + // Grow slice if needed using exponential growth strategy + if b.n == cap(b.a) { + newCap := cap(b.a) * 2 + if newCap == 0 { + newCap = 2 // Start with 2 elements + } + if newCap > maxLinearScan { + newCap = maxLinearScan + } + newA := make([]bindingArrayKeyValue, b.n, newCap) + copy(newA, b.a) + b.a = newA + } + b.a = append(b.a, bindingArrayKeyValue{key, value}) + b.n++ + return + } + + // Slice is full (reached maxLinearScan), transition to map mode. + b.m = make(map[ast.Var]bindingArrayKeyValue, maxLinearScan+1) + for _, kv := range b.a { + b.m[kv.key.Value.(ast.Var)] = bindingArrayKeyValue{kv.key, kv.value} + } + b.m[key.Value.(ast.Var)] = bindingArrayKeyValue{key, value} + + // Clear slice to allow GC + b.a = nil + b.n = 0 + return + } + + b.m[key.Value.(ast.Var)] = bindingArrayKeyValue{key, value} +} + +func (b *bindingsArrayHashmap) Get(key *ast.Term) (value, bool) { + if b.m == nil { + if i := b.find(key); i >= 0 { + return b.a[i].value, true + } + + return value{}, false + } + + v, ok := b.m[key.Value.(ast.Var)] + if ok { + return v.value, true + } + + return value{}, false +} + +func (b *bindingsArrayHashmap) Delete(key *ast.Term) { + if b.m == nil { + if i := b.find(key); i >= 0 { + n := b.n - 1 + if i < n { + b.a[i] = b.a[n] + } + // Shrink slice to reflect deletion + b.a = b.a[:n] + b.n = n + } + return + } + + delete(b.m, key.Value.(ast.Var)) +} + +func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) { + if b.m == nil { + if b.a != nil { + for i := range b.n { + if f(b.a[i].key, b.a[i].value) { + return + } + } + } + return + } + + for _, v := range b.m { + if 
f(v.key, v.value) { + return + } + } +} + +func (b *bindingsArrayHashmap) find(key *ast.Term) int { + if b.a == nil || b.n == 0 { + return -1 + } + v := key.Value.(ast.Var) + for i := range b.n { + if b.a[i].key.Value.(ast.Var) == v { + return i + } + } + + return -1 +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bits.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go similarity index 96% rename from vendor/github.com/open-policy-agent/opa/topdown/bits.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go index 7a63c0df1e..e420ffe611 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/bits.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go @@ -7,8 +7,8 @@ package topdown import ( "math/big" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type bitsArity1 func(a *big.Int) (*big.Int, error) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go new file mode 100644 index 0000000000..e0b893d477 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go @@ -0,0 +1,224 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "math/rand" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" +) + +type ( + // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. 
+ FunctionalBuiltin1 func(op1 ast.Value) (output ast.Value, err error) + + // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. + FunctionalBuiltin2 func(op1, op2 ast.Value) (output ast.Value, err error) + + // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. + FunctionalBuiltin3 func(op1, op2, op3 ast.Value) (output ast.Value, err error) + + // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. + FunctionalBuiltin4 func(op1, op2, op3, op4 ast.Value) (output ast.Value, err error) + + // BuiltinContext contains context from the evaluator that may be used by + // built-in functions. + BuiltinContext struct { + Context context.Context // request context that was passed when query started + Metrics metrics.Metrics // metrics registry for recording built-in specific metrics + Seed io.Reader // randomization source + Time *ast.Term // wall clock time + Cancel Cancel // atomic value that signals evaluation to halt + Runtime *ast.Term // runtime information on the OPA instance + Cache builtins.Cache // built-in function state cache + InterQueryBuiltinCache cache.InterQueryCache // cross-query built-in function state cache + InterQueryBuiltinValueCache cache.InterQueryValueCache // cross-query built-in function state value cache. 
this cache is useful for scenarios where the entry size cannot be calculated + NDBuiltinCache builtins.NDBCache // cache for non-deterministic built-in state + Location *ast.Location // location of built-in call + Tracers []Tracer // Deprecated: Use QueryTracers instead + QueryTracers []QueryTracer // tracer objects for trace() built-in function + TraceEnabled bool // indicates whether tracing is enabled for the evaluation + QueryID uint64 // identifies query being evaluated + ParentID uint64 // identifies parent of query being evaluated + PrintHook print.Hook // provides callback function to use for printing + RoundTripper CustomizeRoundTripper // customize transport to use for HTTP requests + DistributedTracingOpts tracing.Options // options to be used by distributed tracing. + rand *rand.Rand // randomization source for non-security-sensitive operations + Capabilities *ast.Capabilities + } + + // BuiltinFunc defines an interface for implementing built-in functions. + // The built-in function is called with the plugged operands from the call + // (including the output operands.) The implementation should evaluate the + // operands and invoke the iterator for each successful/defined output + // value. + BuiltinFunc func(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error +) + +// Rand returns a random number generator based on the Seed for this built-in +// context. The random number will be re-used across multiple calls to this +// function. If a random number generator cannot be created, an error is +// returned. +func (bctx *BuiltinContext) Rand() (*rand.Rand, error) { + + if bctx.rand != nil { + return bctx.rand, nil + } + + seed, err := readInt64(bctx.Seed) + if err != nil { + return nil, err + } + + bctx.rand = rand.New(rand.NewSource(seed)) + return bctx.rand, nil +} + +// RegisterBuiltinFunc adds a new built-in function to the evaluation engine. 
+func RegisterBuiltinFunc(name string, f BuiltinFunc) { + builtinFunctions[name] = builtinErrorWrapper(name, f) +} + +// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. +func RegisterFunctionalBuiltin1(name string, fun FunctionalBuiltin1) { + builtinFunctions[name] = functionalWrapper1(name, fun) +} + +// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. +func RegisterFunctionalBuiltin2(name string, fun FunctionalBuiltin2) { + builtinFunctions[name] = functionalWrapper2(name, fun) +} + +// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. +func RegisterFunctionalBuiltin3(name string, fun FunctionalBuiltin3) { + builtinFunctions[name] = functionalWrapper3(name, fun) +} + +// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. +func RegisterFunctionalBuiltin4(name string, fun FunctionalBuiltin4) { + builtinFunctions[name] = functionalWrapper4(name, fun) +} + +// GetBuiltin returns a built-in function implementation, nil if no built-in found. +func GetBuiltin(name string) BuiltinFunc { + return builtinFunctions[name] +} + +// Deprecated: The BuiltinEmpty type is no longer needed. Use nil return values instead. 
+type BuiltinEmpty struct{} + +func (BuiltinEmpty) Error() string { + return "" +} + +var builtinFunctions = map[string]BuiltinFunc{} + +func builtinErrorWrapper(name string, fn BuiltinFunc) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + err := fn(bctx, args, iter) + if err == nil { + return nil + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func functionalWrapper1(name string, fn FunctionalBuiltin1) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + result, err := fn(args[0].Value) + if err == nil { + return iter(ast.NewTerm(result)) + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func functionalWrapper2(name string, fn FunctionalBuiltin2) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + result, err := fn(args[0].Value, args[1].Value) + if err == nil { + return iter(ast.NewTerm(result)) + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func functionalWrapper3(name string, fn FunctionalBuiltin3) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + result, err := fn(args[0].Value, args[1].Value, args[2].Value) + if err == nil { + return iter(ast.NewTerm(result)) + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func functionalWrapper4(name string, fn FunctionalBuiltin4) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + result, err := fn(args[0].Value, args[1].Value, args[2].Value, args[3].Value) + if err == nil { + return iter(ast.NewTerm(result)) + } + if _, empty := err.(BuiltinEmpty); empty { + return nil + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func handleBuiltinErr(name string, loc *ast.Location, err error) error { + switch err := err.(type) { + case BuiltinEmpty: + return nil + case *Error, Halt: + 
return err + case builtins.ErrOperand: + e := &Error{ + Code: TypeErr, + Message: fmt.Sprintf("%v: %v", name, err.Error()), + Location: loc, + } + return e.Wrap(err) + default: + e := &Error{ + Code: BuiltinErr, + Message: fmt.Sprintf("%v: %v", name, err.Error()), + Location: loc, + } + return e.Wrap(err) + } +} + +func readInt64(r io.Reader) (int64, error) { + bs := make([]byte, 8) + n, err := io.ReadFull(r, bs) + if n != len(bs) || err != nil { + return 0, err + } + return int64(binary.BigEndian.Uint64(bs)), nil +} + +// Used to get older-style (ast.Term, error) tuples out of newer functions. +func getResult(fn BuiltinFunc, operands ...*ast.Term) (*ast.Term, error) { + var result *ast.Term + extractionFn := func(r *ast.Term) error { + result = r + return nil + } + err := fn(BuiltinContext{}, operands, extractionFn) + if err != nil { + return nil, err + } + return result, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go new file mode 100644 index 0000000000..c81b772484 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go @@ -0,0 +1,346 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package builtins contains utilities for implementing built-in functions. +package builtins + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" +) + +// Cache defines the built-in cache used by the top-down evaluation. The keys +// must be comparable and should not be of type string. +type Cache map[any]any + +// Put updates the cache for the named built-in. +func (c Cache) Put(k, v any) { + c[k] = v +} + +// Get returns the cached value for k. 
+func (c Cache) Get(k any) (any, bool) { + v, ok := c[k] + return v, ok +} + +// We use an ast.Object for the cached keys/values because a naive +// map[ast.Value]ast.Value will not correctly detect value equality of +// the member keys. +type NDBCache map[string]ast.Object + +func (c NDBCache) AsValue() ast.Value { + out := ast.NewObject() + for bname, obj := range c { + out.Insert(ast.InternedTerm(bname), ast.NewTerm(obj)) + } + return out +} + +// Put updates the cache for the named built-in. +// Automatically creates the 2-level hierarchy as needed. +func (c NDBCache) Put(name string, k, v ast.Value) { + if _, ok := c[name]; !ok { + c[name] = ast.NewObject() + } + c[name].Insert(ast.NewTerm(k), ast.NewTerm(v)) +} + +// Get returns the cached value for k for the named builtin. +func (c NDBCache) Get(name string, k ast.Value) (ast.Value, bool) { + if m, ok := c[name]; ok { + v := m.Get(ast.NewTerm(k)) + if v != nil { + return v.Value, true + } + return nil, false + } + return nil, false +} + +// Convenience functions for serializing the data structure. +func (c NDBCache) MarshalJSON() ([]byte, error) { + v, err := ast.JSON(c.AsValue()) + if err != nil { + return nil, err + } + return json.Marshal(v) +} + +func (c *NDBCache) UnmarshalJSON(data []byte) error { + out := map[string]ast.Object{} + var incoming any + + // Note: We use util.Unmarshal instead of json.Unmarshal to get + // correct deserialization of number types. + err := util.Unmarshal(data, &incoming) + if err != nil { + return err + } + + // Convert interface types back into ast.Value types. + nestedObject, err := ast.InterfaceToValue(incoming) + if err != nil { + return err + } + + // Reconstruct NDBCache from nested ast.Object structure. 
+ if source, ok := nestedObject.(ast.Object); ok { + err = source.Iter(func(k, v *ast.Term) error { + if obj, ok := v.Value.(ast.Object); ok { + out[string(k.Value.(ast.String))] = obj + return nil + } + return errors.New("expected Object, got other Value type in conversion") + }) + if err != nil { + return err + } + } + + *c = out + + return nil +} + +// ErrOperand represents an invalid operand has been passed to a built-in +// function. Built-ins should return ErrOperand to indicate a type error has +// occurred. +type ErrOperand string + +func (err ErrOperand) Error() string { + return string(err) +} + +// NewOperandErr returns a generic operand error. +func NewOperandErr(pos int, f string, a ...any) error { + f = fmt.Sprintf("operand %v ", pos) + f + return ErrOperand(fmt.Sprintf(f, a...)) +} + +// NewOperandTypeErr returns an operand error indicating the operand's type was wrong. +func NewOperandTypeErr(pos int, got ast.Value, expected ...string) error { + + if len(expected) == 1 { + return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.ValueName(got)) + } + + return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.ValueName(got)) +} + +// NewOperandElementErr returns an operand error indicating an element in the +// composite operand was wrong. +func NewOperandElementErr(pos int, composite ast.Value, got ast.Value, expected ...string) error { + + tpe := ast.ValueName(composite) + + if len(expected) == 1 { + return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.ValueName(got)) + } + + return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.ValueName(got)) +} + +// NewOperandEnumErr returns an operand error indicating a value was wrong. 
+func NewOperandEnumErr(pos int, expected ...string) error { + + if len(expected) == 1 { + return NewOperandErr(pos, "must be %v", expected[0]) + } + + return NewOperandErr(pos, "must be one of {%v}", strings.Join(expected, ", ")) +} + +// IntOperand converts x to an int. If the cast fails, a descriptive error is +// returned. +func IntOperand(x ast.Value, pos int) (int, error) { + n, ok := x.(ast.Number) + if !ok { + return 0, NewOperandTypeErr(pos, x, "number") + } + + i, ok := n.Int() + if !ok { + return 0, NewOperandErr(pos, "must be integer number but got floating-point number") + } + + return i, nil +} + +// BigIntOperand converts x to a big int. If the cast fails, a descriptive error +// is returned. +func BigIntOperand(x ast.Value, pos int) (*big.Int, error) { + n, err := NumberOperand(x, 1) + if err != nil { + return nil, NewOperandTypeErr(pos, x, "integer") + } + bi, err := NumberToInt(n) + if err != nil { + return nil, NewOperandErr(pos, "must be integer number but got floating-point number") + } + + return bi, nil +} + +// NumberOperand converts x to a number. If the cast fails, a descriptive error is +// returned. +func NumberOperand(x ast.Value, pos int) (ast.Number, error) { + n, ok := x.(ast.Number) + if !ok { + return ast.Number(""), NewOperandTypeErr(pos, x, "number") + } + return n, nil +} + +// SetOperand converts x to a set. If the cast fails, a descriptive error is +// returned. +func SetOperand(x ast.Value, pos int) (ast.Set, error) { + s, ok := x.(ast.Set) + if !ok { + return nil, NewOperandTypeErr(pos, x, "set") + } + return s, nil +} + +// StringOperand returns x as [ast.String], or a descriptive error if the conversion fails. +func StringOperand(x ast.Value, pos int) (ast.String, error) { + s, ok := x.(ast.String) + if !ok { + return ast.String(""), NewOperandTypeErr(pos, x, "string") + } + return s, nil +} + +// StringOperandByteSlice returns x a []byte, assuming x is [ast.String], or a descriptive error +// if that is not the case. 
The returned byte slice points directly at the underlying array backing +// the string, and should not be modified. +func StringOperandByteSlice(x ast.Value, pos int) ([]byte, error) { + s, err := StringOperand(x, pos) + if err != nil { + return nil, err + } + return util.StringToByteSlice(string(s)), nil +} + +// ObjectOperand converts x to an object. If the cast fails, a descriptive +// error is returned. +func ObjectOperand(x ast.Value, pos int) (ast.Object, error) { + o, ok := x.(ast.Object) + if !ok { + return nil, NewOperandTypeErr(pos, x, "object") + } + return o, nil +} + +// ArrayOperand converts x to an array. If the cast fails, a descriptive +// error is returned. +func ArrayOperand(x ast.Value, pos int) (*ast.Array, error) { + a, ok := x.(*ast.Array) + if !ok { + return nil, NewOperandTypeErr(pos, x, "array") + } + return a, nil +} + +// NumberToFloat converts n to a big float. +func NumberToFloat(n ast.Number) *big.Float { + return NumberToFloatInto(nil, n) +} + +// NumberToFloatInto converts n to a big float, storing it in dst when provided. +func NumberToFloatInto(dst *big.Float, n ast.Number) *big.Float { + if dst == nil { + dst = new(big.Float) + } + if _, ok := dst.SetString(string(n)); !ok { + panic("illegal value") + } + return dst +} + +// FloatToNumber converts f to a number. +func FloatToNumber(f *big.Float) ast.Number { + var format byte = 'g' + if f.IsInt() { + format = 'f' + } + return ast.Number(f.Text(format, -1)) +} + +// NumberToInt converts n to a big int. +// If n cannot be converted to an big int, an error is returned. +func NumberToInt(n ast.Number) (*big.Int, error) { + f := NumberToFloat(n) + r, accuracy := f.Int(nil) + if accuracy != big.Exact { + return nil, errors.New("illegal value") + } + return r, nil +} + +// IntToNumber converts i to a number. +func IntToNumber(i *big.Int) ast.Number { + return ast.Number(i.String()) +} + +// StringSliceOperand converts x to a []string. 
If the cast fails, a descriptive error is +// returned. +func StringSliceOperand(a ast.Value, pos int) ([]string, error) { + type iterable interface { + Iter(func(*ast.Term) error) error + Len() int + } + + strs, ok := a.(iterable) + if !ok { + return nil, NewOperandTypeErr(pos, a, "array", "set") + } + + var outStrs = make([]string, 0, strs.Len()) + if err := strs.Iter(func(x *ast.Term) error { + s, ok := x.Value.(ast.String) + if !ok { + return NewOperandElementErr(pos, a, x.Value, "string") + } + outStrs = append(outStrs, string(s)) + return nil + }); err != nil { + return nil, err + } + + return outStrs, nil +} + +// RuneSliceOperand converts x to a []rune. If the cast fails, a descriptive error is +// returned. +func RuneSliceOperand(x ast.Value, pos int) ([]rune, error) { + a, err := ArrayOperand(x, pos) + if err != nil { + return nil, err + } + + var f = make([]rune, a.Len()) + for k := range a.Len() { + b := a.Elem(k) + c, ok := b.Value.(ast.String) + if !ok { + return nil, NewOperandElementErr(pos, x, b.Value, "string") + } + + d := []rune(string(c)) + if len(d) != 1 { + return nil, NewOperandElementErr(pos, x, b.Value, "rune") + } + + f[k] = d[0] + } + + return f, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go new file mode 100644 index 0000000000..a6c89b4537 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go @@ -0,0 +1,363 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "slices" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" +) + +// VirtualCache defines the interface for a cache that stores the results of +// evaluated virtual documents (rules). +// The cache is a stack of frames, where each frame is a mapping from references +// to values. 
+type VirtualCache interface { + // Push pushes a new, empty frame of value mappings onto the stack. + Push() + + // Pop pops the top frame of value mappings from the stack, removing all associated entries. + Pop() + + // Get returns the value associated with the given reference. The second return value + // indicates whether the reference has a recorded 'undefined' result. + Get(ref ast.Ref) (*ast.Term, bool) + + // Put associates the given reference with the given value. If the value is nil, the reference + // is marked as having an 'undefined' result. + Put(ref ast.Ref, value *ast.Term) + + // Keys returns the set of keys that have been cached for the active frame. + Keys() []ast.Ref +} + +// BaseCache defines the interface for a cache that stores cached base documents, i.e. data. +type BaseCache interface { + Get(ast.Ref) ast.Value + Put(ast.Ref, ast.Value) +} + +type virtualCache struct { + stack []*virtualCacheElem +} + +type virtualCacheElem struct { + value *ast.Term + children *util.HasherMap[*ast.Term, *virtualCacheElem] + undefined bool +} + +func NewVirtualCache() VirtualCache { + cache := &virtualCache{} + cache.Push() + return cache +} + +func (c *virtualCache) Push() { + c.stack = append(c.stack, newVirtualCacheElem()) +} + +func (c *virtualCache) Pop() { + c.stack = c.stack[:len(c.stack)-1] +} + +// Returns the resolved value of the AST term and a flag indicating if the value +// should be interpretted as undefined: +// +// nil, true indicates the ref is undefined +// ast.Term, false indicates the ref is defined +// nil, false indicates the ref has not been cached +// ast.Term, true is impossible +func (c *virtualCache) Get(ref ast.Ref) (*ast.Term, bool) { + node := c.stack[len(c.stack)-1] + for i := range ref { + x, ok := node.children.Get(ref[i]) + if !ok { + return nil, false + } + node = x + } + if node.undefined { + return nil, true + } + + return node.value, false +} + +// If value is a nil pointer, set the 'undefined' flag on the cache 
element to +// indicate that the Ref has resolved to undefined. +func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) { + node := c.stack[len(c.stack)-1] + for i := range ref { + x, ok := node.children.Get(ref[i]) + if ok { + node = x + } else { + next := newVirtualCacheElem() + node.children.Put(ref[i], next) + node = next + } + } + if value != nil { + node.value = value + } else { + node.undefined = true + } +} + +func (c *virtualCache) Keys() []ast.Ref { + node := c.stack[len(c.stack)-1] + return keysRecursive(nil, node) +} + +func keysRecursive(root ast.Ref, node *virtualCacheElem) []ast.Ref { + var keys []ast.Ref + node.children.Iter(func(k *ast.Term, v *virtualCacheElem) bool { + ref := root.Append(k) + if v.value != nil { + keys = append(keys, ref) + } + if v.children.Len() > 0 { + keys = append(keys, keysRecursive(ref, v)...) + } + return false + }) + return keys +} + +func newVirtualCacheElem() *virtualCacheElem { + return &virtualCacheElem{children: newVirtualCacheHashMap()} +} + +func newVirtualCacheHashMap() *util.HasherMap[*ast.Term, *virtualCacheElem] { + return util.NewHasherMap[*ast.Term, *virtualCacheElem](ast.TermValueEqual) +} + +// baseCache implements a trie structure to cache base documents read out of +// storage. Values inserted into the cache may contain other values that were +// previously inserted. In this case, the previous values are erased from the +// structure. +type baseCache struct { + root *baseCacheElem +} + +func newBaseCache() *baseCache { + return &baseCache{ + root: newBaseCacheElem(), + } +} + +func (c *baseCache) Get(ref ast.Ref) ast.Value { + node := c.root + for i := range ref { + node = node.children[ref[i].Value] + if node == nil { + return nil + } else if node.value != nil { + if len(ref) == 1 && ast.IsScalar(node.value) { + // If the node is a scalar, return the value directly + // and avoid an allocation when calling Find. 
+ return node.value + } + + result, err := node.value.Find(ref[i+1:]) + if err != nil { + return nil + } + return result + } + } + return nil +} + +func (c *baseCache) Put(ref ast.Ref, value ast.Value) { + node := c.root + for i := range ref { + if child, ok := node.children[ref[i].Value]; ok { + node = child + } else { + child := newBaseCacheElem() + node.children[ref[i].Value] = child + node = child + } + } + node.set(value) +} + +type baseCacheElem struct { + value ast.Value + children map[ast.Value]*baseCacheElem +} + +func newBaseCacheElem() *baseCacheElem { + return &baseCacheElem{ + children: map[ast.Value]*baseCacheElem{}, + } +} + +func (e *baseCacheElem) set(value ast.Value) { + e.value = value + e.children = map[ast.Value]*baseCacheElem{} +} + +type refStack struct { + sl []refStackElem +} + +type refStackElem struct { + refs []ast.Ref +} + +func newRefStack() *refStack { + return &refStack{} +} + +func (s *refStack) Push(refs []ast.Ref) { + s.sl = append(s.sl, refStackElem{refs: refs}) +} + +func (s *refStack) Pop() { + if s == nil { + return + } + s.sl = s.sl[:len(s.sl)-1] +} + +func (s *refStack) Prefixed(ref ast.Ref) bool { + if s != nil { + for i := len(s.sl) - 1; i >= 0; i-- { + if slices.ContainsFunc(s.sl[i].refs, ref.HasPrefix) { + return true + } + } + } + return false +} + +type comprehensionCache struct { + stack []map[*ast.Term]*comprehensionCacheElem +} + +type comprehensionCacheElem struct { + value *ast.Term + children *util.HasherMap[*ast.Term, *comprehensionCacheElem] +} + +func newComprehensionCache() *comprehensionCache { + cache := &comprehensionCache{} + cache.Push() + return cache +} + +func (c *comprehensionCache) Push() { + c.stack = append(c.stack, map[*ast.Term]*comprehensionCacheElem{}) +} + +func (c *comprehensionCache) Pop() { + c.stack = c.stack[:len(c.stack)-1] +} + +func (c *comprehensionCache) Elem(t *ast.Term) (*comprehensionCacheElem, bool) { + elem, ok := c.stack[len(c.stack)-1][t] + return elem, ok +} + +func (c 
*comprehensionCache) Set(t *ast.Term, elem *comprehensionCacheElem) { + c.stack[len(c.stack)-1][t] = elem +} + +func newComprehensionCacheElem() *comprehensionCacheElem { + return &comprehensionCacheElem{children: newComprehensionCacheHashMap()} +} + +func (c *comprehensionCacheElem) Get(key []*ast.Term) *ast.Term { + node := c + for i := range key { + x, ok := node.children.Get(key[i]) + if !ok { + return nil + } + node = x + } + return node.value +} + +func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) { + node := c + for i := range key { + x, ok := node.children.Get(key[i]) + if ok { + node = x + } else { + next := newComprehensionCacheElem() + node.children.Put(key[i], next) + node = next + } + } + node.value = value +} + +func newComprehensionCacheHashMap() *util.HasherMap[*ast.Term, *comprehensionCacheElem] { + return util.NewHasherMap[*ast.Term, *comprehensionCacheElem](ast.TermValueEqual) +} + +type functionMocksStack struct { + stack []*functionMocksElem +} + +type functionMocksElem []frame + +type frame map[string]*ast.Term + +func newFunctionMocksStack() *functionMocksStack { + stack := &functionMocksStack{} + stack.Push() + return stack +} + +func newFunctionMocksElem() *functionMocksElem { + return &functionMocksElem{} +} + +func (s *functionMocksStack) Push() { + s.stack = append(s.stack, newFunctionMocksElem()) +} + +func (s *functionMocksStack) Pop() { + s.stack = s.stack[:len(s.stack)-1] +} + +func (s *functionMocksStack) PopPairs() { + current := s.stack[len(s.stack)-1] + *current = (*current)[:len(*current)-1] +} + +func (s *functionMocksStack) PutPairs(mocks [][2]*ast.Term) { + el := frame{} + for i := range mocks { + el[mocks[i][0].Value.String()] = mocks[i][1] + } + s.Put(el) +} + +func (s *functionMocksStack) Put(el frame) { + current := s.stack[len(s.stack)-1] + *current = append(*current, el) +} + +func (s *functionMocksStack) Get(f ast.Ref) (*ast.Term, bool) { + if s == nil { + return nil, false + } + + current := 
*s.stack[len(s.stack)-1] + for i := len(current) - 1; i >= 0; i-- { + if r, ok := current[i][f.String()]; ok { + return r, true + } + } + return nil, false +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go new file mode 100644 index 0000000000..1c2eacc99e --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go @@ -0,0 +1,666 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package cache defines the inter-query cache interface that can cache data across queries +package cache + +import ( + "container/list" + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + defaultInterQueryBuiltinValueCacheSize = int(0) // unlimited + defaultMaxSizeBytes = int64(0) // unlimited + defaultForcedEvictionThresholdPercentage = int64(100) // trigger at max_size_bytes + defaultStaleEntryEvictionPeriodSeconds = int64(0) // never +) + +var interQueryBuiltinValueCacheDefaultConfigs = map[string]*NamedValueCacheConfig{} + +func getDefaultInterQueryBuiltinValueCacheConfig(name string) *NamedValueCacheConfig { + return interQueryBuiltinValueCacheDefaultConfigs[name] +} + +// RegisterDefaultInterQueryBuiltinValueCacheConfig registers a default configuration for the inter-query value cache; +// used when none has been explicitly configured. +// To disable a named cache when not configured, pass a config with the disabled value set to true. +func RegisterDefaultInterQueryBuiltinValueCacheConfig(name string, config *NamedValueCacheConfig) { + interQueryBuiltinValueCacheDefaultConfigs[name] = config +} + +// Config represents the configuration for the inter-query builtin cache. 
+type Config struct { + InterQueryBuiltinCache InterQueryBuiltinCacheConfig `json:"inter_query_builtin_cache"` + InterQueryBuiltinValueCache InterQueryBuiltinValueCacheConfig `json:"inter_query_builtin_value_cache"` +} + +// Clone creates a deep copy of Config. +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + + return &Config{ + InterQueryBuiltinCache: *c.InterQueryBuiltinCache.Clone(), + InterQueryBuiltinValueCache: *c.InterQueryBuiltinValueCache.Clone(), + } +} + +// NamedValueCacheConfig represents the configuration of a named cache that built-in functions can utilize. +// A default configuration to be used if not explicitly configured can be registered using RegisterDefaultInterQueryBuiltinValueCacheConfig. +type NamedValueCacheConfig struct { + MaxNumEntries *int `json:"max_num_entries,omitempty"` + Disabled *bool `json:"disabled,omitempty"` +} + +// Clone creates a deep copy of NamedValueCacheConfig. +func (n *NamedValueCacheConfig) Clone() *NamedValueCacheConfig { + if n == nil { + return nil + } + + clone := &NamedValueCacheConfig{} + + if n.MaxNumEntries != nil { + maxEntries := *n.MaxNumEntries + clone.MaxNumEntries = &maxEntries + } + if n.Disabled != nil { + disabled := *n.Disabled + clone.Disabled = &disabled + } + + return clone +} + +// InterQueryBuiltinValueCacheConfig represents the configuration of the inter-query value cache that built-in functions can utilize. +// MaxNumEntries - max number of cache entries +type InterQueryBuiltinValueCacheConfig struct { + MaxNumEntries *int `json:"max_num_entries,omitempty"` + NamedCacheConfigs map[string]*NamedValueCacheConfig `json:"named,omitempty"` +} + +// Clone creates a deep copy of InterQueryBuiltinValueCacheConfig. 
+func (i *InterQueryBuiltinValueCacheConfig) Clone() *InterQueryBuiltinValueCacheConfig { + if i == nil { + return nil + } + + clone := &InterQueryBuiltinValueCacheConfig{} + + if i.MaxNumEntries != nil { + maxEntries := *i.MaxNumEntries + clone.MaxNumEntries = &maxEntries + } + + if i.NamedCacheConfigs != nil { + clone.NamedCacheConfigs = make(map[string]*NamedValueCacheConfig, len(i.NamedCacheConfigs)) + for k, v := range i.NamedCacheConfigs { + clone.NamedCacheConfigs[k] = v.Clone() + } + } + + return clone +} + +// InterQueryBuiltinCacheConfig represents the configuration of the inter-query cache that built-in functions can utilize. +// MaxSizeBytes - max capacity of cache in bytes +// ForcedEvictionThresholdPercentage - capacity usage in percentage after which forced FIFO eviction starts +// StaleEntryEvictionPeriodSeconds - time period between end of previous and start of new stale entry eviction routine +type InterQueryBuiltinCacheConfig struct { + MaxSizeBytes *int64 `json:"max_size_bytes,omitempty"` + ForcedEvictionThresholdPercentage *int64 `json:"forced_eviction_threshold_percentage,omitempty"` + StaleEntryEvictionPeriodSeconds *int64 `json:"stale_entry_eviction_period_seconds,omitempty"` +} + +// Clone creates a deep copy of InterQueryBuiltinCacheConfig. +func (i *InterQueryBuiltinCacheConfig) Clone() *InterQueryBuiltinCacheConfig { + if i == nil { + return nil + } + + clone := &InterQueryBuiltinCacheConfig{} + + if i.MaxSizeBytes != nil { + maxSize := *i.MaxSizeBytes + clone.MaxSizeBytes = &maxSize + } + + if i.ForcedEvictionThresholdPercentage != nil { + threshold := *i.ForcedEvictionThresholdPercentage + clone.ForcedEvictionThresholdPercentage = &threshold + } + + if i.StaleEntryEvictionPeriodSeconds != nil { + period := *i.StaleEntryEvictionPeriodSeconds + clone.StaleEntryEvictionPeriodSeconds = &period + } + + return clone +} + +// ParseCachingConfig returns the config for the inter-query cache. 
+func ParseCachingConfig(raw []byte) (*Config, error) { + if raw == nil { + maxSize := new(int64) + *maxSize = defaultMaxSizeBytes + threshold := new(int64) + *threshold = defaultForcedEvictionThresholdPercentage + period := new(int64) + *period = defaultStaleEntryEvictionPeriodSeconds + + maxInterQueryBuiltinValueCacheSize := new(int) + *maxInterQueryBuiltinValueCacheSize = defaultInterQueryBuiltinValueCacheSize + + return &Config{ + InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{ + MaxSizeBytes: maxSize, + ForcedEvictionThresholdPercentage: threshold, + StaleEntryEvictionPeriodSeconds: period, + }, + InterQueryBuiltinValueCache: InterQueryBuiltinValueCacheConfig{ + MaxNumEntries: maxInterQueryBuiltinValueCacheSize, + }, + }, nil + } + + var config Config + + if err := util.Unmarshal(raw, &config); err == nil { + if err = config.validateAndInjectDefaults(); err != nil { + return nil, err + } + } else { + return nil, err + } + + return &config, nil +} + +func (c *Config) validateAndInjectDefaults() error { + if c.InterQueryBuiltinCache.MaxSizeBytes == nil { + maxSize := new(int64) + *maxSize = defaultMaxSizeBytes + c.InterQueryBuiltinCache.MaxSizeBytes = maxSize + } + if c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage == nil { + threshold := new(int64) + *threshold = defaultForcedEvictionThresholdPercentage + c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage = threshold + } else { + threshold := *c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage + if threshold < 0 || threshold > 100 { + return fmt.Errorf("invalid forced_eviction_threshold_percentage %v", threshold) + } + } + if c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds == nil { + period := new(int64) + *period = defaultStaleEntryEvictionPeriodSeconds + c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds = period + } else { + period := *c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds + if period < 0 { + return fmt.Errorf("invalid 
stale_entry_eviction_period_seconds %v", period) + } + } + + if c.InterQueryBuiltinValueCache.MaxNumEntries == nil { + maxSize := new(int) + *maxSize = defaultInterQueryBuiltinValueCacheSize + c.InterQueryBuiltinValueCache.MaxNumEntries = maxSize + } else { + numEntries := *c.InterQueryBuiltinValueCache.MaxNumEntries + if numEntries < 0 { + return fmt.Errorf("invalid max_num_entries %v", numEntries) + } + } + + for name, namedConfig := range c.InterQueryBuiltinValueCache.NamedCacheConfigs { + if namedConfig == nil || (namedConfig.MaxNumEntries == nil && namedConfig.Disabled == nil) { + return fmt.Errorf("missing configuration for named cache %v", name) + } + + if namedConfig.MaxNumEntries != nil { + numEntries := *namedConfig.MaxNumEntries + if numEntries < 0 { + return fmt.Errorf("invalid max_num_entries %v for named cache %v", numEntries, name) + } + } + } + + return nil +} + +// InterQueryCacheValue defines the interface for the data that the inter-query cache holds. +type InterQueryCacheValue interface { + SizeInBytes() int64 + Clone() (InterQueryCacheValue, error) +} + +// InterQueryCache defines the interface for the inter-query cache. +type InterQueryCache interface { + Get(key ast.Value) (value InterQueryCacheValue, found bool) + Insert(key ast.Value, value InterQueryCacheValue) int + InsertWithExpiry(key ast.Value, value InterQueryCacheValue, expiresAt time.Time) int + Delete(key ast.Value) + UpdateConfig(config *Config) + Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) +} + +// NewInterQueryCache returns a new inter-query cache. +// The cache uses a FIFO eviction policy when it reaches the forced eviction threshold. +// Parameters: +// +// config - to configure the InterQueryCache +func NewInterQueryCache(config *Config) InterQueryCache { + return newCache(config) +} + +// NewInterQueryCacheWithContext returns a new inter-query cache with context. 
+// The cache uses a combination of FIFO eviction policy when it reaches the forced eviction threshold +// and a periodic cleanup routine to remove stale entries that exceed their expiration time, if specified. +// If configured with a zero stale_entry_eviction_period_seconds value, the stale entry cleanup routine is disabled. +// +// Parameters: +// +// ctx - used to control lifecycle of the stale entry cleanup routine +// config - to configure the InterQueryCache +func NewInterQueryCacheWithContext(ctx context.Context, config *Config) InterQueryCache { + iqCache := newCache(config) + if iqCache.staleEntryEvictionTimePeriodSeconds() > 0 { + go func() { + cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) + for { + select { + case <-cleanupTicker.C: + // NOTE: We stop the ticker and create a new one here to ensure that applications + // get _at least_ staleEntryEvictionTimePeriodSeconds with the cache unlocked; + // see https://github.com/open-policy-agent/opa/pull/7188/files#r1855342998 + cleanupTicker.Stop() + iqCache.cleanStaleValues() + cleanupTicker = time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) + case <-ctx.Done(): + cleanupTicker.Stop() + return + } + } + }() + } + + return iqCache +} + +type cacheItem struct { + value InterQueryCacheValue + expiresAt time.Time + keyElement *list.Element +} + +type cache struct { + items map[string]cacheItem + usage int64 + config *Config + l *list.List + mtx sync.Mutex +} + +func newCache(config *Config) *cache { + return &cache{ + items: map[string]cacheItem{}, + usage: 0, + config: config, + l: list.New(), + } +} + +// InsertWithExpiry inserts a key k into the cache with value v with an expiration time expiresAt. 
+// A zero time value for expiresAt indicates no expiry +func (c *cache) InsertWithExpiry(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) { + c.mtx.Lock() + defer c.mtx.Unlock() + return c.unsafeInsert(k, v, expiresAt) +} + +// Insert inserts a key k into the cache with value v with no expiration time. +func (c *cache) Insert(k ast.Value, v InterQueryCacheValue) (dropped int) { + return c.InsertWithExpiry(k, v, time.Time{}) +} + +// Get returns the value in the cache for k. +func (c *cache) Get(k ast.Value) (InterQueryCacheValue, bool) { + c.mtx.Lock() + defer c.mtx.Unlock() + cacheItem, ok := c.unsafeGet(k) + + if ok { + return cacheItem.value, true + } + return nil, false +} + +// Delete deletes the value in the cache for k. +func (c *cache) Delete(k ast.Value) { + c.mtx.Lock() + defer c.mtx.Unlock() + c.unsafeDelete(k) +} + +func (c *cache) UpdateConfig(config *Config) { + if config == nil { + return + } + c.mtx.Lock() + defer c.mtx.Unlock() + c.config = config +} + +func (c *cache) Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) { + c.mtx.Lock() + defer c.mtx.Unlock() + return c.unsafeClone(value) +} + +func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) { + size := v.SizeInBytes() + limit := int64(math.Ceil(float64(c.forcedEvictionThresholdPercentage())/100.0) * (float64(c.maxSizeBytes()))) + if limit > 0 { + if size > limit { + dropped++ + return dropped + } + + for key := c.l.Front(); key != nil && (c.usage+size > limit); key = c.l.Front() { + dropKey := key.Value.(ast.Value) + c.unsafeDelete(dropKey) + dropped++ + } + } + + // By deleting the old value, if it exists, we ensure the usage variable stays correct + c.unsafeDelete(k) + + c.items[k.String()] = cacheItem{ + value: v, + expiresAt: expiresAt, + keyElement: c.l.PushBack(k), + } + c.usage += size + return dropped +} + +func (c *cache) unsafeGet(k ast.Value) (cacheItem, bool) { + value, ok := c.items[k.String()] + 
return value, ok +} + +func (c *cache) unsafeDelete(k ast.Value) { + cacheItem, ok := c.unsafeGet(k) + if !ok { + return + } + + c.usage -= cacheItem.value.SizeInBytes() + delete(c.items, k.String()) + c.l.Remove(cacheItem.keyElement) +} + +func (*cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) { + return value.Clone() +} + +func (c *cache) maxSizeBytes() int64 { + if c.config == nil { + return defaultMaxSizeBytes + } + return *c.config.InterQueryBuiltinCache.MaxSizeBytes +} + +func (c *cache) forcedEvictionThresholdPercentage() int64 { + if c.config == nil { + return defaultForcedEvictionThresholdPercentage + } + return *c.config.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage +} + +func (c *cache) staleEntryEvictionTimePeriodSeconds() int64 { + if c.config == nil { + return defaultStaleEntryEvictionPeriodSeconds + } + return *c.config.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds +} + +func (c *cache) cleanStaleValues() (dropped int) { + c.mtx.Lock() + defer c.mtx.Unlock() + for key := c.l.Front(); key != nil; { + nextKey := key.Next() + // if expiresAt is zero, the item doesn't have an expiry + if ea := c.items[(key.Value.(ast.Value)).String()].expiresAt; !ea.IsZero() && ea.Before(time.Now()) { + c.unsafeDelete(key.Value.(ast.Value)) + dropped++ + } + key = nextKey + } + return dropped +} + +type InterQueryValueCacheBucket interface { + Get(key ast.Value) (value any, found bool) + Insert(key ast.Value, value any) int + Delete(key ast.Value) +} + +type interQueryValueCacheBucket struct { + items util.HasherMap[ast.Value, any] + config *NamedValueCacheConfig + mtx sync.RWMutex +} + +func newItemsMap() *util.HasherMap[ast.Value, any] { + return util.NewHasherMap[ast.Value, any](ast.ValueEqual) +} + +func (c *interQueryValueCacheBucket) Get(k ast.Value) (any, bool) { + c.mtx.RLock() + defer c.mtx.RUnlock() + return c.items.Get(k) +} + +func (c *interQueryValueCacheBucket) Insert(k ast.Value, v any) (dropped int) { + 
c.mtx.Lock() + defer c.mtx.Unlock() + + maxEntries := c.maxNumEntries() + if maxEntries > 0 { + l := c.items.Len() + if l >= maxEntries { + itemsToRemove := l - maxEntries + 1 + + // Delete a (semi-)random key to make room for the new one. + c.items.Iter(func(k ast.Value, _ any) bool { + c.items.Delete(k) + dropped++ + + return itemsToRemove == dropped + }) + } + } + + c.items.Put(k, v) + return dropped +} + +func (c *interQueryValueCacheBucket) Delete(k ast.Value) { + c.mtx.Lock() + defer c.mtx.Unlock() + c.items.Delete(k) +} + +func (c *interQueryValueCacheBucket) updateConfig(config *NamedValueCacheConfig) { + if config == nil { + return + } + c.mtx.Lock() + defer c.mtx.Unlock() + c.config = config +} + +func (c *interQueryValueCacheBucket) maxNumEntries() int { + if c.config == nil { + return defaultInterQueryBuiltinValueCacheSize + } + return *c.config.MaxNumEntries +} + +type InterQueryValueCache interface { + InterQueryValueCacheBucket + GetCache(name string) InterQueryValueCacheBucket + UpdateConfig(config *Config) +} + +func NewInterQueryValueCache(_ context.Context, config *Config) InterQueryValueCache { + var c *InterQueryBuiltinValueCacheConfig + var nc *NamedValueCacheConfig + if config != nil { + c = &config.InterQueryBuiltinValueCache + // NOTE: This is a side-effect of reusing the interQueryValueCacheBucket as the global cache. + // It's a hidden implementation detail that we can clean up in the future when revisiting the named caches + // to automatically apply them to any built-in instead of the global cache. 
+ nc = &NamedValueCacheConfig{ + MaxNumEntries: c.MaxNumEntries, + } + } + + return &interQueryBuiltinValueCache{ + globalCache: interQueryValueCacheBucket{ + items: *newItemsMap(), + config: nc, + }, + namedCaches: map[string]*interQueryValueCacheBucket{}, + config: c, + } +} + +type interQueryBuiltinValueCache struct { + globalCache interQueryValueCacheBucket + namedCachesLock sync.RWMutex + namedCaches map[string]*interQueryValueCacheBucket + config *InterQueryBuiltinValueCacheConfig +} + +func (c *interQueryBuiltinValueCache) Get(k ast.Value) (any, bool) { + if c == nil { + return nil, false + } + + return c.globalCache.Get(k) +} + +func (c *interQueryBuiltinValueCache) Insert(k ast.Value, v any) int { + if c == nil { + return 0 + } + + return c.globalCache.Insert(k, v) +} + +func (c *interQueryBuiltinValueCache) Delete(k ast.Value) { + if c == nil { + return + } + + c.globalCache.Delete(k) +} + +func (c *interQueryBuiltinValueCache) GetCache(name string) InterQueryValueCacheBucket { + if c == nil { + return nil + } + + if c.namedCaches == nil { + return nil + } + + c.namedCachesLock.RLock() + nc, ok := c.namedCaches[name] + c.namedCachesLock.RUnlock() + + if !ok { + c.namedCachesLock.Lock() + defer c.namedCachesLock.Unlock() + + if nc, ok := c.namedCaches[name]; ok { + // Some other goroutine has created the cache while we were waiting for the lock. + return nc + } + + var config *NamedValueCacheConfig + if c.config != nil { + config = c.config.NamedCacheConfigs[name] + if config == nil { + config = getDefaultInterQueryBuiltinValueCacheConfig(name) + } + } + + if config == nil { + // No config, cache disabled. 
+ return nil + } + + if config.Disabled != nil && *config.Disabled { + return nil + } + + nc = &interQueryValueCacheBucket{ + items: *newItemsMap(), + config: config, + } + + c.namedCaches[name] = nc + } + + return nc +} + +func (c *interQueryBuiltinValueCache) UpdateConfig(config *Config) { + if c == nil { + return + } + + if config == nil { + c.globalCache.updateConfig(nil) + } else { + + c.globalCache.updateConfig(&NamedValueCacheConfig{ + MaxNumEntries: config.InterQueryBuiltinValueCache.MaxNumEntries, + }) + } + + c.namedCachesLock.Lock() + defer c.namedCachesLock.Unlock() + + c.config = &config.InterQueryBuiltinValueCache + + for name, nc := range c.namedCaches { + // For each named cache: if it has a config, update it; if no config, remove it. + namedConfig := c.config.NamedCacheConfigs[name] + if namedConfig == nil { + namedConfig = getDefaultInterQueryBuiltinValueCacheConfig(name) + } + + if namedConfig == nil { + delete(c.namedCaches, name) + } else { + nc.updateConfig(namedConfig) + } + } +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go new file mode 100644 index 0000000000..534e0799a1 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go @@ -0,0 +1,33 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "sync/atomic" +) + +// Cancel defines the interface for cancelling topdown queries. Cancel +// operations are thread-safe and idempotent. +type Cancel interface { + Cancel() + Cancelled() bool +} + +type cancel struct { + flag int32 +} + +// NewCancel returns a new Cancel object. 
+func NewCancel() Cancel { + return &cancel{} +} + +func (c *cancel) Cancel() { + atomic.StoreInt32(&c.flag, 1) +} + +func (c *cancel) Cancelled() bool { + return atomic.LoadInt32(&c.flag) != 0 +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/casts.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go similarity index 76% rename from vendor/github.com/open-policy-agent/opa/topdown/casts.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go index 2eb8f97fc9..85e1a9c015 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/casts.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go @@ -6,24 +6,38 @@ package topdown import ( "strconv" + "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinToNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch a := operands[0].Value.(type) { case ast.Null: - return iter(ast.NumberTerm("0")) + return iter(ast.InternedTerm(0)) case ast.Boolean: if a { - return iter(ast.NumberTerm("1")) + return iter(ast.InternedTerm(1)) } - return iter(ast.NumberTerm("0")) + return iter(ast.InternedTerm(0)) case ast.Number: - return iter(ast.NewTerm(a)) + return iter(operands[0]) case ast.String: - _, err := strconv.ParseFloat(string(a), 64) + strValue := string(a) + + if it := ast.InternedIntNumberTermFromString(strValue); it != nil { + return iter(it) + } + + trimmedVal := strings.TrimLeft(strValue, "+-") + lowerCaseVal := strings.ToLower(trimmedVal) + + if lowerCaseVal == "inf" || lowerCaseVal == "infinity" || lowerCaseVal == "nan" { + return builtins.NewOperandTypeErr(1, operands[0].Value, "valid number string") + } + + _, err := strconv.ParseFloat(strValue, 64) if err != nil { return err } @@ -32,7 +46,7 @@ func builtinToNumber(_ BuiltinContext, operands []*ast.Term, iter 
func(*ast.Term return builtins.NewOperandTypeErr(1, operands[0].Value, "null", "boolean", "number", "string") } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case *ast.Array: @@ -50,7 +64,7 @@ func builtinToArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case *ast.Array: @@ -66,7 +80,7 @@ func builtinToSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.String: @@ -76,7 +90,7 @@ func builtinToString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.Boolean: @@ -86,7 +100,7 @@ func builtinToBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. func builtinToNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.Null: @@ -96,7 +110,7 @@ func builtinToNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) } } -// Deprecated in v0.13.0. +// Deprecated: deprecated in v0.13.0. 
func builtinToObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.Object: diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cidr.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go similarity index 93% rename from vendor/github.com/open-policy-agent/opa/topdown/cidr.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go index 5b011bd161..e6e2bb3ae7 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/cidr.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go @@ -6,11 +6,12 @@ import ( "fmt" "math/big" "net" + "slices" "sort" - "github.com/open-policy-agent/opa/ast" cidrMerge "github.com/open-policy-agent/opa/internal/cidr/merge" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func getNetFromOperand(v ast.Value) (*net.IPNet, error) { @@ -31,7 +32,7 @@ func getLastIP(cidr *net.IPNet) (net.IP, error) { prefixLen, bits := cidr.Mask.Size() if prefixLen == 0 && bits == 0 { // non-standard mask, see https://golang.org/pkg/net/#IPMask.Size - return nil, fmt.Errorf("CIDR mask is in non-standard format") + return nil, errors.New("CIDR mask is in non-standard format") } var lastIP []byte if prefixLen == bits { @@ -75,7 +76,7 @@ func builtinNetCIDRIntersects(_ BuiltinContext, operands []*ast.Term, iter func( // If either net contains the others starting IP they are overlapping cidrsOverlap := cidrnetA.Contains(cidrnetB.IP) || cidrnetB.Contains(cidrnetA.IP) - return iter(ast.BooleanTerm(cidrsOverlap)) + return iter(ast.InternedTerm(cidrsOverlap)) } func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -92,7 +93,7 @@ func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*a ip := net.ParseIP(string(bStr)) if ip != nil { - return 
iter(ast.BooleanTerm(cidrnetA.Contains(ip))) + return iter(ast.InternedTerm(cidrnetA.Contains(ip))) } // It wasn't an IP, try and parse it as a CIDR @@ -113,7 +114,7 @@ func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*a cidrContained = cidrnetA.Contains(lastIP) } - return iter(ast.BooleanTerm(cidrContained)) + return iter(ast.InternedTerm(cidrContained)) } var errNetCIDRContainsMatchElementType = errors.New("element must be string or non-empty array") @@ -137,12 +138,12 @@ func evalNetCIDRContainsMatchesOperand(operand int, a *ast.Term, iter func(cidr, case ast.String: return iter(a, a) case *ast.Array: - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { cidr, err := getCIDRMatchTerm(v.Elem(i)) if err != nil { return fmt.Errorf("operand %v: %v", operand, err) } - if err := iter(cidr, ast.IntNumberTerm(i)); err != nil { + if err := iter(cidr, ast.InternedTerm(i)); err != nil { return err } } @@ -219,13 +220,13 @@ func builtinNetCIDRExpand(bctx BuiltinContext, operands []*ast.Term, iter func(* func builtinNetCIDRIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { cidr, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedTerm(false)) } if _, _, err := net.ParseCIDR(string(cidr)); err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedTerm(false)) } - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedTerm(true)) } type cidrBlockRange struct { @@ -255,7 +256,7 @@ func (c cidrBlockRanges) Less(i, j int) bool { } // Then compare first IP. - cmp = bytes.Compare(*c[i].First, *c[i].First) + cmp = bytes.Compare(*c[i].First, *c[j].First) if cmp < 0 { return true } else if cmp > 0 { @@ -274,7 +275,7 @@ func builtinNetCIDRMerge(_ BuiltinContext, operands []*ast.Term, iter func(*ast. 
switch v := operands[0].Value.(type) { case *ast.Array: - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { network, err := generateIPNet(v.Elem(i)) if err != nil { return err @@ -299,7 +300,7 @@ func builtinNetCIDRMerge(_ BuiltinContext, operands []*ast.Term, iter func(*ast. merged := evalNetCIDRMerge(networks) - result := ast.NewSet() + result := ast.NewSetWithCapacity(len(merged)) for _, network := range merged { result.Add(ast.StringTerm(network.String())) } @@ -392,7 +393,7 @@ func mergeCIDRs(ranges cidrBlockRanges) cidrBlockRanges { ranges[i-1] = &cidrBlockRange{First: &firstIPRange, Last: &lastIPRange, Network: nil} // Delete ranges[i] since merged with the previous. - ranges = append(ranges[:i], ranges[i+1:]...) + ranges = slices.Delete(ranges, i, i+1) } } return ranges diff --git a/vendor/github.com/open-policy-agent/opa/topdown/comparison.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go similarity index 91% rename from vendor/github.com/open-policy-agent/opa/topdown/comparison.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go index 0d033d2c32..6c10129faa 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/comparison.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go @@ -4,7 +4,7 @@ package topdown -import "github.com/open-policy-agent/opa/ast" +import "github.com/open-policy-agent/opa/v1/ast" type compareFunc func(a, b ast.Value) bool @@ -34,7 +34,7 @@ func compareEq(a, b ast.Value) bool { func builtinCompare(cmp compareFunc) BuiltinFunc { return func(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - return iter(ast.BooleanTerm(cmp(operands[0].Value, operands[1].Value))) + return iter(ast.InternedTerm(cmp(operands[0].Value, operands[1].Value))) } } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go similarity 
index 92% rename from vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go index 8824d19bd2..607855632d 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go @@ -8,7 +8,8 @@ import ( "fmt" "sort" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) // CopyPropagator implements a simple copy propagation optimization to remove @@ -49,17 +50,7 @@ func (l *localVarGenerator) Generate() ast.Var { // New returns a new CopyPropagator that optimizes queries while preserving vars // in the livevars set. func New(livevars ast.VarSet) *CopyPropagator { - - sorted := make([]ast.Var, 0, len(livevars)) - for v := range livevars { - sorted = append(sorted, v) - } - - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Compare(sorted[j]) < 0 - }) - - return &CopyPropagator{livevars: livevars, sorted: sorted, localvargen: &localVarGenerator{}} + return &CopyPropagator{livevars: livevars, sorted: util.KeysSorted(livevars), localvargen: &localVarGenerator{}} } // WithEnsureNonEmptyBody configures p to ensure that results are always non-empty. @@ -163,7 +154,8 @@ func (p *CopyPropagator) Apply(query ast.Body) ast.Body { // to the current result. // Invariant: Live vars are bound (above) and reserved vars are implicitly ground. 
- safe := ast.ReservedVars.Copy() + safe := ast.NewVarSetOfSize(len(p.livevars) + len(ast.ReservedVars) + 6) + safe.Update(ast.ReservedVars) safe.Update(p.livevars) safe.Update(ast.OutputVarsFromBody(p.compiler, result, safe)) unsafe := result.Vars(ast.SafetyCheckVisitorParams).Diff(safe) @@ -173,9 +165,8 @@ func (p *CopyPropagator) Apply(query ast.Body) ast.Body { providesSafety := false outputVars := ast.OutputVarsFromExpr(p.compiler, removedEq, safe) - diff := unsafe.Diff(outputVars) - if len(diff) < len(unsafe) { - unsafe = diff + if unsafe.DiffCount(outputVars) < len(unsafe) { + unsafe = unsafe.Diff(outputVars) providesSafety = true } @@ -209,7 +200,7 @@ func (p *CopyPropagator) Apply(query ast.Body) ast.Body { // plugBindings applies the binding list and union-find to x. This process // removes as many variables as possible. -func (p *CopyPropagator) plugBindings(pctx *plugContext, expr *ast.Expr) *ast.Expr { +func (*CopyPropagator) plugBindings(pctx *plugContext, expr *ast.Expr) *ast.Expr { xform := bindingPlugTransform{ pctx: pctx, @@ -233,7 +224,7 @@ type bindingPlugTransform struct { pctx *plugContext } -func (t bindingPlugTransform) Transform(x interface{}) (interface{}, error) { +func (t bindingPlugTransform) Transform(x any) (any, error) { switch x := x.(type) { case ast.Var: return t.plugBindingsVar(t.pctx, x), nil @@ -244,7 +235,7 @@ func (t bindingPlugTransform) Transform(x interface{}) (interface{}, error) { } } -func (t bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast.Value { +func (bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast.Value { var result ast.Value = v @@ -274,7 +265,7 @@ func (t bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) ast. return b } -func (t bindingPlugTransform) plugBindingsRef(pctx *plugContext, v ast.Ref) ast.Ref { +func (bindingPlugTransform) plugBindingsRef(pctx *plugContext, v ast.Ref) ast.Ref { // Apply union-find to remove redundant variables from input. 
if root, ok := pctx.uf.Find(v[0].Value); ok { @@ -344,7 +335,7 @@ func (p *CopyPropagator) livevarRef(a *ast.Term) bool { } for _, v := range p.sorted { - if ref[0].Value.Compare(v) == 0 { + if v.Equal(ref[0].Value) { return true } } @@ -385,11 +376,11 @@ type binding struct { k, v ast.Value } -func containedIn(value ast.Value, x interface{}) bool { +func containedIn(value ast.Value, x any) bool { var stop bool var vis *ast.GenericVisitor - vis = ast.NewGenericVisitor(func(x interface{}) bool { + vis = ast.NewGenericVisitor(func(x any) bool { switch x := x.(type) { case *ast.Every: // skip body vis.Walk(x.Key) @@ -403,7 +394,7 @@ func containedIn(value ast.Value, x interface{}) bool { if v, ok := value.(ast.Ref); ok { match = x.HasPrefix(v) } else { - match = x.Compare(value) == 0 + match = x.Equal(value) } if stop || match { stop = true diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go similarity index 81% rename from vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go index 38ec56f315..cac2a3009f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go @@ -7,25 +7,21 @@ package copypropagation import ( "fmt" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) type rankFunc func(*unionFindRoot, *unionFindRoot) (*unionFindRoot, *unionFindRoot) type unionFind struct { - roots *util.HashMap + roots *util.HasherMap[ast.Value, *unionFindRoot] parents *ast.ValueMap rank rankFunc } func newUnionFind(rank rankFunc) *unionFind { return &unionFind{ - roots: util.NewHashMap(func(a util.T, b util.T) bool { 
- return a.(ast.Value).Compare(b.(ast.Value)) == 0 - }, func(v util.T) int { - return v.(ast.Value).Hash() - }), + roots: util.NewHasherMap[ast.Value, *unionFindRoot](ast.ValueEqual), parents: ast.NewValueMap(), rank: rank, } @@ -53,7 +49,7 @@ func (uf *unionFind) Find(v ast.Value) (*unionFindRoot, bool) { if parent.Compare(v) == 0 { r, ok := uf.roots.Get(v) - return r.(*unionFindRoot), ok + return r, ok } return uf.Find(parent) @@ -86,20 +82,20 @@ func (uf *unionFind) Merge(a, b ast.Value) (*unionFindRoot, bool) { func (uf *unionFind) String() string { o := struct { - Roots map[string]interface{} + Roots map[string]any Parents map[string]ast.Value }{ - map[string]interface{}{}, + map[string]any{}, map[string]ast.Value{}, } - uf.roots.Iter(func(k util.T, v util.T) bool { - o.Roots[k.(ast.Value).String()] = struct { + uf.roots.Iter(func(k ast.Value, v *unionFindRoot) bool { + o.Roots[k.String()] = struct { Constant *ast.Term Key ast.Value }{ - v.(*unionFindRoot).constant, - v.(*unionFindRoot).key, + v.constant, + v.key, } return true }) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go new file mode 100644 index 0000000000..f4ca23fae5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go @@ -0,0 +1,755 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "hash" + "os" + "strings" + "time" + + "github.com/lestrrat-go/jwx/v3/jwk" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + // blockTypeCertificate indicates this PEM block contains the signed certificate. + // Exported for tests. + blockTypeCertificate = "CERTIFICATE" + // blockTypeCertificateRequest indicates this PEM block contains a certificate + // request. Exported for tests. + blockTypeCertificateRequest = "CERTIFICATE REQUEST" + // blockTypeRSAPrivateKey indicates this PEM block contains a RSA private key. + // Exported for tests. + blockTypeRSAPrivateKey = "RSA PRIVATE KEY" + // blockTypeRSAPrivateKey indicates this PEM block contains a RSA private key. + // Exported for tests. + blockTypePrivateKey = "PRIVATE KEY" + blockTypeEcPrivateKey = "EC PRIVATE KEY" +) + +func builtinCryptoX509ParseCertificates(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + input, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + certs, err := getX509CertsFromString(string(input)) + if err != nil { + return err + } + + v, err := ast.InterfaceToValue(extendCertificates(certs)) + if err != nil { + return err + } + + return iter(ast.NewTerm(v)) +} + +// extendedCert is a wrapper around x509.Certificate that adds additional fields for JSON serialization. 
+type extendedCert struct { + x509.Certificate + URIStrings []string +} + +func extendCertificates(certs []*x509.Certificate) []extendedCert { + // add a field to certs containing the URIs as strings + processedCerts := make([]extendedCert, len(certs)) + + for i, cert := range certs { + processedCerts[i].Certificate = *cert + if cert.URIs != nil { + processedCerts[i].URIStrings = make([]string, len(cert.URIs)) + for j, uri := range cert.URIs { + processedCerts[i].URIStrings[j] = uri.String() + } + } + } + return processedCerts +} + +func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + a := operands[0].Value + input, err := builtins.StringOperand(a, 1) + if err != nil { + return err + } + + certs, err := getX509CertsFromString(string(input)) + if err != nil { + return iter(ast.ArrayTerm(ast.InternedTerm(false), ast.InternedEmptyArray)) + } + + verified, err := verifyX509CertificateChain(certs, x509.VerifyOptions{}) + if err != nil { + return iter(ast.ArrayTerm(ast.InternedTerm(false), ast.InternedEmptyArray)) + } + + value, err := ast.InterfaceToValue(extendCertificates(verified)) + if err != nil { + return err + } + + valid := ast.ArrayTerm(ast.InternedTerm(true), ast.NewTerm(value)) + + return iter(valid) +} + +var allowedKeyUsages = map[string]x509.ExtKeyUsage{ + "KeyUsageAny": x509.ExtKeyUsageAny, + "KeyUsageServerAuth": x509.ExtKeyUsageServerAuth, + "KeyUsageClientAuth": x509.ExtKeyUsageClientAuth, + "KeyUsageCodeSigning": x509.ExtKeyUsageCodeSigning, + "KeyUsageEmailProtection": x509.ExtKeyUsageEmailProtection, + "KeyUsageIPSECEndSystem": x509.ExtKeyUsageIPSECEndSystem, + "KeyUsageIPSECTunnel": x509.ExtKeyUsageIPSECTunnel, + "KeyUsageIPSECUser": x509.ExtKeyUsageIPSECUser, + "KeyUsageTimeStamping": x509.ExtKeyUsageTimeStamping, + "KeyUsageOCSPSigning": x509.ExtKeyUsageOCSPSigning, + "KeyUsageMicrosoftServerGatedCrypto": x509.ExtKeyUsageMicrosoftServerGatedCrypto, + 
"KeyUsageNetscapeServerGatedCrypto": x509.ExtKeyUsageNetscapeServerGatedCrypto, + "KeyUsageMicrosoftCommercialCodeSigning": x509.ExtKeyUsageMicrosoftCommercialCodeSigning, + "KeyUsageMicrosoftKernelCodeSigning": x509.ExtKeyUsageMicrosoftKernelCodeSigning, +} + +func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + input, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + options, err := builtins.ObjectOperand(operands[1].Value, 2) + if err != nil { + return err + } + + certs, err := getX509CertsFromString(string(input)) + if err != nil { + return iter(ast.ArrayTerm(ast.InternedTerm(false), ast.InternedEmptyArray)) + } + + // Collect the cert verification options + verifyOpt, err := extractVerifyOpts(options) + if err != nil { + return err + } + + verified, err := verifyX509CertificateChain(certs, verifyOpt) + if err != nil { + return iter(ast.ArrayTerm(ast.InternedTerm(false), ast.InternedEmptyArray)) + } + + value, err := ast.InterfaceToValue(verified) + if err != nil { + return err + } + + return iter(ast.ArrayTerm(ast.InternedTerm(true), ast.NewTerm(value))) +} + +func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err error) { + + for _, key := range options.Keys() { + k, err := ast.JSON(key.Value) + if err != nil { + return verifyOpt, err + } + k, ok := k.(string) + if !ok { + continue + } + + switch k { + case "DNSName": + dns, ok := options.Get(key).Value.(ast.String) + if ok { + verifyOpt.DNSName = strings.Trim(string(dns), "\"") + } else { + return verifyOpt, errors.New("'DNSName' should be a string") + } + case "CurrentTime": + c, ok := options.Get(key).Value.(ast.Number) + if ok { + nanosecs, ok := c.Int64() + if ok { + verifyOpt.CurrentTime = time.Unix(0, nanosecs) + } else { + return verifyOpt, errors.New("'CurrentTime' should be a valid int64 number") + } + } else { + return verifyOpt, 
errors.New("'CurrentTime' should be a number") + } + case "MaxConstraintComparisons": + c, ok := options.Get(key).Value.(ast.Number) + if ok { + maxComparisons, ok := c.Int() + if ok { + verifyOpt.MaxConstraintComparisions = maxComparisons + } else { + return verifyOpt, errors.New("'MaxConstraintComparisons' should be a valid number") + } + } else { + return verifyOpt, errors.New("'MaxConstraintComparisons' should be a number") + } + case "KeyUsages": + type forEach interface { + Foreach(func(*ast.Term)) + } + var ks forEach + switch v := options.Get(key).Value.(type) { + case *ast.Array: + ks = v + case ast.Set: + ks = v + default: + return verifyOpt, errors.New("'KeyUsages' should be an Array or Set") + } + + // Collect the x509.ExtKeyUsage values by looking up the + // mapping of key usage strings to x509.ExtKeyUsage + var invalidKUsgs []string + ks.Foreach(func(t *ast.Term) { + u, ok := t.Value.(ast.String) + if ok { + v := strings.Trim(string(u), "\"") + if k, ok := allowedKeyUsages[v]; ok { + verifyOpt.KeyUsages = append(verifyOpt.KeyUsages, k) + } else { + invalidKUsgs = append(invalidKUsgs, v) + } + } + }) + if len(invalidKUsgs) > 0 { + return x509.VerifyOptions{}, fmt.Errorf("invalid entries for 'KeyUsages' found: %s", invalidKUsgs) + } + default: + return verifyOpt, errors.New("invalid key option") + } + + } + + return verifyOpt, nil +} + +func builtinCryptoX509ParseKeyPair(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + certificate, err := builtins.StringOperandByteSlice(operands[0].Value, 1) + if err != nil { + return err + } + + key, err := builtins.StringOperandByteSlice(operands[1].Value, 1) + if err != nil { + return err + } + + certs, err := getTLSx509KeyPairFromString(certificate, key) + if err != nil { + return err + } + v, err := ast.InterfaceToValue(certs) + if err != nil { + return err + } + + return iter(ast.NewTerm(v)) +} + +func builtinCryptoX509ParseCertificateRequest(_ BuiltinContext, operands []*ast.Term, 
iter func(*ast.Term) error) error { + input, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // data to be passed to x509.ParseCertificateRequest + bytes := []byte(input) + + // if the input is not a PEM string, attempt to decode b64 + if str := string(input); !strings.HasPrefix(str, "-----BEGIN CERTIFICATE REQUEST-----") { + bytes, err = base64.StdEncoding.DecodeString(str) + if err != nil { + return err + } + } + + p, _ := pem.Decode(bytes) + if p != nil && p.Type != blockTypeCertificateRequest { + return errors.New("invalid PEM-encoded certificate signing request") + } + if p != nil { + bytes = p.Bytes + } + + csr, err := x509.ParseCertificateRequest(bytes) + if err != nil { + return err + } + + bs, err := json.Marshal(csr) + if err != nil { + return err + } + + var x any + if err := util.UnmarshalJSON(bs, &x); err != nil { + return err + } + + v, err := ast.InterfaceToValue(x) + if err != nil { + return err + } + + return iter(ast.NewTerm(v)) +} + +func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + input, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // get the raw private key + pemDataString := string(input) + + if pemDataString == "" { + return errors.New("input PEM data was empty") + } + + // This built in must be supplied a valid PEM or base64 encoded string. + // If the input is not a PEM string, attempt to decode b64. 
+ // If the base64 decode fails - this is an error + if !strings.HasPrefix(pemDataString, "-----BEGIN") { + bs, err := base64.StdEncoding.DecodeString(pemDataString) + if err != nil { + return err + } + pemDataString = string(bs) + } + + rawKeys, err := getPrivateKeysFromPEMData(pemDataString) + if err != nil { + return err + } + + if len(rawKeys) == 0 { + return iter(ast.InternedNullTerm) + } + + key, err := jwk.Import(rawKeys[0]) + if err != nil { + return err + } + + jsonKey, err := json.Marshal(key) + if err != nil { + return err + } + + var x any + if err := util.UnmarshalJSON(jsonKey, &x); err != nil { + return err + } + + value, err := ast.InterfaceToValue(x) + if err != nil { + return err + } + + return iter(ast.NewTerm(value)) +} + +func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + a := operands[0].Value + input, err := builtins.StringOperand(a, 1) + if err != nil { + return err + } + + if string(input) == "" { + return iter(ast.InternedNullTerm) + } + + // get the raw private key + rawKeys, err := getPrivateKeysFromPEMData(string(input)) + if err != nil { + return err + } + + if len(rawKeys) == 0 { + return iter(ast.InternedEmptyArray) + } + + bs, err := json.Marshal(rawKeys) + if err != nil { + return err + } + + var x any + if err := util.UnmarshalJSON(bs, &x); err != nil { + return err + } + + value, err := ast.InterfaceToValue(x) + if err != nil { + return err + } + + return iter(ast.NewTerm(value)) +} + +func toHexEncodedString(src []byte) string { + dst := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(dst, src) + return util.ByteSliceToString(dst) +} + +func builtinCryptoMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) + if err != nil { + return err + } + + md5sum := md5.Sum(bs) + + return iter(ast.StringTerm(toHexEncodedString(md5sum[:]))) +} + +func builtinCryptoSha1(_ 
BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) + if err != nil { + return err + } + + sha1sum := sha1.Sum(bs) + + return iter(ast.StringTerm(toHexEncodedString(sha1sum[:]))) +} + +func builtinCryptoSha256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) + if err != nil { + return err + } + + sha256sum := sha256.Sum256(bs) + + return iter(ast.StringTerm(toHexEncodedString(sha256sum[:]))) +} + +func hmacHelper(operands []*ast.Term, iter func(*ast.Term) error, h func() hash.Hash) error { + message, err := builtins.StringOperandByteSlice(operands[0].Value, 1) + if err != nil { + return err + } + + key, err := builtins.StringOperandByteSlice(operands[1].Value, 2) + if err != nil { + return err + } + + mac := hmac.New(h, key) + mac.Write(message) + messageDigest := mac.Sum(nil) + + return iter(ast.StringTerm(hex.EncodeToString(messageDigest))) +} + +func builtinCryptoHmacMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return hmacHelper(operands, iter, md5.New) +} + +func builtinCryptoHmacSha1(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return hmacHelper(operands, iter, sha1.New) +} + +func builtinCryptoHmacSha256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return hmacHelper(operands, iter, sha256.New) +} + +func builtinCryptoHmacSha512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return hmacHelper(operands, iter, sha512.New) +} + +func builtinCryptoHmacEqual(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + mac1, err := builtins.StringOperandByteSlice(operands[0].Value, 1) + if err != nil { + return err + } + + mac2, err := builtins.StringOperandByteSlice(operands[1].Value, 2) + if err != nil { + return err + } + + return 
iter(ast.InternedTerm(hmac.Equal(mac1, mac2))) +} + +func init() { + RegisterBuiltinFunc(ast.CryptoX509ParseCertificates.Name, builtinCryptoX509ParseCertificates) + RegisterBuiltinFunc(ast.CryptoX509ParseAndVerifyCertificates.Name, builtinCryptoX509ParseAndVerifyCertificates) + RegisterBuiltinFunc(ast.CryptoX509ParseAndVerifyCertificatesWithOptions.Name, builtinCryptoX509ParseAndVerifyCertificatesWithOptions) + RegisterBuiltinFunc(ast.CryptoMd5.Name, builtinCryptoMd5) + RegisterBuiltinFunc(ast.CryptoSha1.Name, builtinCryptoSha1) + RegisterBuiltinFunc(ast.CryptoSha256.Name, builtinCryptoSha256) + RegisterBuiltinFunc(ast.CryptoX509ParseCertificateRequest.Name, builtinCryptoX509ParseCertificateRequest) + RegisterBuiltinFunc(ast.CryptoX509ParseRSAPrivateKey.Name, builtinCryptoJWKFromPrivateKey) + RegisterBuiltinFunc(ast.CryptoParsePrivateKeys.Name, builtinCryptoParsePrivateKeys) + RegisterBuiltinFunc(ast.CryptoX509ParseKeyPair.Name, builtinCryptoX509ParseKeyPair) + RegisterBuiltinFunc(ast.CryptoHmacMd5.Name, builtinCryptoHmacMd5) + RegisterBuiltinFunc(ast.CryptoHmacSha1.Name, builtinCryptoHmacSha1) + RegisterBuiltinFunc(ast.CryptoHmacSha256.Name, builtinCryptoHmacSha256) + RegisterBuiltinFunc(ast.CryptoHmacSha512.Name, builtinCryptoHmacSha512) + RegisterBuiltinFunc(ast.CryptoHmacEqual.Name, builtinCryptoHmacEqual) +} + +func verifyX509CertificateChain(certs []*x509.Certificate, vo x509.VerifyOptions) ([]*x509.Certificate, error) { + if len(certs) < 2 { + return nil, builtins.NewOperandErr(1, "must supply at least two certificates to be able to verify") + } + + // first cert is the root + roots := x509.NewCertPool() + roots.AddCert(certs[0]) + + // all other certs except the last are intermediates + intermediates := x509.NewCertPool() + for i := 1; i < len(certs)-1; i++ { + intermediates.AddCert(certs[i]) + } + + // last cert is the leaf + leaf := certs[len(certs)-1] + + // verify the cert chain back to the root + verifyOpts := x509.VerifyOptions{ + Roots: roots, + 
Intermediates: intermediates, + DNSName: vo.DNSName, + CurrentTime: vo.CurrentTime, + KeyUsages: vo.KeyUsages, + MaxConstraintComparisions: vo.MaxConstraintComparisions, + } + chains, err := leaf.Verify(verifyOpts) + if err != nil { + return nil, err + } + + return chains[0], nil +} + +func getX509CertsFromString(certs string) ([]*x509.Certificate, error) { + // if the input is PEM handle that + if strings.HasPrefix(certs, "-----BEGIN") { + return getX509CertsFromPem([]byte(certs)) + } + + // assume input is base64 if not PEM + b64, err := base64.StdEncoding.DecodeString(certs) + if err != nil { + return nil, err + } + + // handle if the decoded base64 contains PEM rather than the expected DER + if bytes.HasPrefix(b64, []byte("-----BEGIN")) { + return getX509CertsFromPem(b64) + } + + // otherwise assume the contents are DER + return x509.ParseCertificates(b64) +} + +func getX509CertsFromPem(pemBlocks []byte) ([]*x509.Certificate, error) { + var decodedCerts []byte + for len(pemBlocks) > 0 { + p, r := pem.Decode(pemBlocks) + if p != nil && p.Type != blockTypeCertificate { + return nil, fmt.Errorf("PEM block type is '%s', expected %s", p.Type, blockTypeCertificate) + } + + if p == nil { + break + } + + pemBlocks = r + decodedCerts = append(decodedCerts, p.Bytes...) + } + + return x509.ParseCertificates(decodedCerts) +} + +func getPrivateKeysFromPEMData(pemData string) ([]crypto.PrivateKey, error) { + pemBlockString := pemData + + var validPrivateKeys []crypto.PrivateKey + + // if the input is base64, decode it + bs, err := base64.StdEncoding.DecodeString(pemBlockString) + if err == nil { + pemBlockString = string(bs) + } + bs = []byte(pemBlockString) + + for len(bs) > 0 { + inputLen := len(bs) + var block *pem.Block + block, bs = pem.Decode(bs) + if block == nil && len(bs) == 0 { + break + } + // should only happen if end of input is not a valid PEM block. See TestParseRSAPrivateKeyVariedPemInput. 
+ if inputLen == len(bs) { + break + } + + if block == nil { + continue + } + + switch block.Type { + case blockTypeRSAPrivateKey: + parsedKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + validPrivateKeys = append(validPrivateKeys, parsedKey) + case blockTypePrivateKey: + parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + validPrivateKeys = append(validPrivateKeys, parsedKey) + case blockTypeEcPrivateKey: + parsedKey, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, err + } + validPrivateKeys = append(validPrivateKeys, parsedKey) + } + } + return validPrivateKeys, nil +} + +// addCACertsFromFile adds CA certificates from filePath into the given pool. +// If pool is nil, it creates a new x509.CertPool. pool is returned. +func addCACertsFromFile(pool *x509.CertPool, filePath string) (*x509.CertPool, error) { + if pool == nil { + pool = x509.NewCertPool() + } + + caCert, err := os.ReadFile(filePath) + if err != nil { + return nil, err + } + + if ok := pool.AppendCertsFromPEM(caCert); !ok { + return nil, fmt.Errorf("could not append CA certificates from %q", filePath) + } + + return pool, nil +} + +// addCACertsFromBytes adds CA certificates from pemBytes into the given pool. +// If pool is nil, it creates a new x509.CertPool. pool is returned. +func addCACertsFromBytes(pool *x509.CertPool, pemBytes []byte) (*x509.CertPool, error) { + if pool == nil { + pool = x509.NewCertPool() + } + + if ok := pool.AppendCertsFromPEM(pemBytes); !ok { + return nil, errors.New("could not append certificates") + } + + return pool, nil +} + +// addCACertsFromEnv adds CA certificates from the environment variable named +// by envName into the given pool. If pool is nil, it creates a new x509.CertPool. +// pool is returned. 
+func addCACertsFromEnv(pool *x509.CertPool, envName string) (*x509.CertPool, error) { + pool, err := addCACertsFromBytes(pool, []byte(os.Getenv(envName))) + if err != nil { + return nil, fmt.Errorf("could not add CA certificates from envvar %q: %w", envName, err) + } + + return pool, nil +} + +var beginPrefix = []byte("-----BEGIN ") + +func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls.Certificate, error) { + + if !bytes.HasPrefix(certPemBlock, beginPrefix) { + s, err := base64.StdEncoding.DecodeString(string(certPemBlock)) + if err != nil { + return nil, err + } + certPemBlock = s + } + + if !bytes.HasPrefix(keyPemBlock, beginPrefix) { + s, err := base64.StdEncoding.DecodeString(string(keyPemBlock)) + if err != nil { + return nil, err + } + keyPemBlock = s + } + + // we assume it a DER certificate and try to convert it to a PEM. + if !bytes.HasPrefix(certPemBlock, beginPrefix) { + + pemBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: certPemBlock, + } + + var buf bytes.Buffer + if err := pem.Encode(&buf, pemBlock); err != nil { + return nil, err + } + certPemBlock = buf.Bytes() + + } + // we assume it a DER key and try to convert it to a PEM. + if !bytes.HasPrefix(keyPemBlock, []byte("-----BEGIN")) { + pemBlock := &pem.Block{ + Type: "PRIVATE KEY", + Bytes: keyPemBlock, + } + var buf bytes.Buffer + if err := pem.Encode(&buf, pemBlock); err != nil { + return nil, err + } + keyPemBlock = buf.Bytes() + } + + cert, err := tls.X509KeyPair(certPemBlock, keyPemBlock) + if err != nil { + return nil, err + } + + return &cert, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go new file mode 100644 index 0000000000..9aa7aa45c5 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The OPA Authors. All rights reserved. 
+// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package topdown provides low-level query evaluation support. +// +// The topdown implementation is a modified version of the standard top-down +// evaluation algorithm used in Datalog. References and comprehensions are +// evaluated eagerly while all other terms are evaluated lazily. +package topdown diff --git a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go similarity index 81% rename from vendor/github.com/open-policy-agent/opa/topdown/encoding.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go index f3475a60d0..5ed8df68f3 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go @@ -5,7 +5,6 @@ package topdown import ( - "bytes" "encoding/base64" "encoding/hex" "encoding/json" @@ -15,13 +14,12 @@ import ( "sigs.k8s.io/yaml" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" ) func builtinJSONMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - asJSON, err := ast.JSON(operands[0].Value) if err != nil { return err @@ -32,11 +30,10 @@ func builtinJSONMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T return err } - return iter(ast.StringTerm(string(bs))) + return iter(ast.StringTerm(util.ByteSliceToString(bs))) } func builtinJSONMarshalWithOpts(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - asJSON, err := ast.JSON(operands[0].Value) if err != nil { return err @@ -101,36 +98,34 @@ func builtinJSONMarshalWithOpts(_ BuiltinContext, operands []*ast.Term, iter fun } var bs 
[]byte - if shouldPrettyPrint { bs, err = json.MarshalIndent(asJSON, prefixWith, indentWith) } else { bs, err = json.Marshal(asJSON) } - if err != nil { return err } + s := util.ByteSliceToString(bs) + if shouldPrettyPrint { // json.MarshalIndent() function will not prefix the first line of emitted JSON - return iter(ast.StringTerm(prefixWith + string(bs))) + return iter(ast.StringTerm(prefixWith + s)) } - return iter(ast.StringTerm(string(bs))) + return iter(ast.StringTerm(s)) } func builtinJSONUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - var x interface{} - - if err := util.UnmarshalJSON([]byte(str), &x); err != nil { + var x any + if err := util.UnmarshalJSON(bs, &x); err != nil { return err } v, err := ast.InterfaceToValue(x) @@ -141,22 +136,21 @@ func builtinJSONUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast } func builtinJSONIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedTerm(false)) } - return iter(ast.BooleanTerm(json.Valid([]byte(str)))) + return iter(ast.InternedTerm(json.Valid(bs))) } func builtinBase64Encode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - return iter(ast.StringTerm(base64.StdEncoding.EncodeToString([]byte(str)))) + return iter(ast.StringTerm(base64.StdEncoding.EncodeToString(bs))) } func builtinBase64Decode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -169,34 +163,34 
@@ func builtinBase64Decode(_ BuiltinContext, operands []*ast.Term, iter func(*ast. if err != nil { return err } - return iter(ast.NewTerm(ast.String(result))) + return iter(ast.InternedTerm(string(result))) } func builtinBase64IsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { str, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedTerm(false)) } _, err = base64.StdEncoding.DecodeString(string(str)) - return iter(ast.BooleanTerm(err == nil)) + return iter(ast.InternedTerm(err == nil)) } func builtinBase64UrlEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - return iter(ast.StringTerm(base64.URLEncoding.EncodeToString([]byte(str)))) + return iter(ast.StringTerm(base64.URLEncoding.EncodeToString(bs))) } func builtinBase64UrlEncodeNoPad(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - return iter(ast.StringTerm(base64.RawURLEncoding.EncodeToString([]byte(str)))) + return iter(ast.StringTerm(base64.RawURLEncoding.EncodeToString(bs))) } func builtinBase64UrlDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -224,7 +218,7 @@ func builtinBase64UrlDecode(_ BuiltinContext, operands []*ast.Term, iter func(*a if err != nil { return err } - return iter(ast.NewTerm(ast.String(result))) + return iter(ast.InternedTerm(string(result))) } func builtinURLQueryEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -255,7 +249,7 @@ func builtinURLQueryEncodeObject(_ BuiltinContext, operands []*ast.Term, iter fu return err } - inputs, ok := 
asJSON.(map[string]interface{}) + inputs, ok := asJSON.(map[string]any) if !ok { return builtins.NewOperandTypeErr(1, operands[0].Value, "object") } @@ -266,7 +260,7 @@ func builtinURLQueryEncodeObject(_ BuiltinContext, operands []*ast.Term, iter fu switch vv := v.(type) { case string: query.Set(k, vv) - case []interface{}: + case []any: for _, val := range vv { strVal, ok := val.(string) if !ok { @@ -293,7 +287,7 @@ func builtinURLQueryDecodeObject(_ BuiltinContext, operands []*ast.Term, iter fu return err } - queryObject := ast.NewObject() + queryObject := ast.NewObjectWithCapacity(len(queryParams)) for k, v := range queryParams { paramsArray := make([]*ast.Term, len(v)) for i, param := range v { @@ -306,45 +300,39 @@ func builtinURLQueryDecodeObject(_ BuiltinContext, operands []*ast.Term, iter fu } func builtinYAMLMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - asJSON, err := ast.JSON(operands[0].Value) if err != nil { return err } - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - if err := encoder.Encode(asJSON); err != nil { - return err - } - - bs, err := yaml.JSONToYAML(buf.Bytes()) + bs, err := yaml.Marshal(asJSON) if err != nil { return err } - return iter(ast.StringTerm(string(bs))) + return iter(ast.StringTerm(util.ByteSliceToString(bs))) } func builtinYAMLUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - bs, err := yaml.YAMLToJSON([]byte(str)) + js, err := yaml.YAMLToJSON(bs) if err != nil { return err } - buf := bytes.NewBuffer(bs) - decoder := util.NewJSONDecoder(buf) - var val interface{} - err = decoder.Decode(&val) - if err != nil { + reader := ast.BytesReaderPool.Get() + defer ast.BytesReaderPool.Put(reader) + reader.Reset(js) + + var val any + if err = util.NewJSONDecoder(reader).Decode(&val); err != nil { 
return err } + v, err := ast.InterfaceToValue(val) if err != nil { return err @@ -353,22 +341,22 @@ func builtinYAMLUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast } func builtinYAMLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedTerm(false)) } - var x interface{} - err = yaml.Unmarshal([]byte(str), &x) - return iter(ast.BooleanTerm(err == nil)) + var x any + err = yaml.Unmarshal(bs, &x) + return iter(ast.InternedTerm(err == nil)) } func builtinHexEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) + bs, err := builtins.StringOperandByteSlice(operands[0].Value, 1) if err != nil { return err } - return iter(ast.StringTerm(hex.EncodeToString([]byte(str)))) + return iter(ast.StringTerm(hex.EncodeToString(bs))) } func builtinHexDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -380,7 +368,7 @@ func builtinHexDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter if err != nil { return err } - return iter(ast.NewTerm(ast.String(val))) + return iter(ast.StringTerm(string(val))) } func init() { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go new file mode 100644 index 0000000000..e80339e312 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go @@ -0,0 +1,163 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "errors" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" +) + +// Halt is a special error type that built-in function implementations return to indicate +// that policy evaluation should stop immediately. +type Halt struct { + Err error +} + +func (h Halt) Error() string { + return h.Err.Error() +} + +func (h Halt) Unwrap() error { return h.Err } + +// Error is the error type returned by the Eval and Query functions when +// an evaluation error occurs. +type Error struct { + Code string `json:"code"` + Message string `json:"message"` + Location *ast.Location `json:"location,omitempty"` + err error `json:"-"` +} + +const ( + + // InternalErr represents an unknown evaluation error. + InternalErr string = "eval_internal_error" + + // CancelErr indicates the evaluation process was cancelled. + CancelErr string = "eval_cancel_error" + + // ConflictErr indicates a conflict was encountered during evaluation. For + // instance, a conflict occurs if a rule produces multiple, differing values + // for the same key in an object. Conflict errors indicate the policy does + // not account for the data loaded into the policy engine. + ConflictErr string = "eval_conflict_error" + + // TypeErr indicates evaluation stopped because an expression was applied to + // a value of an inappropriate type. + TypeErr string = "eval_type_error" + + // BuiltinErr indicates a built-in function received a semantically invalid + // input or encountered some kind of runtime error, e.g., connection + // timeout, connection refused, etc. + BuiltinErr string = "eval_builtin_error" + + // WithMergeErr indicates that the real and replacement data could not be merged. + WithMergeErr string = "eval_with_merge_error" +) + +// IsError returns true if the err is an Error. +func IsError(err error) bool { + var e *Error + return errors.As(err, &e) +} + +// IsCancel returns true if err was caused by cancellation. 
+func IsCancel(err error) bool { + return errors.Is(err, &Error{Code: CancelErr}) +} + +// Is allows matching topdown errors using errors.Is (see IsCancel). +func (e *Error) Is(target error) bool { + var t *Error + if errors.As(target, &t) { + return (t.Code == "" || e.Code == t.Code) && + (t.Message == "" || e.Message == t.Message) && + (t.Location == nil || t.Location.Compare(e.Location) == 0) + } + return false +} + +func (e *Error) Error() string { + buf, _ := e.AppendText(make([]byte, 0, e.StringLength())) + return util.ByteSliceToString(buf) +} + +func (e *Error) AppendText(buf []byte) ([]byte, error) { + if e.Location != nil { + buf, _ := e.Location.AppendText(buf) + buf = append(append(buf, ": "...), e.Code...) + buf = append(append(buf, ": "...), e.Message...) + return buf, nil + } + + return append(append(append(buf, e.Code...), ": "...), e.Message...), nil +} + +func (e *Error) StringLength() int { + l := len(e.Code) + 2 + len(e.Message) + if e.Location != nil { + l += e.Location.StringLength() + 2 + } + return l +} + +func (e *Error) Wrap(err error) *Error { + e.err = err + return e +} + +func (e *Error) Unwrap() error { + return e.err +} + +func functionConflictErr(loc *ast.Location) error { + return &Error{ + Code: ConflictErr, + Location: loc, + Message: "functions must not produce multiple outputs for same inputs", + } +} + +func completeDocConflictErr(loc *ast.Location) error { + return &Error{ + Code: ConflictErr, + Location: loc, + Message: "complete rules must not produce multiple outputs", + } +} + +func objectDocKeyConflictErr(loc *ast.Location) error { + return &Error{ + Code: ConflictErr, + Location: loc, + Message: "object keys must be unique", + } +} + +func unsupportedBuiltinErr(loc *ast.Location, name string) error { + return &Error{ + Code: InternalErr, + Location: loc, + Message: "unsupported built-in: " + name, + } +} + +func mergeConflictErr(loc *ast.Location) error { + return &Error{ + Code: WithMergeErr, + Location: loc, + Message: 
"real and replacement data could not be merged", + } +} + +func internalErr(loc *ast.Location, msg string) error { + return &Error{ + Code: InternalErr, + Location: loc, + Message: msg, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/eval.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go similarity index 75% rename from vendor/github.com/open-policy-agent/opa/topdown/eval.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go index 7884ac01e0..6f93ba530e 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/eval.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go @@ -5,19 +5,21 @@ import ( "errors" "fmt" "io" - "sort" + "slices" "strconv" "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/copypropagation" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/types" + "sync" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/copypropagation" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" ) type evalIterator func(*eval) error @@ -30,6 +32,13 @@ type queryIDFactory struct { curr uint64 } +type biunifyArraysRecParams struct { + a, b *ast.Array + b1, b2 *bindings + iter unifyIterator + idx int +} + // Note: The first call to Next() returns 0. 
func (f *queryIDFactory) Next() uint64 { curr := f.curr @@ -57,60 +66,120 @@ func (ee deferredEarlyExitError) Error() string { return fmt.Sprintf("%v: deferred early exit", ee.e.query) } +// Note(æ): this struct is formatted for optimal alignment as it is big, internal and instantiated +// *very* frequently during evaluation. If you need to add fields here, please consider the alignment +// of the struct, and use something like betteralign (https://github.com/dkorunic/betteralign) if you +// need help with that. type eval struct { ctx context.Context metrics metrics.Metrics seed io.Reader + cancel Cancel + queryCompiler ast.QueryCompiler + store storage.Store + txn storage.Transaction + virtualCache VirtualCache + baseCache BaseCache + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + printHook print.Hook time *ast.Term - queryID uint64 queryIDFact *queryIDFactory parent *eval caller *eval - cancel Cancel - query ast.Body - queryCompiler ast.QueryCompiler - index int - indexing bool - earlyExit bool bindings *bindings - store storage.Store - baseCache *baseCache - txn storage.Transaction compiler *ast.Compiler input *ast.Term data *ast.Term external *resolverTrie targetStack *refStack - tracers []QueryTracer - traceEnabled bool traceLastLocation *ast.Location // Last location of a trace event. 
- plugTraceVars bool instr *Instrumentation builtins map[string]*Builtin builtinCache builtins.Cache ndBuiltinCache builtins.NDBCache functionMocks *functionMocksStack - virtualCache VirtualCache comprehensionCache *comprehensionCache - interQueryBuiltinCache cache.InterQueryCache - interQueryBuiltinValueCache cache.InterQueryValueCache saveSet *saveSet saveStack *saveStack saveSupport *saveSupport saveNamespace *ast.Term - skipSaveNamespace bool inliningControl *inliningControl - genvarprefix string - genvarid int runtime *ast.Term builtinErrors *builtinErrors - printHook print.Hook + roundTripper CustomizeRoundTripper + genvarprefix string + query ast.Body + tracers []QueryTracer tracingOpts tracing.Options + queryID uint64 + timeStart int64 + index int + genvarid int + indexing bool + earlyExit bool + traceEnabled bool + plugTraceVars bool + skipSaveNamespace bool findOne bool strictObjects bool + defined bool } +type ( + evfp struct{ pool sync.Pool } + evbp struct{ pool sync.Pool } +) + +func (ep *evfp) Put(e *evalFunc) { + if e != nil { + e.e, e.terms, e.ir = nil, nil, nil + ep.pool.Put(e) + } +} + +func (ep *evfp) Get() *evalFunc { + return ep.pool.Get().(*evalFunc) +} + +func (ep *evbp) Put(e *evalBuiltin) { + if e != nil { + e.e, e.bi, e.bctx, e.f, e.terms = nil, nil, nil, nil, nil + ep.pool.Put(e) + } +} + +func (ep *evbp) Get() *evalBuiltin { + return ep.pool.Get().(*evalBuiltin) +} + +var ( + evalPool = util.NewSyncPool[eval]() + deecPool = util.NewSyncPool[deferredEarlyExitContainer]() + resolverPool = util.NewSyncPool[evalResolver]() + arraysRecPool = util.NewSyncPool[biunifyArraysRecParams]() + evalFuncPool = &evfp{ + pool: sync.Pool{ + New: func() any { + return &evalFunc{} + }, + }, + } + evalBuiltinPool = &evbp{ + pool: sync.Pool{ + New: func() any { + return &evalBuiltin{} + }, + }, + } +) + func (e *eval) Run(iter evalIterator) error { + if !e.traceEnabled { + // avoid function literal escaping to heap if we don't need the trace + return 
e.eval(iter) + } + e.traceEnter(e.query) return e.eval(func(e *eval) error { e.traceExit(e.query) @@ -129,47 +198,48 @@ func (e *eval) String() string { func (e *eval) string(s *strings.Builder) { fmt.Fprintf(s, "') + s.WriteByte('>') } func (e *eval) builtinFunc(name string) (*ast.Builtin, BuiltinFunc, bool) { decl, ok := ast.BuiltinMap[name] if ok { - f, ok := builtinFunctions[name] - if ok { + if f, ok := builtinFunctions[name]; ok { return decl, f, true } - } else { - bi, ok := e.builtins[name] - if ok { - return bi.Decl, bi.Func, true + if bi, ok := e.builtins[name]; ok { + return decl, bi.Func, true } } + if bi, ok := e.builtins[name]; ok { + return bi.Decl, bi.Func, true + } + return nil, nil, false } -func (e *eval) closure(query ast.Body) *eval { - cpy := *e +func (e *eval) closure(query ast.Body, cpy *eval) { + *cpy = *e cpy.index = 0 cpy.query = query cpy.queryID = cpy.queryIDFact.Next() cpy.parent = e cpy.findOne = false - return &cpy } -func (e *eval) child(query ast.Body) *eval { - cpy := *e +// childWithBindingSizeHint creates a child evaluator with bindings pre-sized for the expected number of variables. +// This reduces memory waste when evaluating functions or rules with known argument counts. 
+func (e *eval) childWithBindingSizeHint(query ast.Body, cpy *eval, sizeHint int) { + *cpy = *e cpy.index = 0 cpy.query = query cpy.queryID = cpy.queryIDFact.Next() - cpy.bindings = newBindings(cpy.queryID, e.instr) + cpy.bindings = newBindingsWithSize(cpy.queryID, e.instr, sizeHint) cpy.parent = e cpy.findOne = false - return &cpy } func (e *eval) next(iter evalIterator) error { @@ -183,7 +253,7 @@ func (e *eval) partial() bool { return e.saveSet != nil } -func (e *eval) unknown(x interface{}, b *bindings) bool { +func (e *eval) unknown(x any, b *bindings) bool { if !e.partial() { return false } @@ -198,6 +268,11 @@ func (e *eval) unknown(x interface{}, b *bindings) bool { return saveRequired(e.compiler, e.inliningControl, true, e.saveSet, b, x, false) } +// exactly like `unknown` above` but without the cost of `any` boxing when arg is known to be a ref +func (e *eval) unknownRef(ref ast.Ref, b *bindings) bool { + return e.partial() && saveRequired(e.compiler, e.inliningControl, true, e.saveSet, b, ast.NewTerm(ref), false) +} + func (e *eval) traceEnter(x ast.Node) { e.traceEvent(EnterOp, x, "", nil) } @@ -243,7 +318,6 @@ func (e *eval) traceUnify(a, b *ast.Term) { } func (e *eval) traceEvent(op Op, x ast.Node, msg string, target *ast.Ref) { - if !e.traceEnabled { return } @@ -335,6 +409,13 @@ func (e *eval) evalExpr(iter evalIterator) error { } if e.cancel != nil && e.cancel.Cancelled() { + if e.ctx != nil && e.ctx.Err() != nil { + return &Error{ + Code: CancelErr, + Message: e.ctx.Err().Error(), + err: e.ctx.Err(), + } + } return &Error{ Code: CancelErr, Message: "caller cancelled query execution", @@ -342,13 +423,9 @@ func (e *eval) evalExpr(iter evalIterator) error { } if e.index >= len(e.query) { - err := iter(e) - - if err != nil { + if err := iter(e); err != nil { switch err := err.(type) { - case *deferredEarlyExitError: - return wrapErr(err) - case *earlyExitError: + case *deferredEarlyExitError, *earlyExitError: return wrapErr(err) default: return err @@ 
-360,9 +437,11 @@ func (e *eval) evalExpr(iter evalIterator) error { } return nil } - expr := e.query[e.index] - e.traceEval(expr) + expr := e.query[e.index] + if e.traceEnabled { + e.traceEval(expr) + } if len(expr.With) > 0 { return e.evalWith(iter) @@ -374,46 +453,119 @@ func (e *eval) evalExpr(iter evalIterator) error { } func (e *eval) evalStep(iter evalIterator) error { - expr := e.query[e.index] if expr.Negated { return e.evalNot(iter) } - var defined bool var err error + + // NOTE(æ): the reason why there's one branch for the tracing case and one almost + // identical branch below for when tracing is disabled is that the tracing case + // allocates wildly. These allocations are cause by the "defined" boolean variable + // escaping to the heap as its value is set from inside of closures. There may very + // well be more elegant solutions to this problem, but this is one that works, and + // saves several *million* allocations for some workloads. So feel free to refactor + // this, but do make sure that the common non-tracing case doesn't pay in allocations + // for something that is only needed when tracing is enabled. 
+ if e.traceEnabled { + var defined bool + switch terms := expr.Terms.(type) { + case []*ast.Term: + switch { + case expr.IsEquality(): + err = e.unify(terms[1], terms[2], func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + default: + err = e.evalCall(terms, func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + } + case *ast.Term: + // generateVar inlined here to avoid extra allocations in hot path + rterm := ast.VarTerm(e.fmtVarTerm()) + + if e.partial() { + e.inliningControl.PushDisable(rterm.Value, true) + } + + err = e.unify(terms, rterm, func() error { + if e.saveSet.Contains(rterm, e.bindings) { + return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error { + return iter(e) + }) + } + if !e.bindings.Plug(rterm).Equal(ast.InternedTerm(false)) { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + } + return nil + }) + + if e.partial() { + e.inliningControl.PopDisable() + } + case *ast.Every: + eval := evalEvery{ + Every: terms, + e: e, + expr: expr, + } + err = eval.eval(func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + + default: // guard-rail for adding extra (Expr).Terms types + return fmt.Errorf("got %T terms: %[1]v", terms) + } + + if err != nil { + return err + } + + if !defined { + e.traceFail(expr) + } + + return nil + } + switch terms := expr.Terms.(type) { case []*ast.Term: switch { case expr.IsEquality(): err = e.unify(terms[1], terms[2], func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) default: err = e.evalCall(terms, func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) } case *ast.Term: - rterm := e.generateVar(fmt.Sprintf("term_%d_%d", e.queryID, e.index)) + // generateVar inlined here to avoid extra allocations in hot path + rterm := ast.VarTerm(e.fmtVarTerm()) err = e.unify(terms, rterm, func() error { - if 
e.saveSet.Contains(rterm, e.bindings) { + if e.saveSet != nil && e.saveSet.Contains(rterm, e.bindings) { return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error { return iter(e) }) } - if !e.bindings.Plug(rterm).Equal(ast.BooleanTerm(false)) { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + if !e.bindings.Plug(rterm).Equal(ast.InternedTerm(false)) { + return iter(e) } return nil }) @@ -424,56 +576,65 @@ func (e *eval) evalStep(iter evalIterator) error { expr: expr, } err = eval.eval(func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) default: // guard-rail for adding extra (Expr).Terms types return fmt.Errorf("got %T terms: %[1]v", terms) } - if err != nil { - return err - } + return err +} - if !defined { - e.traceFail(expr) - } +// Single-purpose fmt.Sprintf replacement for generating variable names with only +// one allocation performed instead of 4, and in 1/3 the time. +func (e *eval) fmtVarTerm() string { + buf := make([]byte, 0, len(e.genvarprefix)+util.NumDigitsUint(e.queryID)+util.NumDigitsInt(e.index)+7) - return nil + buf = append(buf, e.genvarprefix...) + buf = append(buf, "_term_"...) 
+ buf = strconv.AppendUint(buf, e.queryID, 10) + buf = append(buf, '_') + buf = strconv.AppendInt(buf, int64(e.index), 10) + + return util.ByteSliceToString(buf) } func (e *eval) evalNot(iter evalIterator) error { - expr := e.query[e.index] if e.unknown(expr, e.bindings) { return e.evalNotPartial(iter) } - negation := ast.NewBody(expr.Complement().NoWith()) - child := e.closure(negation) + negation := ast.NewBody(expr.ComplementNoWith()) + child := evalPool.Get() + defer evalPool.Put(child) - var defined bool - child.traceEnter(negation) + e.closure(negation, child) - err := child.eval(func(*eval) error { - child.traceExit(negation) - defined = true - child.traceRedo(negation) - return nil - }) + if e.traceEnabled { + child.traceEnter(negation) + } - if err != nil { + if err := child.eval(func(*eval) error { + if e.traceEnabled { + child.traceExit(negation) + child.traceRedo(negation) + } + child.defined = true + + return nil + }); err != nil { return err } - if !defined { + if !child.defined { return iter(e) } + child.defined = false + e.traceFail(expr) return nil } @@ -482,16 +643,18 @@ func (e *eval) evalWith(iter evalIterator) error { expr := e.query[e.index] - // Disable inlining on all references in the expression so the result of - // partial evaluation has the same semantics w/ the with statements - // preserved. var disable []ast.Ref - disableRef := func(x ast.Ref) bool { - disable = append(disable, x.GroundPrefix()) - return false - } if e.partial() { + // Avoid the `disable` var to escape to heap unless partial evaluation is enabled. + var disablePartial []ast.Ref + // Disable inlining on all references in the expression so the result of + // partial evaluation has the same semantics w/ the with statements + // preserved. 
+ disableRef := func(x ast.Ref) bool { + disablePartial = append(disablePartial, x.GroundPrefix()) + return false + } // If the value is unknown the with statement cannot be evaluated and so // the entire expression should be saved to be safe. In the future this @@ -516,12 +679,15 @@ func (e *eval) evalWith(iter evalIterator) error { } ast.WalkRefs(expr.NoWith(), disableRef) + + disable = disablePartial } pairsInput := [][2]*ast.Term{} pairsData := [][2]*ast.Term{} - functionMocks := [][2]*ast.Term{} - targets := []ast.Ref{} + targets := make([]ast.Ref, 0, len(expr.With)) + + var functionMocks [][2]*ast.Term for i := range expr.With { target := expr.With[i].Target @@ -593,16 +759,31 @@ func (e *eval) evalWithPush(input, data *ast.Term, functionMocks [][2]*ast.Term, e.data = data } + if e.comprehensionCache == nil { + e.comprehensionCache = newComprehensionCache() + } + e.comprehensionCache.Push() e.virtualCache.Push() + + if e.targetStack == nil { + e.targetStack = newRefStack() + } + e.targetStack.Push(targets) e.inliningControl.PushDisable(disable, true) + + if e.functionMocks == nil { + e.functionMocks = newFunctionMocksStack() + } + e.functionMocks.PutPairs(functionMocks) return oldInput, oldData } func (e *eval) evalWithPop(input, data *ast.Term) { + // NOTE(ae) no nil checks here as we assume evalWithPush always called first e.inliningControl.PopDisable() e.targetStack.Pop() e.virtualCache.Pop() @@ -613,11 +794,14 @@ func (e *eval) evalWithPop(input, data *ast.Term) { } func (e *eval) evalNotPartial(iter evalIterator) error { - // Prepare query normally. expr := e.query[e.index] - negation := expr.Complement().NoWith() - child := e.closure(ast.NewBody(negation)) + negation := expr.ComplementNoWith() + + child := evalPool.Get() + defer evalPool.Put(child) + + e.closure(ast.NewBody(negation), child) // Unknowns is the set of variables that are marked as unknown. The variables // are namespaced with the query ID that they originate in. 
This ensures that @@ -710,9 +894,7 @@ func (e *eval) evalNotPartialSupport(negationID uint64, expr *ast.Expr, unknowns args = append(args, ast.NewTerm(v)) } - sort.Slice(args, func(i, j int) bool { - return args[i].Value.Compare(args[j].Value) < 0 - }) + slices.SortFunc(args, ast.TermValueCompare) if len(args) > 0 { head.Args = args @@ -744,14 +926,12 @@ func (e *eval) evalNotPartialSupport(negationID uint64, expr *ast.Expr, unknowns } func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { - ref := terms[0].Value.(ast.Ref) - var mocked bool mock, mocked := e.functionMocks.Get(ref) if mocked { if m, ok := mock.Value.(ast.Ref); ok && isFunction(e.compiler.TypeEnv, m) { // builtin or data function - mockCall := append([]*ast.Term{ast.NewTerm(m)}, terms[1:]...) + mockCall := append([]*ast.Term{mock}, terms[1:]...) e.functionMocks.Push() err := e.evalCall(mockCall, func() error { @@ -769,8 +949,8 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { if ref[0].Equal(ast.DefaultRootDocument) { if mocked { - f := e.compiler.TypeEnv.Get(ref).(*types.Function) - return e.evalCallValue(len(f.FuncArgs().Args), terms, mock, iter) + arity := e.compiler.TypeEnv.GetByRef(ref).(*types.Function).Arity() + return e.evalCallValue(arity, terms, mock, iter) } var ir *ast.IndexResult @@ -780,72 +960,85 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { } else { ir, err = e.getRules(ref, terms[1:]) } + defer ast.IndexResultPool.Put(ir) if err != nil { return err } - eval := evalFunc{ - e: e, - ref: ref, - terms: terms, - ir: ir, - } + eval := evalFuncPool.Get() + defer evalFuncPool.Put(eval) + + eval.e = e + eval.terms = terms + eval.ir = ir + return eval.eval(iter) } builtinName := ref.String() bi, f, ok := e.builtinFunc(builtinName) if !ok { - return unsupportedBuiltinErr(e.query[e.index].Location) + return unsupportedBuiltinErr(e.query[e.index].Location, builtinName) } if mocked { // value replacement of built-in call - return 
e.evalCallValue(len(bi.Decl.Args()), terms, mock, iter) + return e.evalCallValue(bi.Decl.Arity(), terms, mock, iter) } if e.unknown(e.query[e.index], e.bindings) { - return e.saveCall(len(bi.Decl.Args()), terms, iter) + return e.saveCall(bi.Decl.Arity(), terms, iter) } - var parentID uint64 - if e.parent != nil { - parentID = e.parent.queryID - } + var bctx *BuiltinContext - var capabilities *ast.Capabilities - if e.compiler != nil { - capabilities = e.compiler.Capabilities() - } - - bctx := BuiltinContext{ - Context: e.ctx, - Metrics: e.metrics, - Seed: e.seed, - Time: e.time, - Cancel: e.cancel, - Runtime: e.runtime, - Cache: e.builtinCache, - InterQueryBuiltinCache: e.interQueryBuiltinCache, - InterQueryBuiltinValueCache: e.interQueryBuiltinValueCache, - NDBuiltinCache: e.ndBuiltinCache, - Location: e.query[e.index].Location, - QueryTracers: e.tracers, - TraceEnabled: e.traceEnabled, - QueryID: e.queryID, - ParentID: parentID, - PrintHook: e.printHook, - DistributedTracingOpts: e.tracingOpts, - Capabilities: capabilities, - } - - eval := evalBuiltin{ - e: e, - bi: bi, - bctx: bctx, - f: f, - terms: terms[1:], + // Creating a BuiltinContext is expensive, so only do it if the builtin depends on it. 
+ if !bi.CanSkipBctx { + var parentID uint64 + if e.parent != nil { + parentID = e.parent.queryID + } + + var capabilities *ast.Capabilities + if e.compiler != nil { + capabilities = e.compiler.Capabilities() + } + + if e.time == nil { + e.time = ast.NumberTerm(int64ToJSONNumber(e.timeStart)) + } + + bctx = &BuiltinContext{ + Context: e.ctx, + Metrics: e.metrics, + Seed: e.seed, + Time: e.time, + Cancel: e.cancel, + Runtime: e.runtime, + Cache: e.builtinCache, + InterQueryBuiltinCache: e.interQueryBuiltinCache, + InterQueryBuiltinValueCache: e.interQueryBuiltinValueCache, + NDBuiltinCache: e.ndBuiltinCache, + Location: e.query[e.index].Location, + QueryTracers: e.tracers, + TraceEnabled: e.traceEnabled, + QueryID: e.queryID, + ParentID: parentID, + PrintHook: e.printHook, + DistributedTracingOpts: e.tracingOpts, + Capabilities: capabilities, + RoundTripper: e.roundTripper, + } } + eval := evalBuiltinPool.Get() + defer evalBuiltinPool.Put(eval) + + eval.e = e + eval.bi = bi + eval.bctx = bctx + eval.f = f + eval.terms = terms[1:] + return eval.eval(iter) } @@ -855,7 +1048,7 @@ func (e *eval) evalCallValue(arity int, terms []*ast.Term, mock *ast.Term, iter return e.unify(terms[len(terms)-1], mock, iter) case len(terms) == arity+1: - if mock.Value.Compare(ast.Boolean(false)) != 0 { + if !ast.Boolean(false).Equal(mock.Value) { return iter() } return nil @@ -901,7 +1094,14 @@ func (e *eval) biunify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) err case ast.Var, ast.Ref, *ast.ArrayComprehension: return e.biunifyValues(a, b, b1, b2, iter) case *ast.Array: - return e.biunifyArrays(vA, vB, b1, b2, iter) + if vA.Len() == vB.Len() { + params := arraysRecPool.Get() + params.a, params.b = vA, vB + params.b1, params.b2 = b1, b2 + params.iter = iter + params.idx = 0 + return e.biunifyArraysRec(params) + } } case ast.Object: switch vB := b.Value.(type) { @@ -916,19 +1116,31 @@ func (e *eval) biunify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) err return nil } 
-func (e *eval) biunifyArrays(a, b *ast.Array, b1, b2 *bindings, iter unifyIterator) error { - if a.Len() != b.Len() { +func (e *eval) biunifyArraysRec(params *biunifyArraysRecParams) error { + if params.idx == params.a.Len() { + err := params.iter() + arraysRecPool.Put(params) + return err + } + return e.biunify(params.a.Elem(params.idx), params.b.Elem(params.idx), params.b1, params.b2, func() error { + params.idx++ + return e.biunifyArraysRec(params) + }) +} + +func (e *eval) biunifyTerms(a, b []*ast.Term, b1, b2 *bindings, iter unifyIterator) error { + if len(a) != len(b) { return nil } - return e.biunifyArraysRec(a, b, b1, b2, iter, 0) + return e.biunifyTermsRec(a, b, b1, b2, iter, 0) } -func (e *eval) biunifyArraysRec(a, b *ast.Array, b1, b2 *bindings, iter unifyIterator, idx int) error { - if idx == a.Len() { +func (e *eval) biunifyTermsRec(a, b []*ast.Term, b1, b2 *bindings, iter unifyIterator, idx int) error { + if idx == len(a) { return iter() } - return e.biunify(a.Elem(idx), b.Elem(idx), b1, b2, func() error { - return e.biunifyArraysRec(a, b, b1, b2, iter, idx+1) + return e.biunify(a[idx], b[idx], b1, b2, func() error { + return e.biunifyTermsRec(a, b, b1, b2, iter, idx+1) }) } @@ -1048,7 +1260,6 @@ func (e *eval) biunifyValues(a, b *ast.Term, b1, b2 *bindings, iter unifyIterato } func (e *eval) biunifyRef(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { - ref := a.Value.(ast.Ref) if ref[0].Equal(ast.DefaultRootDocument) { @@ -1057,7 +1268,7 @@ func (e *eval) biunifyRef(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) e: e, ref: ref, pos: 1, - plugged: ref.Copy(), + plugged: ref.CopyNonGround(), bindings: b1, rterm: b, rbindings: b2, @@ -1133,6 +1344,10 @@ func (e *eval) buildComprehensionCache(a *ast.Term) (*ast.Term, error) { return nil, nil } + if e.comprehensionCache == nil { + e.comprehensionCache = newComprehensionCache() + } + cache, ok := e.comprehensionCache.Elem(a) if !ok { var err error @@ -1165,7 +1380,10 @@ func (e *eval) 
buildComprehensionCache(a *ast.Term) (*ast.Term, error) { } func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.childWithBindingSizeHint(x.Body, child, ast.EstimateBodyBindingCount(x.Body)) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1184,7 +1402,10 @@ func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*a } func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.childWithBindingSizeHint(x.Body, child, ast.EstimateBodyBindingCount(x.Body)) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1204,7 +1425,10 @@ func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.T } func (e *eval) buildComprehensionCacheObject(x *ast.ObjectComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.childWithBindingSizeHint(x.Body, child, ast.EstimateBodyBindingCount(x.Body)) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1284,47 +1508,81 @@ func (e *eval) amendComprehension(a *ast.Term, b1 *bindings) (*ast.Term, error) } func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { - result := ast.NewArray() - child := e.closure(x.Body) + var elements []*ast.Term + child := evalPool.Get() + + e.closure(x.Body, child) + defer evalPool.Put(child) + err := child.Run(func(child *eval) error { - result = 
result.Append(child.bindings.Plug(x.Term)) + elements = append(elements, child.bindings.Plug(x.Term)) return nil }) if err != nil { return err } - return e.biunify(ast.NewTerm(result), b, b1, b2, iter) + + if len(elements) == 0 { + return e.biunify(ast.InternedEmptyArray, b, b1, b2, iter) + } + + return e.biunify(ast.NewTerm(ast.NewArray(elements...)), b, b1, b2, iter) } func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { - result := ast.NewSet() - child := e.closure(x.Body) + child := evalPool.Get() + + e.closure(x.Body, child) + defer evalPool.Put(child) + + var result ast.Set err := child.Run(func(child *eval) error { - result.Add(child.bindings.Plug(x.Term)) + if result == nil { + result = ast.NewSet(child.bindings.Plug(x.Term)) + } else { + result.Add(child.bindings.Plug(x.Term)) + } return nil }) if err != nil { return err } + + if result == nil { + return e.biunify(ast.InternedEmptySet, b, b1, b2, iter) + } + return e.biunify(ast.NewTerm(result), b, b1, b2, iter) } func (e *eval) biunifyComprehensionObject(x *ast.ObjectComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { - result := ast.NewObject() - child := e.closure(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.closure(x.Body, child) + + var result ast.Object err := child.Run(func(child *eval) error { key := child.bindings.Plug(x.Key) value := child.bindings.Plug(x.Value) - exist := result.Get(key) - if exist != nil && !exist.Equal(value) { - return objectDocKeyConflictErr(x.Key.Location) + if result == nil { + result = ast.NewObject(ast.Item(key, value)) + } else { + if exist := result.Get(key); exist != nil && !exist.Equal(value) { + return objectDocKeyConflictErr(x.Key.Location) + } + result.Insert(key, value) } - result.Insert(key, value) return nil }) if err != nil { return err } + + if result == nil { + return e.biunify(ast.InternedEmptyObject, b, b1, b2, iter) + } + return 
e.biunify(ast.NewTerm(result), b, b1, b2, iter) } @@ -1354,7 +1612,7 @@ func (e *eval) saveExprMarkUnknowns(expr *ast.Expr, b *bindings, iter unifyItera e.traceSave(expr) err = iter() e.saveStack.Pop() - for i := 0; i < pops; i++ { + for range pops { e.saveSet.Pop() } return err @@ -1384,7 +1642,7 @@ func (e *eval) saveUnify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) e err := iter() e.saveStack.Pop() - for i := 0; i < pops; i++ { + for range pops { e.saveSet.Pop() } @@ -1411,7 +1669,7 @@ func (e *eval) saveCall(declArgsLen int, terms []*ast.Term, iter unifyIterator) err := iter() e.saveStack.Pop() - for i := 0; i < pops; i++ { + for range pops { e.saveSet.Pop() } return err @@ -1433,7 +1691,7 @@ func (e *eval) saveInlinedNegatedExprs(exprs []*ast.Expr, iter unifyIterator) er e.traceSave(expr) } err := iter() - for i := 0; i < len(exprs); i++ { + for range exprs { e.saveStack.Pop() } return err @@ -1443,17 +1701,26 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) e.instr.startTimer(evalOpRuleIndex) defer e.instr.stopTimer(evalOpRuleIndex) - index := e.compiler.RuleIndex(ref) + index := e.ruleIndex(ref) if index == nil { return nil, nil } + resolver := resolverPool.Get() + defer func() { + resolver.e = nil + resolver.args = nil + resolverPool.Put(resolver) + }() + var result *ast.IndexResult var err error + resolver.e = e if e.indexing { - result, err = index.Lookup(&evalResolver{e: e, args: args}) + resolver.args = args + result, err = index.Lookup(resolver) } else { - result, err = index.AllRules(&evalResolver{e: e}) + result, err = index.AllRules(resolver) } if err != nil { return nil, err @@ -1461,23 +1728,35 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) result.EarlyExit = result.EarlyExit && e.earlyExit - var msg strings.Builder - if len(result.Rules) == 1 { - msg.WriteString("(matched 1 rule") - } else { - msg.Grow(len("(matched NNNN rules)")) - msg.WriteString("(matched ") - 
msg.WriteString(strconv.Itoa(len(result.Rules))) - msg.WriteString(" rules") - } - if result.EarlyExit { - msg.WriteString(", early exit") + if e.traceEnabled { + var msg strings.Builder + if len(result.Rules) == 1 { + msg.WriteString("(matched 1 rule") + } else { + msg.Grow(len("(matched NNNN rules)")) + msg.WriteString("(matched ") + msg.WriteString(strconv.Itoa(len(result.Rules))) + msg.WriteString(" rules") + } + if result.EarlyExit { + msg.WriteString(", early exit") + } + msg.WriteRune(')') + + // Copy ref here as ref otherwise always escapes to the heap, + // whether tracing is enabled or not. + r := ref.Copy() + e.traceIndex(e.query[e.index], msg.String(), &r) } - msg.WriteRune(')') - e.traceIndex(e.query[e.index], msg.String(), &ref) + return result, err } +// ruleIndex performs a lookup for a RuleIndex in the compiler's RuleTree. +func (e *eval) ruleIndex(ref ast.Ref) ast.RuleIndex { + return e.compiler.RuleIndex(ref) +} + func (e *eval) Resolve(ref ast.Ref) (ast.Value, error) { return (&evalResolver{e: e}).Resolve(ref) } @@ -1490,7 +1769,9 @@ type evalResolver struct { func (e *evalResolver) Resolve(ref ast.Ref) (ast.Value, error) { e.e.instr.startTimer(evalOpResolve) - if e.e.inliningControl.Disabled(ref, true) || e.e.saveSet.Contains(ast.NewTerm(ref), nil) { + // NOTE(ae): nil check on saveSet to avoid ast.NewTerm allocation when not needed + if e.e.inliningControl.Disabled(ref, true) || (e.e.saveSet != nil && + e.e.saveSet.Contains(ast.NewTerm(ref), nil)) { e.e.instr.stopTimer(evalOpResolve) return nil, ast.UnknownValueErr{} } @@ -1568,7 +1849,7 @@ func (e *evalResolver) Resolve(ref ast.Ref) (ast.Value, error) { return merged, err } e.e.instr.stopTimer(evalOpResolve) - return nil, fmt.Errorf("illegal ref") + return nil, errors.New("illegal ref") } func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, error) { @@ -1599,9 +1880,9 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro if len(path) == 0 { 
switch obj := blob.(type) { - case map[string]interface{}: + case map[string]any: if len(obj) > 0 { - cpy := make(map[string]interface{}, len(obj)-1) + cpy := make(map[string]any, len(obj)-1) for k, v := range obj { if string(ast.SystemDocumentKey) != k { cpy[k] = v @@ -1611,16 +1892,7 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro } case ast.Object: if obj.Len() > 0 { - cpy := ast.NewObject() - if err := obj.Iter(func(k *ast.Term, v *ast.Term) error { - if !ast.SystemDocumentKey.Equal(k.Value) { - cpy.Insert(k, v) - } - return nil - }); err != nil { - return nil, err - } - blob = cpy + blob, _ = obj.Map(systemDocumentKeyRemoveMapper) } } } @@ -1629,7 +1901,7 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro case ast.Value: v = blob default: - if blob, ok := blob.(map[string]interface{}); ok && !e.strictObjects { + if blob, ok := blob.(map[string]any); ok && !e.strictObjects { v = ast.LazyObject(blob) break } @@ -1653,8 +1925,21 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro return merged, nil } +func systemDocumentKeyRemoveMapper(k, v *ast.Term) (*ast.Term, *ast.Term, error) { + if ast.SystemDocumentKey.Equal(k.Value) { + return nil, nil, nil + } + return k, v, nil +} + func (e *eval) generateVar(suffix string) *ast.Term { - return ast.VarTerm(fmt.Sprintf("%v_%v", e.genvarprefix, suffix)) + buf := make([]byte, 0, len(e.genvarprefix)+len(suffix)+1) + + buf = append(buf, e.genvarprefix...) + buf = append(buf, '_') + buf = append(buf, suffix...) 
+ + return ast.VarTerm(util.ByteSliceToString(buf)) } func (e *eval) rewrittenVar(v ast.Var) (ast.Var, bool) { @@ -1681,10 +1966,11 @@ func (e *eval) getDeclArgsLen(x *ast.Expr) (int, error) { bi, _, ok := e.builtinFunc(operator.String()) if ok { - return len(bi.Decl.Args()), nil + return bi.Decl.Arity(), nil } ir, err := e.getRules(operator, nil) + defer ast.IndexResultPool.Put(ir) if err != nil { return -1, err } else if ir == nil || ir.Empty() { @@ -1706,17 +1992,17 @@ func (e *eval) updateFromQuery(expr *ast.Expr) { type evalBuiltin struct { e *eval bi *ast.Builtin - bctx BuiltinContext + bctx *BuiltinContext f BuiltinFunc terms []*ast.Term } // Is this builtin non-deterministic, and did the caller provide an NDBCache? func (e *evalBuiltin) canUseNDBCache(bi *ast.Builtin) bool { - return bi.Nondeterministic && e.bctx.NDBuiltinCache != nil + return bi.Nondeterministic && e.bctx != nil && e.bctx.NDBuiltinCache != nil } -func (e evalBuiltin) eval(iter unifyIterator) error { +func (e *evalBuiltin) eval(iter unifyIterator) error { operands := make([]*ast.Term, len(e.terms)) @@ -1724,10 +2010,9 @@ func (e evalBuiltin) eval(iter unifyIterator) error { operands[i] = e.e.bindings.Plug(e.terms[i]) } - numDeclArgs := len(e.bi.Decl.FuncArgs().Args) + numDeclArgs := e.bi.Decl.Arity() e.e.instr.startTimer(evalOpBuiltinCall) - var err error // NOTE(philipc): We sometimes have to drop the very last term off // the args list for cases where a builtin's result is used/assigned, @@ -1749,7 +2034,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error { case e.bi.Decl.Result() == nil: return iter() case len(operands) == numDeclArgs: - if v.Compare(ast.Boolean(false)) == 0 { + if ast.Boolean(false).Equal(v) { return nil // nothing to do } return iter() @@ -1762,8 +2047,18 @@ func (e evalBuiltin) eval(iter unifyIterator) error { e.e.instr.startTimer(evalOpBuiltinCall) } + var bctx BuiltinContext + if e.bctx == nil { + bctx = BuiltinContext{ + // Location potentially needed for 
error reporting. + Location: e.e.query[e.e.index].Location, + } + } else { + bctx = *e.bctx + } + // Normal unification flow for builtins: - err = e.f(e.bctx, operands, func(output *ast.Term) error { + err := e.f(bctx, operands, func(output *ast.Term) error { e.e.instr.stopTimer(evalOpBuiltinCall) @@ -1773,7 +2068,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error { case e.bi.Decl.Result() == nil: err = iter() case len(operands) == numDeclArgs: - if output.Value.Compare(ast.Boolean(false)) != 0 { + if !ast.Boolean(false).Equal(output.Value) { err = iter() } // else: nothing to do, don't iter() default: @@ -1813,13 +2108,11 @@ func (e evalBuiltin) eval(iter unifyIterator) error { type evalFunc struct { e *eval - ref ast.Ref - terms []*ast.Term ir *ast.IndexResult + terms []*ast.Term } -func (e evalFunc) eval(iter unifyIterator) error { - +func (e *evalFunc) eval(iter unifyIterator) error { if e.ir.Empty() { return nil } @@ -1831,32 +2124,55 @@ func (e evalFunc) eval(iter unifyIterator) error { argCount = len(e.ir.Default.Head.Args) } - if len(e.ir.Else) > 0 && e.e.unknown(e.e.query[e.e.index], e.e.bindings) { - // Partial evaluation of ordered rules is not supported currently. Save the - // expression and continue. This could be revisited in the future. - return e.e.saveCall(argCount, e.terms, iter) - } + if e.e.partial() { + if len(e.ir.Else) > 0 && e.e.unknown(e.e.query[e.e.index], e.e.bindings) { + // Partial evaluation of ordered rules is not supported currently. Save the + // expression and continue. This could be revisited in the future. 
+ return e.e.saveCall(argCount, e.terms, iter) + } + + var mustGenerateSupport bool - if e.e.partial() && (e.e.inliningControl.shallow || e.e.inliningControl.Disabled(e.ref, false)) { - // check if the function definitions, or any of the arguments - // contain something unknown - unknown := e.e.unknown(e.ref, e.e.bindings) - for i := 1; !unknown && i <= argCount; i++ { - unknown = e.e.unknown(e.terms[i], e.e.bindings) + if defRule := e.ir.Default; defRule != nil { + // The presence of a default func might force us to generate support + if len(defRule.Head.Args) == len(e.terms)-1 { + // The function is called without collecting the result in an output term, + // therefore any successful evaluation of the function is of interest, including the default value ... + if ret := defRule.Head.Value; ret == nil || !ret.Equal(ast.InternedTerm(false)) { + // ... unless the default value is false, + mustGenerateSupport = true + } + } else { + // The function is called with an output term, therefore any successful evaluation of the function is of interest. + // NOTE: Because of how the compiler rewrites function calls, we can't know if the result value is compared + // to a constant value, so we can't be as clever as we are for rules. 
+ mustGenerateSupport = true + } } - if unknown { - return e.partialEvalSupport(argCount, iter) + + ref := e.terms[0].Value.(ast.Ref) + + if mustGenerateSupport || e.e.inliningControl.shallow || e.e.inliningControl.Disabled(ref, false) { + // check if the function definitions, or any of the arguments + // contain something unknown + unknown := e.e.unknownRef(ref, e.e.bindings) + for i := 1; !unknown && i <= argCount; i++ { + unknown = e.e.unknown(e.terms[i], e.e.bindings) + } + if unknown { + return e.partialEvalSupport(argCount, iter) + } } } return e.evalValue(iter, argCount, e.ir.EarlyExit) } -func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) error { +func (e *evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) error { var cacheKey ast.Ref - var hit bool - var err error if !e.e.partial() { + var hit bool + var err error cacheKey, hit, err = e.evalCache(argCount, iter) if err != nil { return err @@ -1865,12 +2181,23 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro } } + // NOTE(anders): While it makes the code a bit more complex, reusing the + // args slice across each function increment saves a lot of resources + // compared to creating a new one inside each call to evalOneRule... 
so + // think twice before simplifying this :) + args := make([]*ast.Term, len(e.terms)-1) + var prev *ast.Term return withSuppressEarlyExit(func() error { var outerEe *deferredEarlyExitError for _, rule := range e.ir.Rules { - next, err := e.evalOneRule(iter, rule, cacheKey, prev, findOne) + copy(args, rule.Head.Args) + if len(args) == len(rule.Head.Args)+1 { + args[len(args)-1] = rule.Head.Value + } + + next, err := e.evalOneRule(iter, rule, args, cacheKey, prev, findOne) if err != nil { if oee, ok := err.(*deferredEarlyExitError); ok { if outerEe == nil { @@ -1882,7 +2209,12 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro } if next == nil { for _, erule := range e.ir.Else[rule] { - next, err = e.evalOneRule(iter, erule, cacheKey, prev, findOne) + copy(args, erule.Head.Args) + if len(args) == len(erule.Head.Args)+1 { + args[len(args)-1] = erule.Head.Value + } + + next, err = e.evalOneRule(iter, erule, args, cacheKey, prev, findOne) if err != nil { if oee, ok := err.(*deferredEarlyExitError); ok { if outerEe == nil { @@ -1903,7 +2235,13 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro } if e.ir.Default != nil && prev == nil { - _, err := e.evalOneRule(iter, e.ir.Default, cacheKey, prev, findOne) + copy(args, e.ir.Default.Head.Args) + if len(args) == len(e.ir.Default.Head.Args)+1 { + args[len(args)-1] = e.ir.Default.Head.Value + } + + _, err := e.evalOneRule(iter, e.ir.Default, args, cacheKey, prev, findOne) + return err } @@ -1915,16 +2253,20 @@ func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) erro }) } -func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, error) { - var plen int - if len(e.terms) == argCount+2 { // func name + output = 2 - plen = len(e.terms) - 1 - } else { - plen = len(e.terms) +func (e *evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, error) { + plen := len(e.terms) + if plen == argCount+2 { // func 
name + output = 2 + plen -= 1 } + cacheKey := make([]*ast.Term, plen) - for i := 0; i < plen; i++ { - cacheKey[i] = e.e.bindings.Plug(e.terms[i]) + for i := range plen { + if e.terms[i].IsGround() { + // Avoid expensive copying of ref if it is ground. + cacheKey[i] = e.terms[i] + } else { + cacheKey[i] = e.e.bindings.Plug(e.terms[i]) + } } cached, _ := e.e.virtualCache.Get(cacheKey) @@ -1943,23 +2285,22 @@ func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, er return cacheKey, false, nil } -func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) { +func (e *evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, args []*ast.Term, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + // Optimization: pre-size bindings based on function argument count to reduce memory waste. + // Function argument count is known at compile time and most functions have < 10 arguments. + // This avoids allocating the default 16-slot array when only 2-3 bindings are needed. 
+ sizeHint := len(args) + e.e.childWithBindingSizeHint(rule.Body, child, sizeHint) child.findOne = findOne - args := make([]*ast.Term, len(e.terms)-1) - copy(args, rule.Head.Args) - - if len(args) == len(rule.Head.Args)+1 { - args[len(args)-1] = rule.Head.Value - } - var result *ast.Term child.traceEnter(rule) - err := child.biunifyArrays(ast.NewArray(e.terms[1:]...), ast.NewArray(args...), e.e.bindings, child.bindings, func() error { + err := child.biunifyTerms(e.terms[1:], args, e.e.bindings, child.bindings, func() error { return child.eval(func(child *eval) error { child.traceExit(rule) @@ -1976,28 +2317,24 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R e.e.virtualCache.Put(cacheKey, result) // the redos confirm this, or the evaluation is aborted } - if len(rule.Head.Args) == len(e.terms)-1 { - if result.Value.Compare(ast.Boolean(false)) == 0 { - if prev != nil && ast.Compare(prev, result) != 0 { - return functionConflictErr(rule.Location) - } - prev = result - return nil + if len(rule.Head.Args) == len(e.terms)-1 && ast.Boolean(false).Equal(result.Value) { + if prev != nil && !prev.Equal(result) { + return functionConflictErr(rule.Location) } + prev = result + return nil } // Partial evaluation should explore all rules and may not produce // a ground result so we do not perform conflict detection or // deduplication. See "ignore conflicts: functions" test case for // an example. 
- if !e.e.partial() { - if prev != nil { - if ast.Compare(prev, result) != 0 { - return functionConflictErr(rule.Location) - } - child.traceRedo(rule) - return nil + if !e.e.partial() && prev != nil { + if !prev.Equal(result) { + return functionConflictErr(rule.Location) } + child.traceRedo(rule) + return nil } prev = result @@ -2014,10 +2351,8 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R return result, err } -func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error { - - path := e.e.namespaceRef(e.ref) - term := ast.NewTerm(path) +func (e *evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error { + path := e.e.namespaceRef(e.terms[0].Value.(ast.Ref)) if !e.e.saveSupport.Exists(path) { for _, rule := range e.ir.Rules { @@ -2026,18 +2361,29 @@ func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error return err } } + + if e.ir.Default != nil { + err := e.partialEvalSupportRule(e.ir.Default, path) + if err != nil { + return err + } + } } if !e.e.saveSupport.Exists(path) { // we haven't saved anything, nothing to call return nil } + term := ast.NewTerm(path) + return e.e.saveCall(declArgsLen, append([]*ast.Term{term}, e.terms[1:]...), iter) } -func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { +func (e *evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -2070,8 +2416,9 @@ func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { } e.e.saveSupport.Insert(path, &ast.Rule{ - Head: head, - Body: plugged, + Head: head, + Body: plugged, + Default: rule.Default, }) } child.traceRedo(rule) @@ -2084,15 +2431,42 @@ func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) 
error { return err } +type deferredEarlyExitContainer struct { + deferred *deferredEarlyExitError +} + +func (dc *deferredEarlyExitContainer) handleErr(err error) error { + if err == nil { + return nil + } + + if dc.deferred == nil && errors.As(err, &dc.deferred) && dc.deferred != nil { + return nil + } + + return err +} + +// copyError returns a copy of the deferred early exit error if one is present. +// This exists only to allow the container to be reused. +func (dc *deferredEarlyExitContainer) copyError() *deferredEarlyExitError { + if dc.deferred == nil { + return nil + } + + cpy := *dc.deferred + return &cpy +} + type evalTree struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int bindings *bindings rterm *ast.Term rbindings *bindings node *ast.TreeNode + ref ast.Ref + plugged ast.Ref + pos int } func (e evalTree) eval(iter unifyIterator) error { @@ -2115,9 +2489,7 @@ func (e evalTree) finish(iter unifyIterator) error { // In some cases, it may not be possible to PE the ref. If the path refers // to virtual docs that PE does not support or base documents where inlining // has been disabled, then we have to save. - save := e.e.unknown(e.plugged, e.e.bindings) - - if save { + if e.e.partial() && e.e.unknownRef(e.plugged, e.e.bindings) { return e.e.saveUnify(ast.NewTerm(e.plugged), e.rterm, e.bindings, e.rbindings, iter) } @@ -2160,6 +2532,20 @@ func (e evalTree) next(iter unifyIterator, plugged *ast.Term) error { return cpy.eval(iter) } +// enumerateNext is a helper to avoid closure allocation in enumerate loops. +// Method values don't allocate, unlike explicit closures. +// Using a pointer to evalTree avoids copying the 96-byte structure. +// Fields are ordered by size for optimal memory alignment (16 > 8 > 8 bytes). 
+type enumerateNext struct { + iter unifyIterator // 16 bytes (interface) + e *evalTree // 8 bytes (pointer) + key *ast.Term // 8 bytes (pointer) +} + +func (en *enumerateNext) call() error { + return en.e.next(en.iter, en.key) +} + func (e evalTree) enumerate(iter unifyIterator) error { if e.e.inliningControl.Disabled(e.plugged[:e.pos], true) { @@ -2171,66 +2557,60 @@ func (e evalTree) enumerate(iter unifyIterator) error { return err } - var deferredEe *deferredEarlyExitError - handleErr := func(err error) error { - var dee *deferredEarlyExitError - if errors.As(err, &dee) { - if deferredEe == nil { - deferredEe = dee - } - return nil - } - return err - } + dc := deecPool.Get() + dc.deferred = nil + defer deecPool.Put(dc) + + // Use method value to avoid closure allocation. + // Create once and reuse for both doc and virtual doc enumeration. + en := enumerateNext{iter: iter, e: &e, key: nil} if doc != nil { switch doc := doc.(type) { case *ast.Array: - for i := 0; i < doc.Len(); i++ { - k := ast.IntNumberTerm(i) - err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, k) - }) + for i := range doc.Len() { + k := ast.InternedTerm(i) + en.key = k + err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, en.call) - if err := handleErr(err); err != nil { + if err := dc.handleErr(err); err != nil { return err } } case ast.Object: ki := doc.KeysIterator() for k, more := ki.Next(); more; k, more = ki.Next() { - err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, k) - }) - if err := handleErr(err); err != nil { + en.key = k + err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, en.call) + if err := dc.handleErr(err); err != nil { return err } } case ast.Set: - if err := doc.Iter(func(elem *ast.Term) error { - err := e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, elem) - }) - return handleErr(err) - }); err != nil { - return err + 
// Use Slice() to avoid closure allocation in Iter() + for _, elem := range doc.Slice() { + en.key = elem + err := e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, en.call) + if err := dc.handleErr(err); err != nil { + return err + } } } } - if deferredEe != nil { - return deferredEe + if dc.deferred != nil { + return dc.copyError() } if e.node == nil { return nil } + // Reuse the same enumerateNext for virtual documents for _, k := range e.node.Sorted { key := ast.NewTerm(k) - if err := e.e.biunify(key, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, key) - }); err != nil { + en.key = key + if err := e.e.biunify(key, e.ref[e.pos], e.bindings, e.bindings, en.call); err != nil { return err } } @@ -2278,9 +2658,7 @@ func (e evalTree) leaves(plugged ast.Ref, node *ast.TreeNode) (ast.Object, error result := ast.NewObject() for _, k := range node.Sorted { - child := node.Children[k] - if child.Hide { continue } @@ -2317,24 +2695,25 @@ func (e evalTree) leaves(plugged ast.Ref, node *ast.TreeNode) (ast.Object, error type evalVirtual struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int bindings *bindings rterm *ast.Term rbindings *bindings + ref ast.Ref + plugged ast.Ref + pos int } func (e evalVirtual) eval(iter unifyIterator) error { ir, err := e.e.getRules(e.plugged[:e.pos+1], nil) + defer ast.IndexResultPool.Put(ir) if err != nil { return err } // Partial evaluation of ordered rules is not supported currently. Save the // expression and continue. This could be revisited in the future. 
- if len(ir.Else) > 0 && e.e.unknown(e.ref, e.bindings) { + if len(ir.Else) > 0 && e.e.unknownRef(e.ref, e.bindings) { return e.e.saveUnify(ast.NewTerm(e.ref), e.rterm, e.bindings, e.rbindings, iter) } @@ -2393,14 +2772,14 @@ func (e evalVirtual) eval(iter unifyIterator) error { type evalVirtualPartial struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int ir *ast.IndexResult bindings *bindings rterm *ast.Term rbindings *bindings empty *ast.Term + ref ast.Ref + plugged ast.Ref + pos int } type evalVirtualPartialCacheHint struct { @@ -2442,7 +2821,7 @@ func maxRefLength(rules []*ast.Rule, ceil int) int { for _, r := range rules { rl := len(r.Ref()) if r.Head.RuleKind() == ast.MultiValue { - rl = rl + 1 + rl++ } if rl >= ceil { return ceil @@ -2459,14 +2838,16 @@ func (e evalVirtualPartial) evalEachRule(iter unifyIterator, unknown bool) error return nil } - m := maxRefLength(e.ir.Rules, len(e.ref)) - if e.e.unknown(e.ref[e.pos+1:m], e.bindings) { - for _, rule := range e.ir.Rules { - if err := e.evalOneRulePostUnify(iter, rule); err != nil { - return err + if e.e.partial() { + m := maxRefLength(e.ir.Rules, len(e.ref)) + if e.e.unknown(e.ref[e.pos+1:m], e.bindings) { + for _, rule := range e.ir.Rules { + if err := e.evalOneRulePostUnify(iter, rule); err != nil { + return err + } } + return nil } - return nil } hint, err := e.evalCache(iter) @@ -2536,8 +2917,11 @@ func (e evalVirtualPartial) evalAllRulesNoCache(rules []*ast.Rule) (*ast.Term, e var visitedRefs []ast.Ref + child := evalPool.Get() + defer evalPool.Put(child) + for _, rule := range rules { - child := e.e.child(rule.Body) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) err := child.eval(func(*eval) error { child.traceExit(rule) @@ -2570,8 +2954,10 @@ func wrapInObjects(leaf *ast.Term, ref ast.Ref) *ast.Term { } func (e evalVirtualPartial) evalOneRulePreUnify(iter unifyIterator, rule *ast.Rule, result *ast.Term, unknown bool, visitedRefs 
*[]ast.Ref) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) var defined bool @@ -2663,7 +3049,10 @@ func (e *eval) biunifyDynamicRef(pos int, a, b ast.Ref, b1, b2 *bindings, iter u } func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.Rule) error { - child := e.e.child(rule.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) var defined bool @@ -2747,8 +3136,10 @@ func (e evalVirtualPartial) partialEvalSupport(iter unifyIterator) error { } func (e evalVirtualPartial) partialEvalSupportRule(rule *ast.Rule, _ ast.Ref) (bool, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -2977,20 +3368,31 @@ func (q vcKeyScope) Hash() int { return hash } -func (q vcKeyScope) IsGround() bool { +func (vcKeyScope) IsGround() bool { return false } func (q vcKeyScope) String() string { - buf := make([]string, 0, len(q.Ref)) + buf, _ := q.AppendText(make([]byte, 0, 2+q.StringLength())) + return util.ByteSliceToString(buf) +} + +func (q vcKeyScope) AppendText(buf []byte) ([]byte, error) { + buf = append(buf, '<') for _, t := range q.Ref { if _, ok := t.Value.(ast.Var); ok { - buf = append(buf, "_") + buf = append(buf, '_') } else { - buf = append(buf, t.String()) + var err error + if buf, err = t.AppendText(buf); err != nil { + return nil, err + } } + buf = append(buf, ',') } - return fmt.Sprintf("<%s>", strings.Join(buf, ",")) + buf[len(buf)-1] = '>' + + return buf, nil } // reduce removes vars from the tail of the ref. 
@@ -3031,7 +3433,12 @@ func getNestedObject(ref ast.Ref, rootObj *ast.Object, b *bindings, l *ast.Locat } func hasCollisions(path ast.Ref, visitedRefs *[]ast.Ref, b *bindings) bool { - collisionPathTerm := b.Plug(ast.NewTerm(path)) + // Avoid allocating a new term just for the sake of a lookup + term := ast.TermPtrPool.Get() + term.Value = path + collisionPathTerm := b.Plug(term) + ast.TermPtrPool.Put(term) + collisionPath := collisionPathTerm.Value.(ast.Ref) for _, c := range *visitedRefs { if collisionPath.HasPrefix(c) && !collisionPath.Equal(c) { @@ -3043,15 +3450,15 @@ func hasCollisions(path ast.Ref, visitedRefs *[]ast.Ref, b *bindings) bool { } func (e evalVirtualPartial) reduce(rule *ast.Rule, b *bindings, result *ast.Term, visitedRefs *[]ast.Ref) (*ast.Term, bool, error) { - var exists bool head := rule.Head switch v := result.Value.(type) { case ast.Set: key := b.Plug(head.Key) - exists = v.Contains(key) - v.Add(key) + if exists = v.Contains(key); !exists { + v.Add(key) + } case ast.Object: // data.p.q[r].s.t := 42 {...} // |----|-| @@ -3111,13 +3518,13 @@ func (e evalVirtualPartial) reduce(rule *ast.Rule, b *bindings, result *ast.Term type evalVirtualComplete struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int ir *ast.IndexResult bindings *bindings rterm *ast.Term rbindings *bindings + ref ast.Ref + plugged ast.Ref + pos int } func (e evalVirtualComplete) eval(iter unifyIterator) error { @@ -3132,20 +3539,24 @@ func (e evalVirtualComplete) eval(iter unifyIterator) error { return nil } - if !e.e.unknown(e.ref, e.bindings) { + if !e.e.unknownRef(e.ref, e.bindings) { return e.evalValue(iter, e.ir.EarlyExit) } var generateSupport bool if e.ir.Default != nil { - // If the other term is not constant OR it's equal to the default value, then - // a support rule must be produced as the default value _may_ be required. 
On - // the other hand, if the other term is constant (i.e., it does not require - // evaluation) and it differs from the default value then the default value is - // _not_ required, so partially evaluate the rule normally. - rterm := e.rbindings.Plug(e.rterm) - generateSupport = !ast.IsConstant(rterm.Value) || e.ir.Default.Head.Value.Equal(rterm) + // If inlining has been disabled for the rterm, and the default rule has a 'false' result value, + // the default value is inconsequential, and support does not need to be generated. + if !(e.ir.Default.Head.Value.Equal(ast.InternedTerm(false)) && e.e.inliningControl.Disabled(e.rterm.Value, false)) { + // If the other term is not constant OR it's equal to the default value, then + // a support rule must be produced as the default value _may_ be required. On + // the other hand, if the other term is constant (i.e., it does not require + // evaluation) and it differs from the default value then the default value is + // _not_ required, so partially evaluate the rule normally. 
+ rterm := e.rbindings.Plug(e.rterm) + generateSupport = !ast.IsConstant(rterm.Value) || e.ir.Default.Head.Value.Equal(rterm) + } } if generateSupport || e.e.inliningControl.shallow || e.e.inliningControl.Disabled(e.plugged[:e.pos+1], false) { @@ -3226,10 +3637,13 @@ func (e evalVirtualComplete) evalValue(iter unifyIterator, findOne bool) error { } func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, prev *ast.Term, findOne bool) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.findOne = findOne child.traceEnter(rule) + var result *ast.Term err := child.eval(func(child *eval) error { child.traceExit(rule) @@ -3248,8 +3662,7 @@ func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, p e.e.virtualCache.Put(e.plugged[:e.pos+1], result) term, termbindings := child.bindings.apply(rule.Head.Value) - err := e.evalTerm(iter, term, termbindings) - if err != nil { + if err := e.evalTerm(iter, term, termbindings); err != nil { return err } @@ -3262,17 +3675,18 @@ func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, p } func (e evalVirtualComplete) partialEval(iter unifyIterator) error { + child := evalPool.Get() + defer evalPool.Put(child) for _, rule := range e.ir.Rules { - child := e.e.child(rule.Body) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) err := child.eval(func(child *eval) error { child.traceExit(rule) term, termbindings := child.bindings.apply(rule.Head.Value) - err := e.evalTerm(iter, term, termbindings) - if err != nil { + if err := e.evalTerm(iter, term, termbindings); err != nil { return err } @@ -3289,17 +3703,22 @@ func (e evalVirtualComplete) partialEval(iter unifyIterator) error { } func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error { - - 
path := e.e.namespaceRef(e.plugged[:e.pos+1]) + originalPath := e.plugged[:e.pos+1] + namespacePath := e.e.namespaceRef(originalPath) term := ast.NewTerm(e.e.namespaceRef(e.ref)) var defined bool - if e.e.saveSupport.Exists(path) { + if e.e.saveSupport.Exists(namespacePath) { defined = true } else { for i := range e.ir.Rules { - ok, err := e.partialEvalSupportRule(e.ir.Rules[i], path) + // Split the rule from the package + ruleRef := originalPath.Copy()[len(e.ir.Rules[i].Module.Package.Path):] + ruleRef[0].Value = ast.Var(ruleRef[0].Value.(ast.String)) + // Get the namespaced package path without the rule + packagePath := namespacePath.Copy()[:len(namespacePath)-len(ruleRef)] + ok, err := e.partialEvalSupportRule(e.ir.Rules[i], packagePath, ruleRef) if err != nil { return err } @@ -3309,7 +3728,12 @@ func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error { } if e.ir.Default != nil { - ok, err := e.partialEvalSupportRule(e.ir.Default, path) + // Split the rule from the package + ruleRef := originalPath.Copy()[len(e.ir.Default.Module.Package.Path):] + ruleRef[0].Value = ast.Var(ruleRef[0].Value.(ast.String)) + // Get the namespaced package path without the rule + packagePath := namespacePath.Copy()[:len(namespacePath)-len(ruleRef)] + ok, err := e.partialEvalSupportRule(e.ir.Default, packagePath, ruleRef) if err != nil { return err } @@ -3326,9 +3750,11 @@ func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error { return e.e.saveUnify(term, e.rterm, e.bindings, e.rbindings, iter) } -func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) (bool, error) { +func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, packagePath ast.Ref, ruleRef ast.Ref) (bool, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.childWithBindingSizeHint(rule.Body, child, ast.EstimateBodyBindingCount(rule.Body)) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ 
-3343,7 +3769,6 @@ func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref // Skip this rule body if it fails to type-check. // Type-checking failure means the rule body will never succeed. if e.e.compiler.PassesTypeCheck(plugged) { - pkg, ruleRef := splitPackageAndRule(path) head := ast.RefHead(ruleRef, child.bindings.PlugNamespaced(rule.Head.Value, e.e.caller.bindings)) if !e.e.inliningControl.shallow { @@ -3353,7 +3778,7 @@ func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref plugged = applyCopyPropagation(cp, e.e.instr, plugged) } - e.e.saveSupport.InsertByPkg(pkg, &ast.Rule{ + e.e.saveSupport.InsertByPkg(packagePath, &ast.Rule{ Head: head, Body: plugged, Default: rule.Default, @@ -3383,13 +3808,13 @@ func (e evalVirtualComplete) evalTerm(iter unifyIterator, term *ast.Term, termbi type evalTerm struct { e *eval - ref ast.Ref - pos int bindings *bindings term *ast.Term termbindings *bindings rterm *ast.Term rbindings *bindings + ref ast.Ref + pos int } func (e evalTerm) eval(iter unifyIterator) error { @@ -3440,33 +3865,56 @@ func (e evalTerm) enumerate(iter unifyIterator) error { switch v := e.term.Value.(type) { case *ast.Array: - for i := 0; i < v.Len(); i++ { - k := ast.IntNumberTerm(i) - err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { - return e.next(iter, k) - }) + // Note(anders): + // For this case (e.g. input.foo[_]), we can avoid the (quite expensive) overhead of a callback + // function literal escaping to the heap in each iteration by inlining the biunification logic, + // meaning a 10x reduction in both the number of allocations made as well as the memory consumed. + // It is possible that such inlining could be done for the set/object cases as well, and that's + // worth looking into later, as I imagine set iteration in particular would be an even greater + // win across most policies. 
Those cases are however much more complex, as we need to deal with + // any type on either side, not just int/var as is the case here. + for i := range v.Len() { + a := ast.InternedTerm(i) + b := e.ref[e.pos] + + if _, ok := b.Value.(ast.Var); ok { + if e.e.traceEnabled { + e.e.traceUnify(a, b) + } + var undo undo + b, e.bindings = e.bindings.apply(b) + e.bindings.bind(b, a, e.bindings, &undo) - if err := handleErr(err); err != nil { - return err + err := e.next(iter, a) + undo.Undo() + if err != nil { + if err := handleErr(err); err != nil { + return err + } + } } } case ast.Object: - if err := v.Iter(func(k, _ *ast.Term) error { + for _, k := range v.Keys() { err := e.e.biunify(k, e.ref[e.pos], e.termbindings, e.bindings, func() error { return e.next(iter, e.termbindings.Plug(k)) }) - return handleErr(err) - }); err != nil { - return err + if err != nil { + if err := handleErr(err); err != nil { + return err + } + } } case ast.Set: - if err := v.Iter(func(elem *ast.Term) error { + for _, elem := range v.Slice() { err := e.e.biunify(elem, e.ref[e.pos], e.termbindings, e.bindings, func() error { return e.next(iter, e.termbindings.Plug(elem)) }) - return handleErr(err) - }); err != nil { - return err + if err != nil { + if err := handleErr(err); err != nil { + return err + } + } } } @@ -3527,12 +3975,10 @@ func (e evalTerm) get(plugged *ast.Term) (*ast.Term, *bindings) { } func (e evalTerm) save(iter unifyIterator) error { - v := e.e.generateVar(fmt.Sprintf("ref_%d", e.e.genvarid)) e.e.genvarid++ return e.e.biunify(e.term, v, e.termbindings, e.bindings, func() error { - suffix := e.ref[e.pos:] ref := make(ast.Ref, len(suffix)+1) ref[0] = v @@ -3551,7 +3997,8 @@ type evalEvery struct { func (e evalEvery) eval(iter unifyIterator) error { // unknowns in domain or body: save the expression, PE its body - if e.e.unknown(e.Domain, e.e.bindings) || e.e.unknown(e.Body, e.e.bindings) { + // partial() check to avoid e.Body -> Node boxing allocation + if e.e.partial() && 
(e.e.unknown(e.Domain, e.e.bindings) || e.e.unknown(e.Body, e.e.bindings)) { return e.save(iter) } @@ -3569,7 +4016,11 @@ func (e evalEvery) eval(iter unifyIterator) error { ).SetLocation(e.Domain.Location), ) - domain := e.e.closure(generator) + domain := evalPool.Get() + defer evalPool.Put(domain) + + e.e.closure(generator, domain) + all := true // all generator evaluations yield one successful body evaluation domain.traceEnter(e.expr) @@ -3580,14 +4031,25 @@ func (e evalEvery) eval(iter unifyIterator) error { // This would do extra work, like iterating needlessly if domain was a large array. return nil } - body := child.closure(e.Body) + + body := evalPool.Get() + defer evalPool.Put(body) + + child.closure(e.Body, body) body.findOne = true - body.traceEnter(e.Body) + + if e.e.traceEnabled { + body.traceEnter(e.Body) + } + done := false err := body.eval(func(*eval) error { - body.traceExit(e.Body) + if e.e.traceEnabled { + body.traceExit(e.Body) + body.traceRedo(e.Body) + } done = true - body.traceRedo(e.Body) + return nil }) if !done { @@ -3707,10 +4169,12 @@ func applyCopyPropagation(p *copypropagation.CopyPropagator, instr *Instrumentat return result } +func nonGroundKey(k, _ *ast.Term) bool { + return !k.IsGround() +} + func nonGroundKeys(a ast.Object) bool { - return a.Until(func(k, _ *ast.Term) bool { - return !k.IsGround() - }) + return a.Until(nonGroundKey) } func plugKeys(a ast.Object, b *bindings) ast.Object { @@ -3747,8 +4211,7 @@ func canInlineNegation(safe ast.VarSet, queries []ast.Body) bool { SkipClosures: true, }) vis.Walk(expr) - unsafe := vis.Vars().Diff(safe).Diff(ast.ReservedVars) - if len(unsafe) > 0 { + if vis.Vars().Diff(safe).DiffCount(ast.ReservedVars) > 0 { return false } } @@ -3772,7 +4235,7 @@ func newNestedCheckVisitor() *nestedCheckVisitor { return v } -func (v *nestedCheckVisitor) visit(x interface{}) bool { +func (v *nestedCheckVisitor) visit(x any) bool { switch x.(type) { case ast.Ref, ast.Call: v.found = true @@ -3863,7 +4326,7 
@@ func isOtherRef(term *ast.Term) bool { return !ref.HasPrefix(ast.DefaultRootRef) && !ref.HasPrefix(ast.InputRootRef) } -func isFunction(env *ast.TypeEnv, ref interface{}) bool { +func isFunction(env *ast.TypeEnv, ref any) bool { var r ast.Ref switch v := ref.(type) { case ast.Ref: @@ -3875,7 +4338,7 @@ func isFunction(env *ast.TypeEnv, ref interface{}) bool { default: panic("expected ast.Value or *ast.Term") } - _, ok := env.Get(r).(*types.Function) + _, ok := env.GetByRef(r).(*types.Function) return ok } diff --git a/vendor/github.com/open-policy-agent/opa/topdown/glob.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go similarity index 85% rename from vendor/github.com/open-policy-agent/opa/topdown/glob.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go index baf092ab6d..4e80c519ba 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/glob.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go @@ -6,15 +6,17 @@ import ( "github.com/gobwas/glob" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) const globCacheMaxSize = 100 const globInterQueryValueCacheHits = "rego_builtin_glob_interquery_value_cache_hits" -var globCacheLock = sync.Mutex{} -var globCache map[string]glob.Glob +var noDelimiters = []rune{} +var dotDelimiters = []rune{'.'} +var globCacheLock = sync.RWMutex{} +var globCache = map[string]glob.Glob{} func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { pattern, err := builtins.StringOperand(operands[0].Value, 1) @@ -25,14 +27,14 @@ func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast. 
var delimiters []rune switch operands[1].Value.(type) { case ast.Null: - delimiters = []rune{} + delimiters = noDelimiters case *ast.Array: delimiters, err = builtins.RuneSliceOperand(operands[1].Value, 2) if err != nil { return err } if len(delimiters) == 0 { - delimiters = []rune{'.'} + delimiters = dotDelimiters } default: return builtins.NewOperandTypeErr(2, operands[1].Value, "array", "null") @@ -55,12 +57,13 @@ func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast. if err != nil { return err } - return iter(ast.BooleanTerm(m)) + return iter(ast.InternedTerm(m)) } func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimiters []rune) (bool, error) { if bctx.InterQueryBuiltinValueCache != nil { + // TODO: Use named cache val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(id)) if ok { pat, valid := val.(glob.Glob) @@ -86,14 +89,15 @@ func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimit return res.Match(match), nil } - globCacheLock.Lock() - defer globCacheLock.Unlock() + globCacheLock.RLock() p, ok := globCache[id] + globCacheLock.RUnlock() if !ok { var err error if p, err = glob.Compile(pattern, delimiters...); err != nil { return false, err } + globCacheLock.Lock() if len(globCache) >= globCacheMaxSize { // Delete a (semi-)random key to make room for the new one. 
for k := range globCache { @@ -102,9 +106,10 @@ func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimit } } globCache[id] = p + globCacheLock.Unlock() } - out := p.Match(match) - return out, nil + + return p.Match(match), nil } func builtinGlobQuoteMeta(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -117,7 +122,6 @@ func builtinGlobQuoteMeta(_ BuiltinContext, operands []*ast.Term, iter func(*ast } func init() { - globCache = map[string]glob.Glob{} RegisterBuiltinFunc(ast.GlobMatch.Name, builtinGlobMatch) RegisterBuiltinFunc(ast.GlobQuoteMeta.Name, builtinGlobQuoteMeta) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go new file mode 100644 index 0000000000..c47f7dc4e6 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go @@ -0,0 +1,688 @@ +// Copyright 2022 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + gqlast "github.com/vektah/gqlparser/v2/ast" + gqlparser "github.com/vektah/gqlparser/v2/parser" + gqlvalidator "github.com/vektah/gqlparser/v2/validator" + "github.com/vektah/gqlparser/v2/validator/rules" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" +) + +var defaultRules = rules.NewDefaultRules() + +// Parses a GraphQL schema, and returns the GraphQL AST for the schema. +func parseSchema(schema string) (*gqlast.SchemaDocument, error) { + // NOTE(philipc): We don't include the "built-in schema defs" from the + // underlying graphql parsing library here, because those definitions + // generate enormous AST blobs. 
In the future, if there is demand for + // a "full-spec" version of schema ASTs, we may need to provide a + // version of this function that includes the built-in schema + // definitions. + schemaAST, err := gqlparser.ParseSchema(&gqlast.Source{Input: schema}) + if err != nil { + return nil, formatGqlParserError(err) + } + return schemaAST, nil +} + +// Parses a GraphQL query, and returns the GraphQL AST for the query. +func parseQuery(query string) (*gqlast.QueryDocument, error) { + queryAST, err := gqlparser.ParseQuery(&gqlast.Source{Input: query}) + if err != nil { + return nil, formatGqlParserError(err) + } + return queryAST, nil +} + +// Validates a GraphQL query against a schema, and returns an error. +// In this case, we get a wrappered error list type, and pluck out +// just the first error message in the list. +func validateQuery(schema *gqlast.Schema, query *gqlast.QueryDocument) error { + // Validate the query against the schema, erroring if there's an issue. + if err := gqlvalidator.ValidateWithRules(schema, query, defaultRules); err != nil { + return formatGqlParserError(err) + } + return nil +} + +func getBuiltinSchema() *gqlast.SchemaDocument { + schema, err := gqlparser.ParseSchema(gqlvalidator.Prelude) + if err != nil { + panic(fmt.Errorf("Error in gqlparser Prelude (should be impossible): %w", err)) + } + return schema +} + +// NOTE(philipc): This function expects *validated* schema documents, and will break +// if it is fed arbitrary structures. +func mergeSchemaDocuments(docA *gqlast.SchemaDocument, docB *gqlast.SchemaDocument) *gqlast.SchemaDocument { + ast := &gqlast.SchemaDocument{} + ast.Merge(docA) + ast.Merge(docB) + return ast +} + +// Converts a SchemaDocument into a gqlast.Schema object that can be used for validation. +// It merges in the builtin schema typedefs exactly as gqltop.LoadSchema did internally. 
+func convertSchema(schemaDoc *gqlast.SchemaDocument) (*gqlast.Schema, error) { + // Merge builtin schema + schema we were provided. + builtinsSchemaDoc := getBuiltinSchema() + mergedSchemaDoc := mergeSchemaDocuments(builtinsSchemaDoc, schemaDoc) + schema, err := gqlvalidator.ValidateSchemaDocument(mergedSchemaDoc) + if err != nil { + return nil, fmt.Errorf("Error in gqlparser SchemaDocument to Schema conversion: %w", err) + } + return schema, nil +} + +// Converts an ast.Object into a gqlast.QueryDocument object. +func objectToQueryDocument(value ast.Object) (*gqlast.QueryDocument, error) { + // Convert ast.Term to any for JSON encoding below. + asJSON, err := ast.JSON(value) + if err != nil { + return nil, err + } + // Marshal to JSON. + bs, err := json.Marshal(asJSON) + if err != nil { + return nil, err + } + // Unmarshal from JSON -> gqlast.QueryDocument. + var result gqlast.QueryDocument + err = json.Unmarshal(bs, &result) + if err != nil { + return nil, err + } + return &result, nil +} + +// Converts an ast.Object into a gqlast.SchemaDocument object. +func objectToSchemaDocument(value ast.Object) (*gqlast.SchemaDocument, error) { + // Convert ast.Term to any for JSON encoding below. + asJSON, err := ast.JSON(value) + if err != nil { + return nil, err + } + // Marshal to JSON. + bs, err := json.Marshal(asJSON) + if err != nil { + return nil, err + } + // Unmarshal from JSON -> gqlast.SchemaDocument. + var result gqlast.SchemaDocument + err = json.Unmarshal(bs, &result) + if err != nil { + return nil, err + } + return &result, nil +} + +// Recursively traverses an AST that has been run through InterfaceToValue, +// and prunes away the fields with null or empty values, and all `Position` +// structs. +// NOTE(philipc): We currently prune away null values to reduce the level +// of clutter in the returned AST objects. 
In the future, if there is demand +// for ASTs that have a more regular/fixed structure, we may need to provide +// a "raw" version of the AST, where we still prune away the `Position` +// structs, but leave in the null fields. +func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { + // We iterate over the Value we've been provided, and recurse down + // in the case of complex types, such as Arrays/Objects. + // We are guaranteed to only have to deal with standard JSON types, + // so this is much less ugly than what we'd need for supporting every + // extant ast type! + switch x := value.(type) { + case *ast.Array: + result := ast.NewArrayWithCapacity(x.Len()) + // Iterate over the array's elements, and do the following: + // - Drop any Nulls + // - Drop any any empty object/array value (after running the pruner) + for i := range x.Len() { + vTerm := x.Elem(i) + switch v := vTerm.Value.(type) { + case ast.Null: + continue + case *ast.Array: + // Safe, because we knew the type before going to prune it. + va := pruneIrrelevantGraphQLASTNodes(v).(*ast.Array) + if va.Len() > 0 { + result = result.Append(ast.NewTerm(va)) + } + case ast.Object: + // Safe, because we knew the type before going to prune it. + vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object) + if vo.Len() > 0 { + result = result.Append(ast.NewTerm(vo)) + } + default: + result = result.Append(vTerm) + } + } + return result + case ast.Object: + result := ast.NewObjectWithCapacity(x.Len()) + // Iterate over our object's keys, and do the following: + // - Drop "Position". + // - Drop any key with a Null value. + // - Drop any key with an empty object/array value (after running the pruner) + keys := x.Keys() + for _, k := range keys { + // We drop the "Position" objects because we don't need the + // source-backref/location info they provide for policy rules. + // Note that keys are ast.Strings. 
+ if ast.String("Position").Equal(k.Value) { + continue + } + vTerm := x.Get(k) + switch v := vTerm.Value.(type) { + case ast.Null: + continue + case *ast.Array: + // Safe, because we knew the type before going to prune it. + va := pruneIrrelevantGraphQLASTNodes(v).(*ast.Array) + if va.Len() > 0 { + result.Insert(k, ast.NewTerm(va)) + } + case ast.Object: + // Safe, because we knew the type before going to prune it. + vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object) + if vo.Len() > 0 { + result.Insert(k, ast.NewTerm(vo)) + } + default: + result.Insert(k, vTerm) + } + } + return result + default: + return x + } +} + +func formatGqlParserError(err error) error { + // We use strings.TrimSuffix to remove the '.' characters that the library + // authors include on most of their validation errors. This should be safe, + // since variable names in their error messages are usually quoted, and + // this affects only the last character(s) in the string. + // NOTE(philipc): We know the error location will be in the query string, + // because schema validation always happens before this function is called. + // NOTE(rm): gqlparser does not _always_ return the error location + // so only populate location if it is available + if err == nil { + return nil + } + // If the error contains location information, format it nicely + errorParts := strings.SplitN(err.Error(), ":", 4) + if len(errorParts) >= 4 { + row, err := strconv.ParseUint(errorParts[1], 10, 64) + if err == nil { + col, err := strconv.ParseUint(errorParts[2], 10, 64) + if err == nil { + msg := strings.TrimSuffix(strings.TrimLeft(errorParts[len(errorParts)-1], " "), ".\n") + return fmt.Errorf("%s in GraphQL string at location %d:%d", msg, row, col) + } + } + } + // Wrap and return the full error if location information is not available + return fmt.Errorf("GraphQL parse error: %w", err) +} + +// Reports errors from parsing/validation. 
+func builtinGraphQLParse(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var queryDoc *gqlast.QueryDocument + var schemaDoc *gqlast.SchemaDocument + var schemaASTValue ast.Value + var querySchema ast.Value + var err error + + // Parse/translate query if it's a string/object. + switch x := operands[0].Value.(type) { + case ast.String: + queryDoc, err = parseQuery(string(x)) + case ast.Object: + queryDoc, err = objectToQueryDocument(x) + default: + // Error if wrong type. + return builtins.NewOperandTypeErr(0, x, "string", "object") + } + if err != nil { + return err + } + + schemaCacheKey, schema := cacheGetSchema(bctx, operands[1]) + schemaASTCacheKey, querySchema := cacheGetSchemaAST(bctx, operands[1]) + if schema == nil || querySchema == nil { + // Parse/translate schema if it's a string/object. + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. + return builtins.NewOperandTypeErr(1, x, "string", "object") + } + if err != nil { + return err + } + + // Convert SchemaDoc to Object before validating and converting it to a Schema + // This precludes inclusion of extra definitions from the default GraphQL schema + if querySchema == nil { + schemaASTValue, err = ast.InterfaceToValue(schemaDoc) + if err != nil { + return err + } + querySchema = pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) + cacheInsertSchemaAST(bctx, schemaASTCacheKey, querySchema) + } + + // Validate the query against the schema, erroring if there's an issue. + if schema == nil { + schema, err = convertSchema(schemaDoc) + if err != nil { + return err + } + cacheInsertSchema(bctx, schemaCacheKey, schema) + } + + } + // Transform the ASTs into Objects. 
+ queryASTValue, err := ast.InterfaceToValue(queryDoc) + if err != nil { + return err + } + + if err := validateQuery(schema, queryDoc); err != nil { + return err + } + + // Recursively remove irrelevant AST structures. + queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object)) + + // Construct return value. + verified := ast.ArrayTerm( + ast.NewTerm(queryResult), + ast.NewTerm(querySchema), + ) + + return iter(verified) +} + +// Returns default value when errors occur. +func builtinGraphQLParseAndVerify(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var queryDoc *gqlast.QueryDocument + var schemaDoc *gqlast.SchemaDocument + var schemaASTValue ast.Value + var querySchema ast.Value + var err error + + unverified := ast.ArrayTerm( + ast.InternedTerm(false), + ast.NewTerm(ast.NewObject()), + ast.NewTerm(ast.NewObject()), + ) + + // Parse/translate query if it's a string/object. + switch x := operands[0].Value.(type) { + case ast.String: + queryDoc, err = parseQuery(string(x)) + case ast.Object: + queryDoc, err = objectToQueryDocument(x) + default: + // Error if wrong type. + return iter(unverified) + } + if err != nil { + return iter(unverified) + } + + // Transform the ASTs into Objects. + queryASTValue, err := ast.InterfaceToValue(queryDoc) + if err != nil { + return iter(unverified) + } + + schemaCacheKey, schema := cacheGetSchema(bctx, operands[1]) + schemaASTCacheKey, querySchema := cacheGetSchemaAST(bctx, operands[1]) + if schema == nil || querySchema == nil { + // Parse/translate schema if it's a string/object. + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. 
+ return iter(unverified) + } + if err != nil { + return iter(unverified) + } + + // Convert SchemaDoc to Object before validating and converting it to a Schema + // This precludes inclusion of extra definitions from the default GraphQL schema + if querySchema == nil { + schemaASTValue, err = ast.InterfaceToValue(schemaDoc) + if err != nil { + return iter(unverified) + } + querySchema = pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) + cacheInsertSchemaAST(bctx, schemaASTCacheKey, querySchema) + } + + if schema == nil { + schema, err = convertSchema(schemaDoc) + if err != nil { + return iter(unverified) + } + cacheInsertSchema(bctx, schemaCacheKey, schema) + } + + } + + // Validate the query against the schema, erroring if there's an issue. + if err := validateQuery(schema, queryDoc); err != nil { + return iter(unverified) + } + + // Recursively remove irrelevant AST structures. + queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object)) + + // Construct return value. + verified := ast.ArrayTerm( + ast.InternedTerm(true), + ast.NewTerm(queryResult), + ast.NewTerm(querySchema), + ) + + return iter(verified) +} + +func builtinGraphQLParseQuery(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + raw, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // Get the highly-nested AST struct, along with any errors generated. + query, err := parseQuery(string(raw)) + if err != nil { + return err + } + + // Transform the AST into an Object. + value, err := ast.InterfaceToValue(query) + if err != nil { + return err + } + + // Recursively remove irrelevant AST structures. 
+ result := pruneIrrelevantGraphQLASTNodes(value.(ast.Object)) + + return iter(ast.NewTerm(result)) +} + +func builtinGraphQLParseSchema(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + schemaDocCacheKey, schemaDoc := cacheGetSchemaDoc(bctx, operands[0]) + if schemaDoc == nil { + raw, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // Get the highly-nested AST struct, along with any errors generated. + schemaDoc, err = parseSchema(string(raw)) + if err != nil { + return err + } + // Note SchemaDoc is not validated + cacheInsertSchemaDoc(bctx, schemaDocCacheKey, schemaDoc) + } + + schemaASTCacheKey, schemaAST := cacheGetSchemaAST(bctx, operands[0]) + if schemaAST == nil { + + // Transform the AST into an Object. + value, err := ast.InterfaceToValue(schemaDoc) + if err != nil { + return err + } + + // Recursively remove irrelevant AST structures. + schemaAST = pruneIrrelevantGraphQLASTNodes(value.(ast.Object)) + cacheInsertSchemaAST(bctx, schemaASTCacheKey, schemaAST) + } + return iter(ast.NewTerm(schemaAST)) +} + +func builtinGraphQLIsValid(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var queryDoc *gqlast.QueryDocument + var schemaDoc *gqlast.SchemaDocument + var schema *gqlast.Schema + var err error + + switch x := operands[0].Value.(type) { + case ast.String: + queryDoc, err = parseQuery(string(x)) + case ast.Object: + queryDoc, err = objectToQueryDocument(x) + default: + // Error if wrong type. + return iter(ast.InternedTerm(false)) + } + if err != nil { + return iter(ast.InternedTerm(false)) + } + + schemaCacheKey, schema := cacheGetSchema(bctx, operands[1]) + if schema == nil { + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. 
+ return iter(ast.InternedTerm(false)) + } + if err != nil { + return iter(ast.InternedTerm(false)) + } + + // Validate the query against the schema, erroring if there's an issue. + schema, err = convertSchema(schemaDoc) + if err != nil { + return iter(ast.InternedTerm(false)) + } + cacheInsertSchema(bctx, schemaCacheKey, schema) + } + + if err := validateQuery(schema, queryDoc); err != nil { + return iter(ast.InternedTerm(false)) + } + + // If we got this far, the GraphQL query passed validation. + return iter(ast.InternedTerm(true)) +} + +func builtinGraphQLSchemaIsValid(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var err error + + // Schemas are only cached if they are valid + schemaCacheKey, schema := cacheGetSchema(bctx, operands[0]) + if schema == nil { + var schemaDoc *gqlast.SchemaDocument + var validatedSchema *gqlast.Schema + + switch x := operands[0].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. 
+ return iter(ast.InternedTerm(false)) + } + if err != nil { + return iter(ast.InternedTerm(false)) + } + // Validate the schema, this determines the result + // and whether there is a schema to cache + validatedSchema, err = convertSchema(schemaDoc) + if err == nil { + cacheInsertSchema(bctx, schemaCacheKey, validatedSchema) + } + } + + return iter(ast.InternedTerm(err == nil)) +} + +// Insert Schema into cache +func cacheInsertSchema(bctx BuiltinContext, key string, schema *gqlast.Schema) { + if bctx.InterQueryBuiltinValueCache == nil || key == "" { + return + } + cacheKey := ast.String(key) + c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName) + if c == nil { + return + } + c.Insert(cacheKey, schema) +} + +// Insert SchemaAST into cache +func cacheInsertSchemaAST(bctx BuiltinContext, key string, schemaAST ast.Value) { + if bctx.InterQueryBuiltinValueCache == nil || key == "" { + return + } + cacheKeyAST := ast.String(key) + c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName) + if c == nil { + return + } + c.Insert(cacheKeyAST, schemaAST) +} + +// Insert SchemaDocument into cache +func cacheInsertSchemaDoc(bctx BuiltinContext, key string, schemaDoc *gqlast.SchemaDocument) { + if bctx.InterQueryBuiltinValueCache == nil || key == "" { + return + } + cacheKey := ast.String(key) + c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName) + if c == nil { + return + } + c.Insert(cacheKey, schemaDoc) +} + +// Returns the cache key and a Schema if this key already exists in the cache +func cacheGetSchema(bctx BuiltinContext, t *ast.Term) (string, *gqlast.Schema) { + if bctx.InterQueryBuiltinValueCache != nil { + if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil { + if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema-"); keyOk { + if val, ok := c.Get(ast.String(key)); ok { + if schema, isSchema := val.(*gqlast.Schema); isSchema { + return key, schema + } + } + return key, nil + } + } + } + return "", nil +} + +// Returns 
the cache key and a SchemaDocument if this key already exists in the cache +// Note: the SchemaDocument is not a validated Schema +func cacheGetSchemaDoc(bctx BuiltinContext, t *ast.Term) (string, *gqlast.SchemaDocument) { + if bctx.InterQueryBuiltinValueCache != nil { + if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil { + if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema_doc-"); keyOk { + if val, ok := c.Get(ast.String(key)); ok { + if schemaDoc, isSchemaDoc := val.(*gqlast.SchemaDocument); isSchemaDoc { + return key, schemaDoc + } + } + return key, nil + } + } + } + return "", nil +} + +// Returns the cache key and a SchemaDocument if this key already exists in the cache +// Note: the AST should be pruned +func cacheGetSchemaAST(bctx BuiltinContext, t *ast.Term) (string, ast.Value) { + if bctx.InterQueryBuiltinValueCache != nil { + if c := bctx.InterQueryBuiltinValueCache.GetCache(gqlCacheName); c != nil { + if key, keyOk := cacheKeyWithPrefix(bctx, t, "gql_schema_ast-"); keyOk { + if val, ok := c.Get(ast.String(key)); ok { + if schemaAST, isSchemaAST := val.(ast.Value); isSchemaAST { + return key, schemaAST + } + } + return key, nil + } + } + } + return "", nil +} + +// Compute a constant size key for use with the cache +func cacheKeyWithPrefix(bctx BuiltinContext, t *ast.Term, prefix string) (string, bool) { + var cacheKey ast.String + var ok = false + + if bctx.InterQueryBuiltinValueCache != nil { + switch t.Value.(type) { + case ast.String: + err := builtinCryptoSha256(bctx, []*ast.Term{t}, func(term *ast.Term) error { + cacheKey = term.Value.(ast.String) + return nil + }) + ok = (len(cacheKey) > 0) && (err == nil) + case ast.Object: + objTerm := ast.StringTerm(t.String()) + err := builtinCryptoSha256(bctx, []*ast.Term{objTerm}, func(term *ast.Term) error { + cacheKey = term.Value.(ast.String) + return nil + }) + ok = (len(cacheKey) > 0) && (err == nil) + default: + ok = false + } + } + + return prefix + string(cacheKey), ok +} 
+
+// gqlCacheName is the named inter-query value cache shared by the GraphQL
+// builtins for memoizing parsed schema documents, validated schemas, and
+// pruned schema ASTs.
+const gqlCacheName = "graphql"
+
+func init() {
+	// Register a small bounded value cache (10 entries) for the GraphQL
+	// builtins. Schema parsing/validation results are stored here keyed by
+	// a hash of the schema input (computed in cacheKeyWithPrefix).
+	var defaultCacheEntries = 10
+	var graphqlCacheConfig = cache.NamedValueCacheConfig{
+		MaxNumEntries: &defaultCacheEntries,
+	}
+	cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(gqlCacheName, &graphqlCacheConfig)
+
+	// Wire each GraphQL builtin name to its implementation.
+	RegisterBuiltinFunc(ast.GraphQLParse.Name, builtinGraphQLParse)
+	RegisterBuiltinFunc(ast.GraphQLParseAndVerify.Name, builtinGraphQLParseAndVerify)
+	RegisterBuiltinFunc(ast.GraphQLParseQuery.Name, builtinGraphQLParseQuery)
+	RegisterBuiltinFunc(ast.GraphQLParseSchema.Name, builtinGraphQLParseSchema)
+	RegisterBuiltinFunc(ast.GraphQLIsValid.Name, builtinGraphQLIsValid)
+	RegisterBuiltinFunc(ast.GraphQLSchemaIsValid.Name, builtinGraphQLSchemaIsValid)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go
new file mode 100644
index 0000000000..79d49f33ca
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go
@@ -0,0 +1,1636 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+ +package topdown + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "mime" + "net" + "net/http" + "net/url" + "os" + "runtime" + "slices" + "strconv" + "strings" + "time" + + "github.com/open-policy-agent/opa/internal/version" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/util" +) + +type cachingMode string + +const ( + defaultHTTPRequestTimeoutEnv = "HTTP_SEND_TIMEOUT" + defaultCachingMode cachingMode = "serialized" + cachingModeDeserialized cachingMode = "deserialized" +) + +var defaultHTTPRequestTimeout = time.Second * 5 + +var allowedKeyNames = [...]string{ + "method", + "url", + "body", + "enable_redirect", + "force_json_decode", + "force_yaml_decode", + "headers", + "raw_body", + "tls_use_system_certs", + "tls_ca_cert", + "tls_ca_cert_file", + "tls_ca_cert_env_variable", + "tls_client_cert", + "tls_client_cert_file", + "tls_client_cert_env_variable", + "tls_client_key", + "tls_client_key_file", + "tls_client_key_env_variable", + "tls_insecure_skip_verify", + "tls_server_name", + "timeout", + "cache", + "force_cache", + "force_cache_duration_seconds", + "raise_error", + "caching_mode", + "max_retry_attempts", + "cache_ignored_headers", +} + +// ref: https://www.rfc-editor.org/rfc/rfc7231#section-6.1 +var cacheableHTTPStatusCodes = [...]int{ + http.StatusOK, + http.StatusNonAuthoritativeInfo, + http.StatusNoContent, + http.StatusPartialContent, + http.StatusMultipleChoices, + http.StatusMovedPermanently, + http.StatusNotFound, + http.StatusMethodNotAllowed, + http.StatusGone, + http.StatusRequestURITooLong, + http.StatusNotImplemented, +} + +var ( + httpSendNetworkErrTerm, httpSendInternalErrTerm *ast.Term + + allowedKeys = ast.NewSet() + cacheableCodes = ast.NewSet() + requiredKeys = 
ast.NewSet(ast.InternedTerm("method"), ast.InternedTerm("url")) + httpSendLatencyMetricKey = "rego_builtin_http_send" + httpSendInterQueryCacheHits = httpSendLatencyMetricKey + "_interquery_cache_hits" + httpSendNetworkRequests = httpSendLatencyMetricKey + "_network_requests" +) + +type httpSendKey string + +// CustomizeRoundTripper allows customizing an existing http.Transport, +// to the returned value, which could be the same Transport or a new one. +type CustomizeRoundTripper func(*http.Transport) http.RoundTripper + +const ( + // httpSendBuiltinCacheKey is the key in the builtin context cache that + // points to the http.send() specific cache resides at. + httpSendBuiltinCacheKey httpSendKey = "HTTP_SEND_CACHE_KEY" + + // HTTPSendInternalErr represents a runtime evaluation error. + HTTPSendInternalErr string = "eval_http_send_internal_error" + + // HTTPSendNetworkErr represents a network error. + HTTPSendNetworkErr string = "eval_http_send_network_error" + + // minRetryDelay is amount of time to backoff after the first failure. + minRetryDelay = time.Millisecond * 100 + + // maxRetryDelay is the upper bound of backoff delay. 
+ maxRetryDelay = time.Second * 60 +) + +func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + obj, err := builtins.ObjectOperand(operands[0].Value, 1) + if err != nil { + return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) + } + + raiseError, err := getRaiseErrorValue(obj) + if err != nil { + return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) + } + + req, err := validateHTTPRequestOperand(operands[0], 1) + if err != nil { + if raiseError { + return handleHTTPSendErr(bctx, err) + } + + return iter(generateRaiseErrorResult(handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err))) + } + + result, err := getHTTPResponse(bctx, req) + if err != nil { + if raiseError { + return handleHTTPSendErr(bctx, err) + } + + result = generateRaiseErrorResult(err) + } + return iter(result) +} + +func generateRaiseErrorResult(err error) *ast.Term { + var errObj ast.Object + switch err.(type) { + case *url.Error: + errObj = ast.NewObject( + ast.Item(ast.InternedTerm("code"), httpSendNetworkErrTerm), + ast.Item(ast.InternedTerm("message"), ast.StringTerm(err.Error())), + ) + default: + errObj = ast.NewObject( + ast.Item(ast.InternedTerm("code"), httpSendInternalErrTerm), + ast.Item(ast.InternedTerm("message"), ast.StringTerm(err.Error())), + ) + } + + return ast.ObjectTerm( + ast.Item(ast.InternedTerm("status_code"), ast.InternedTerm(0)), + ast.Item(ast.InternedTerm("error"), ast.NewTerm(errObj)), + ) +} + +func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { + + bctx.Metrics.Timer(httpSendLatencyMetricKey).Start() + defer bctx.Metrics.Timer(httpSendLatencyMetricKey).Stop() + + key, err := getKeyFromRequest(req) + if err != nil { + return nil, err + } + + reqExecutor, err := newHTTPRequestExecutor(bctx, req, key) + if err != nil { + return nil, err + } + // Check if cache already has a response for this query + // set headers to exclude cache_ignored_headers + resp, err := 
reqExecutor.CheckCache() + if err != nil { + return nil, err + } + + if resp == nil { + httpResp, err := reqExecutor.ExecuteHTTPRequest() + if err != nil { + reqExecutor.InsertErrorIntoCache(err) + return nil, err + } + defer util.Close(httpResp) + // Add result to intra/inter-query cache. + resp, err = reqExecutor.InsertIntoCache(httpResp) + if err != nil { + return nil, err + } + } + + return ast.NewTerm(resp), nil +} + +// getKeyFromRequest returns a key to be used for caching HTTP responses +// deletes headers from request object mentioned in cache_ignored_headers +func getKeyFromRequest(req ast.Object) (ast.Object, error) { + // deep copy so changes to key do not reflect in the request object + key := req.Copy() + cacheIgnoredHeadersTerm := req.Get(ast.InternedTerm("cache_ignored_headers")) + allHeadersTerm := req.Get(ast.InternedTerm("headers")) + // skip because no headers to delete + if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil { + // need to explicitly set cache_ignored_headers to null + // equivalent requests might have different sets of exclusion lists + key.Insert(ast.InternedTerm("cache_ignored_headers"), ast.InternedNullTerm) + return key, nil + } + var cacheIgnoredHeaders []string + err := ast.As(cacheIgnoredHeadersTerm.Value, &cacheIgnoredHeaders) + if err != nil { + return nil, err + } + var allHeaders map[string]any + err = ast.As(allHeadersTerm.Value, &allHeaders) + if err != nil { + return nil, err + } + for _, header := range cacheIgnoredHeaders { + delete(allHeaders, header) + } + val, err := ast.InterfaceToValue(allHeaders) + if err != nil { + return nil, err + } + key.Insert(ast.InternedTerm("headers"), ast.NewTerm(val)) + // remove cache_ignored_headers key + key.Insert(ast.InternedTerm("cache_ignored_headers"), ast.InternedNullTerm) + return key, nil +} + +func init() { + for _, element := range allowedKeyNames { + ast.InternStringTerm(element) + allowedKeys.Add(ast.InternedTerm(element)) + } + + 
ast.InternStringTerm(HTTPSendNetworkErr, HTTPSendInternalErr) + httpSendNetworkErrTerm = ast.InternedTerm(HTTPSendNetworkErr) + httpSendInternalErrTerm = ast.InternedTerm(HTTPSendInternalErr) + + createCacheableHTTPStatusCodes() + initDefaults() + RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend) +} + +func handleHTTPSendErr(bctx BuiltinContext, err error) error { + // Return HTTP client timeout errors in a generic error message to avoid confusion about what happened. + // Do not do this if the builtin context was cancelled and is what caused the request to stop. + if urlErr, ok := err.(*url.Error); ok && urlErr.Timeout() && bctx.Context.Err() == nil { + err = fmt.Errorf("%s %s: request timed out", urlErr.Op, urlErr.URL) + } + if err := bctx.Context.Err(); err != nil { + return Halt{ + Err: &Error{ + Code: CancelErr, + Message: fmt.Sprintf("http.send: timed out (%s)", err.Error()), + }, + } + } + return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) +} + +func initDefaults() { + if timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv); timeoutDuration != "" { + var err error + defaultHTTPRequestTimeout, err = time.ParseDuration(timeoutDuration) + if err != nil { + // If it is set to something not valid don't let the process continue in a state + // that will almost definitely give unexpected results by having it set at 0 + // which means no timeout.. + // This environment variable isn't considered part of the public API. + // TODO(patrick-east): Remove the environment variable + panic(fmt.Sprintf("invalid value for HTTP_SEND_TIMEOUT: %s", err)) + } + } +} + +func validateHTTPRequestOperand(term *ast.Term, pos int) (ast.Object, error) { + + obj, err := builtins.ObjectOperand(term.Value, pos) + if err != nil { + return nil, err + } + + requestKeys := ast.NewSet(obj.Keys()...) 
+ + invalidKeys := requestKeys.Diff(allowedKeys) + if invalidKeys.Len() != 0 { + return nil, builtins.NewOperandErr(pos, "invalid request parameters(s): %v", invalidKeys) + } + + missingKeys := requiredKeys.Diff(requestKeys) + if missingKeys.Len() != 0 { + return nil, builtins.NewOperandErr(pos, "missing required request parameters(s): %v", missingKeys) + } + + return obj, nil +} + +// canonicalizeHeaders returns a copy of the headers where the keys are in +// canonical HTTP form. +func canonicalizeHeaders(headers map[string]any) map[string]any { + canonicalized := map[string]any{} + + for k, v := range headers { + canonicalized[http.CanonicalHeaderKey(k)] = v + } + + return canonicalized +} + +// useSocket examines the url for "unix://" and returns a *http.Transport with +// a DialContext that opens a socket (specified in the http call). +// The url is expected to contain socket=/path/to/socket (url encoded) +// Ex. "unix://localhost/end/point?socket=%2Ftmp%2Fhttp.sock" +func useSocket(rawURL string) (bool, string, *http.Transport) { + u, err := url.Parse(rawURL) + if err != nil { + return false, "", nil + } + + if u.Scheme != "unix" || u.RawQuery == "" { + return false, rawURL, nil + } + + v, err := url.ParseQuery(u.RawQuery) + if err != nil { + return false, rawURL, nil + } + + // Rewrite URL targeting the UNIX domain socket. + u.Scheme = "http" + + // Extract the path to the socket. + // Only retrieve the first value. Subsequent values are ignored and removed + // to prevent HTTP parameter pollution. 
+ socket := v.Get("socket") + v.Del("socket") + u.RawQuery = v.Encode() + + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return http.DefaultTransport.(*http.Transport).DialContext(ctx, "unix", socket) + } + tr.DisableKeepAlives = true + + return true, u.String(), tr +} + +func verifyHost(bctx BuiltinContext, host string) error { + if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil { + return nil + } + + if slices.Contains(bctx.Capabilities.AllowNet, host) { + return nil + } + + return fmt.Errorf("unallowed host: %s", host) +} + +func verifyURLHost(bctx BuiltinContext, unverifiedURL string) error { + // Eager return to avoid unnecessary URL parsing + if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil { + return nil + } + + parsedURL, err := url.Parse(unverifiedURL) + if err != nil { + return err + } + + host := strings.Split(parsedURL.Host, ":")[0] + + return verifyHost(bctx, host) +} + +func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *http.Client, error) { + var ( + url, method string + // Additional CA certificates loading options. + tlsCaCert []byte + tlsCaCertEnvVar, tlsCaCertFile string + // Client TLS certificate and key options. Each input source + // comes in a matched pair. 
+ tlsClientCert, tlsClientKey []byte + tlsClientCertEnvVar, tlsClientKeyEnvVar string + tlsClientCertFile, tlsClientKeyFile, tlsServerName string + + body, rawBody *bytes.Buffer + enableRedirect, tlsInsecureSkipVerify bool + tlsUseSystemCerts *bool + tlsConfig tls.Config + customHeaders map[string]any + ) + + timeout := defaultHTTPRequestTimeout + + for _, val := range obj.Keys() { + key, err := ast.JSON(val.Value) + if err != nil { + return nil, nil, err + } + + key = key.(string) + + var strVal string + + if s, ok := obj.Get(val).Value.(ast.String); ok { + strVal = strings.Trim(string(s), "\"") + } else { + // Most parameters are strings, so consolidate the type checking. + switch key { + case "method", + "url", + "raw_body", + "tls_ca_cert", + "tls_ca_cert_file", + "tls_ca_cert_env_variable", + "tls_client_cert", + "tls_client_cert_file", + "tls_client_cert_env_variable", + "tls_client_key", + "tls_client_key_file", + "tls_client_key_env_variable", + "tls_server_name": + return nil, nil, fmt.Errorf("%q must be a string", key) + } + } + + switch key { + case "method": + method = strings.ToUpper(strVal) + case "url": + err := verifyURLHost(bctx, strVal) + if err != nil { + return nil, nil, err + } + url = strVal + case "enable_redirect": + enableRedirect, err = strconv.ParseBool(obj.Get(val).String()) + if err != nil { + return nil, nil, err + } + case "body": + bodyVal := obj.Get(val).Value + bodyValInterface, err := ast.JSON(bodyVal) + if err != nil { + return nil, nil, err + } + + bodyValBytes, err := json.Marshal(bodyValInterface) + if err != nil { + return nil, nil, err + } + body = bytes.NewBuffer(bodyValBytes) + case "raw_body": + rawBody = bytes.NewBufferString(strVal) + case "tls_use_system_certs": + tempTLSUseSystemCerts, err := strconv.ParseBool(obj.Get(val).String()) + if err != nil { + return nil, nil, err + } + tlsUseSystemCerts = &tempTLSUseSystemCerts + case "tls_ca_cert": + tlsCaCert = []byte(strVal) + case "tls_ca_cert_file": + tlsCaCertFile = 
strVal + case "tls_ca_cert_env_variable": + tlsCaCertEnvVar = strVal + case "tls_client_cert": + tlsClientCert = []byte(strVal) + case "tls_client_cert_file": + tlsClientCertFile = strVal + case "tls_client_cert_env_variable": + tlsClientCertEnvVar = strVal + case "tls_client_key": + tlsClientKey = []byte(strVal) + case "tls_client_key_file": + tlsClientKeyFile = strVal + case "tls_client_key_env_variable": + tlsClientKeyEnvVar = strVal + case "tls_server_name": + tlsServerName = strVal + case "headers": + headersVal := obj.Get(val).Value + headersValInterface, err := ast.JSON(headersVal) + if err != nil { + return nil, nil, err + } + var ok bool + customHeaders, ok = headersValInterface.(map[string]any) + if !ok { + return nil, nil, errors.New("invalid type for headers key") + } + case "tls_insecure_skip_verify": + tlsInsecureSkipVerify, err = strconv.ParseBool(obj.Get(val).String()) + if err != nil { + return nil, nil, err + } + case "timeout": + timeout, err = parseTimeout(obj.Get(val).Value) + if err != nil { + return nil, nil, err + } + case "cache", "caching_mode", + "force_cache", "force_cache_duration_seconds", + "force_json_decode", "force_yaml_decode", + "raise_error", "max_retry_attempts", "cache_ignored_headers": // no-op + default: + return nil, nil, fmt.Errorf("invalid parameter %q", key) + } + } + + if len(customHeaders) != 0 { + customHeaders = canonicalizeHeaders(customHeaders) + } + + isTLS := false + client := &http.Client{ + Timeout: timeout, + CheckRedirect: func(*http.Request, []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + if tlsInsecureSkipVerify { + isTLS = true + tlsConfig.InsecureSkipVerify = tlsInsecureSkipVerify + } + + if len(tlsClientCert) > 0 && len(tlsClientKey) > 0 { + cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + + if tlsClientCertFile != "" && tlsClientKeyFile != "" 
{ + cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + + if tlsClientCertEnvVar != "" && tlsClientKeyEnvVar != "" { + cert, err := tls.X509KeyPair( + []byte(os.Getenv(tlsClientCertEnvVar)), + []byte(os.Getenv(tlsClientKeyEnvVar))) + if err != nil { + return nil, nil, fmt.Errorf("cannot extract public/private key pair from envvars %q, %q: %w", + tlsClientCertEnvVar, tlsClientKeyEnvVar, err) + } + + isTLS = true + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + + // Check the system certificates config first so that we + // load additional certificated into the correct pool. + if tlsUseSystemCerts != nil && *tlsUseSystemCerts && runtime.GOOS != "windows" { + pool, err := x509.SystemCertPool() + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.RootCAs = pool + } + + if len(tlsCaCert) != 0 { + tlsCaCert = bytes.ReplaceAll(tlsCaCert, []byte("\\n"), []byte("\n")) + pool, err := addCACertsFromBytes(tlsConfig.RootCAs, tlsCaCert) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.RootCAs = pool + } + + if tlsCaCertFile != "" { + pool, err := addCACertsFromFile(tlsConfig.RootCAs, tlsCaCertFile) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.RootCAs = pool + } + + if tlsCaCertEnvVar != "" { + pool, err := addCACertsFromEnv(tlsConfig.RootCAs, tlsCaCertEnvVar) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.RootCAs = pool + } + + // If Host header is set, use it for TLS server name. + if host, hasHost := customHeaders["Host"]; hasHost { + // Only default the ServerName if the caller has + // specified the host. If we don't specify anything, + // Go will default to the target hostname. 
This name + // is not the same as the default that Go populates + // `req.Host` with, which is why we don't just set + // this unconditionally. + isTLS = true + tlsConfig.ServerName, _ = host.(string) + } + + if tlsServerName != "" { + isTLS = true + tlsConfig.ServerName = tlsServerName + } + + var transport *http.Transport + if ok, parsedURL, tr := useSocket(url); ok { + transport = tr + url = parsedURL + } else if isTLS { + transport = http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = &tlsConfig + transport.DisableKeepAlives = true + } + + if bctx.RoundTripper != nil { + client.Transport = bctx.RoundTripper(transport) + } else if transport != nil { + client.Transport = transport + } + + // check if redirects are enabled + if enableRedirect { + client.CheckRedirect = func(req *http.Request, _ []*http.Request) error { + return verifyURLHost(bctx, req.URL.String()) + } + } + + if rawBody != nil { + body = rawBody + } else if body == nil { + body = bytes.NewBufferString("") + } + + // create the http request, use the builtin context's context to ensure + // the request is cancelled if evaluation is cancelled. + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, nil, err + } + + req = req.WithContext(bctx.Context) + + // Add custom headers + if len(customHeaders) != 0 { + for k, v := range customHeaders { + header, ok := v.(string) + if !ok { + return nil, nil, fmt.Errorf("invalid type for headers value %q", v) + } + + req.Header.Add(k, header) + } + + // Don't overwrite or append to one that was set in the custom headers + if _, hasUA := customHeaders["User-Agent"]; !hasUA { + req.Header.Add("User-Agent", version.UserAgent) + } + + // If the caller specifies the Host header, use it for the HTTP + // request host and the TLS server name. + if host, hasHost := customHeaders["Host"]; hasHost { + host := host.(string) // We already checked that it's a string. 
+ req.Host = host + } + } + + if len(bctx.DistributedTracingOpts) > 0 { + client.Transport = tracing.NewTransport(client.Transport, bctx.DistributedTracingOpts) + } + + return req, client, nil +} + +func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast.Object) (*http.Response, error) { + var err error + var retry int + + retry, err = getNumberValFromReqObj(inputReqObj, ast.InternedTerm("max_retry_attempts")) + if err != nil { + return nil, err + } + + for i := 0; true; i++ { + + var resp *http.Response + resp, err = client.Do(req) + if err == nil { + return resp, nil + } + + // final attempt + if i == retry { + break + } + + if err == context.Canceled { + return nil, err + } + + delay := util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i) + timer, timerCancel := util.TimerWithCancel(delay) + select { + case <-timer.C: + case <-req.Context().Done(): + timerCancel() // explicitly cancel the timer. + return nil, context.Canceled + } + } + return nil, err +} + +func isJSONType(header http.Header) bool { + t, _, err := mime.ParseMediaType(header.Get("Content-Type")) + if err != nil { + return false + } + + mediaType := strings.Split(t, "/") + if len(mediaType) != 2 { + return false + } + + if mediaType[0] == "application" { + if mediaType[1] == "json" || strings.HasSuffix(mediaType[1], "+json") { + return true + } + } + + return false +} + +func isContentType(header http.Header, typ ...string) bool { + for _, t := range typ { + if strings.Contains(header.Get("Content-Type"), t) { + return true + } + } + return false +} + +type httpSendCacheEntry struct { + response *ast.Value + error error +} + +// The httpSendCache is used for intra-query caching of http.send results. 
+type httpSendCache struct { + entries *util.HasherMap[ast.Value, httpSendCacheEntry] +} + +func newHTTPSendCache() *httpSendCache { + return &httpSendCache{ + entries: util.NewHasherMap[ast.Value, httpSendCacheEntry](ast.ValueEqual), + } +} + +func (cache *httpSendCache) get(k ast.Value) *httpSendCacheEntry { + if v, ok := cache.entries.Get(k); ok { + return &v + } + return nil +} + +func (cache *httpSendCache) putResponse(k ast.Value, v *ast.Value) { + cache.entries.Put(k, httpSendCacheEntry{response: v}) +} + +func (cache *httpSendCache) putError(k ast.Value, v error) { + cache.entries.Put(k, httpSendCacheEntry{error: v}) +} + +// In the BuiltinContext cache we only store a single entry that points to +// our ValueMap which is the "real" http.send() cache. +func getHTTPSendCache(bctx BuiltinContext) *httpSendCache { + raw, ok := bctx.Cache.Get(httpSendBuiltinCacheKey) + if !ok { + // Initialize if it isn't there + c := newHTTPSendCache() + bctx.Cache.Put(httpSendBuiltinCacheKey, c) + return c + } + + c, ok := raw.(*httpSendCache) + if !ok { + return nil + } + return c +} + +// checkHTTPSendCache checks for the given key's value in the cache +func checkHTTPSendCache(bctx BuiltinContext, key ast.Object) (ast.Value, error) { + requestCache := getHTTPSendCache(bctx) + if requestCache == nil { + return nil, nil + } + + v := requestCache.get(key) + if v != nil { + if v.error != nil { + return nil, v.error + } + if v.response != nil { + return *v.response, nil + } + // This should never happen + } + + return nil, nil +} + +func insertIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, value ast.Value) { + requestCache := getHTTPSendCache(bctx) + if requestCache == nil { + // Should never happen.. if it does just skip caching the value + // FIXME: return error instead, to prevent inconsistencies? 
+ return + } + requestCache.putResponse(key, &value) +} + +func insertErrorIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, err error) { + requestCache := getHTTPSendCache(bctx) + if requestCache == nil { + // Should never happen.. if it does just skip caching the value + // FIXME: return error instead, to prevent inconsistencies? + return + } + requestCache.putError(key, err) +} + +// checkHTTPSendInterQueryCache checks for the given key's value in the inter-query cache +func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) { + requestCache := c.bctx.InterQueryBuiltinCache + + cachedValue, found := requestCache.Get(c.key) + if !found { + return nil, nil + } + + value, cerr := requestCache.Clone(cachedValue) + if cerr != nil { + return nil, handleHTTPSendErr(c.bctx, cerr) + } + + c.bctx.Metrics.Counter(httpSendInterQueryCacheHits).Incr() + var cachedRespData *interQueryCacheData + + switch v := value.(type) { + case *interQueryCacheValue: + var err error + cachedRespData, err = v.copyCacheData() + if err != nil { + return nil, err + } + case *interQueryCacheData: + cachedRespData = v + default: + return nil, nil + } + + if getCurrentTime(c.bctx).Before(cachedRespData.ExpiresAt) { + return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode) + } + + var err error + c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.key) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + headers := parseResponseHeaders(cachedRespData.Headers) + + // check with the server if the stale response is still up-to-date. + // If server returns a new response (ie. status_code=200), update the cache with the new response + // If server returns an unmodified response (ie. 
status_code=304), update the headers for the existing response + result, modified, err := revalidateCachedResponse(c.httpReq, c.httpClient, c.key, headers) + requestCache.Delete(c.key) + if err != nil || result == nil { + return nil, err + } + + defer result.Body.Close() + + if !modified { + // update the headers in the cached response with their corresponding values from the 304 (Not Modified) response + for headerName, values := range result.Header { + cachedRespData.Headers.Del(headerName) + for _, v := range values { + cachedRespData.Headers.Add(headerName, v) + } + } + + if forceCaching(c.forceCacheParams) { + createdAt := getCurrentTime(c.bctx) + cachedRespData.ExpiresAt = createdAt.Add(time.Second * time.Duration(c.forceCacheParams.forceCacheDurationSeconds)) + } else { + expiresAt, err := expiryFromHeaders(result.Header) + if err != nil { + return nil, err + } + cachedRespData.ExpiresAt = expiresAt + } + + cachingMode, err := getCachingMode(c.key) + if err != nil { + return nil, err + } + + var pcv cache.InterQueryCacheValue + + if cachingMode == defaultCachingMode { + pcv, err = cachedRespData.toCacheValue() + if err != nil { + return nil, err + } + } else { + pcv = cachedRespData + } + + c.bctx.InterQueryBuiltinCache.InsertWithExpiry(c.key, pcv, cachedRespData.ExpiresAt) + + return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode) + } + + newValue, respBody, err := formatHTTPResponseToAST(result, c.forceJSONDecode, c.forceYAMLDecode) + if err != nil { + return nil, err + } + + if err := insertIntoHTTPSendInterQueryCache(c.bctx, c.key, result, respBody, c.forceCacheParams); err != nil { + return nil, err + } + + return newValue, nil +} + +// insertIntoHTTPSendInterQueryCache inserts given key and value in the inter-query cache +func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) error { + if resp == nil || (!forceCaching(cacheParams) && 
!canStore(resp.Header)) || !cacheableCodes.Contains(ast.InternedTerm(resp.StatusCode)) { + return nil + } + + requestCache := bctx.InterQueryBuiltinCache + + obj, ok := key.(ast.Object) + if !ok { + return errors.New("interface conversion error") + } + + cachingMode, err := getCachingMode(obj) + if err != nil { + return err + } + + var pcv cache.InterQueryCacheValue + var pcvData *interQueryCacheData + if cachingMode == defaultCachingMode { + pcv, pcvData, err = newInterQueryCacheValue(bctx, resp, respBody, cacheParams) + } else { + pcvData, err = newInterQueryCacheData(bctx, resp, respBody, cacheParams) + pcv = pcvData + } + + if err != nil { + return err + } + + requestCache.InsertWithExpiry(key, pcv, pcvData.ExpiresAt) + return nil +} + +func createCacheableHTTPStatusCodes() { + for _, element := range cacheableHTTPStatusCodes { + cacheableCodes.Add(ast.InternedTerm(element)) + } +} + +func parseTimeout(timeoutVal ast.Value) (time.Duration, error) { + var timeout time.Duration + switch t := timeoutVal.(type) { + case ast.Number: + timeoutInt, ok := t.Int64() + if !ok { + return timeout, fmt.Errorf("invalid timeout number value %v, must be int64", timeoutVal) + } + return time.Duration(timeoutInt), nil + case ast.String: + // Support strings without a unit, treat them the same as just a number value (ns) + var err error + timeoutInt, err := strconv.ParseInt(string(t), 10, 64) + if err == nil { + return time.Duration(timeoutInt), nil + } + + // Try parsing it as a duration (requires a supported units suffix) + timeout, err = time.ParseDuration(string(t)) + if err != nil { + return timeout, fmt.Errorf("invalid timeout value %v: %s", timeoutVal, err) + } + return timeout, nil + default: + return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.ValueName(t)) + } +} + +func getBoolValFromReqObj(req ast.Object, key *ast.Term) (bool, error) { + var b ast.Boolean + var ok bool + if v := req.Get(key); v != nil { + if b, ok = 
v.Value.(ast.Boolean); !ok { + return false, fmt.Errorf("invalid value for %v field", key.String()) + } + } + return bool(b), nil +} + +func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) { + term := req.Get(key) + if term == nil { + return 0, nil + } + + if t, ok := term.Value.(ast.Number); ok { + num, ok := t.Int() + if !ok || num < 0 { + return 0, fmt.Errorf("invalid value %v for field %v", t.String(), key.String()) + } + return num, nil + } + + return 0, fmt.Errorf("invalid value %v for field %v", term.String(), key.String()) +} + +func getCachingMode(req ast.Object) (cachingMode, error) { + key := ast.InternedTerm("caching_mode") + var s ast.String + var ok bool + if v := req.Get(key); v != nil { + if s, ok = v.Value.(ast.String); !ok { + return "", fmt.Errorf("invalid value for %v field", key.String()) + } + + switch cachingMode(s) { + case defaultCachingMode, cachingModeDeserialized: + return cachingMode(s), nil + default: + return "", fmt.Errorf("invalid value specified for %v field: %v", key.String(), string(s)) + } + } + return defaultCachingMode, nil +} + +type interQueryCacheValue struct { + Data []byte +} + +func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheValue, *interQueryCacheData, error) { + data, err := newInterQueryCacheData(bctx, resp, respBody, cacheParams) + if err != nil { + return nil, nil, err + } + + b, err := json.Marshal(data) + if err != nil { + return nil, nil, err + } + return &interQueryCacheValue{Data: b}, data, nil +} + +func (cb interQueryCacheValue) Clone() (cache.InterQueryCacheValue, error) { + dup := make([]byte, len(cb.Data)) + copy(dup, cb.Data) + return &interQueryCacheValue{Data: dup}, nil +} + +func (cb interQueryCacheValue) SizeInBytes() int64 { + return int64(len(cb.Data)) +} + +func (cb *interQueryCacheValue) copyCacheData() (*interQueryCacheData, error) { + var res interQueryCacheData + err := 
util.UnmarshalJSON(cb.Data, &res) + if err != nil { + return nil, err + } + return &res, nil +} + +type interQueryCacheData struct { + RespBody []byte + Status string + StatusCode int + Headers http.Header + ExpiresAt time.Time +} + +func forceCaching(cacheParams *forceCacheParams) bool { + return cacheParams != nil && cacheParams.forceCacheDurationSeconds > 0 +} + +func expiryFromHeaders(headers http.Header) (time.Time, error) { + var expiresAt time.Time + maxAge, err := parseMaxAgeCacheDirective(parseCacheControlHeader(headers)) + if err != nil { + return time.Time{}, err + } + if maxAge != -1 { + createdAt, err := getResponseHeaderDate(headers) + if err != nil { + return time.Time{}, err + } + expiresAt = createdAt.Add(time.Second * time.Duration(maxAge)) + } else { + expiresAt = getResponseHeaderExpires(headers) + } + return expiresAt, nil +} + +func newInterQueryCacheData(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheData, error) { + var expiresAt time.Time + + if forceCaching(cacheParams) { + createdAt := getCurrentTime(bctx) + expiresAt = createdAt.Add(time.Second * time.Duration(cacheParams.forceCacheDurationSeconds)) + } else { + var err error + expiresAt, err = expiryFromHeaders(resp.Header) + if err != nil { + return nil, err + } + } + + cv := interQueryCacheData{ + ExpiresAt: expiresAt, + RespBody: respBody, + Status: resp.Status, + StatusCode: resp.StatusCode, + Headers: resp.Header, + } + + return &cv, nil +} + +func (c *interQueryCacheData) formatToAST(forceJSONDecode, forceYAMLDecode bool) (ast.Value, error) { + return prepareASTResult(c.Headers, forceJSONDecode, forceYAMLDecode, c.RespBody, c.Status, c.StatusCode) +} + +func (c *interQueryCacheData) toCacheValue() (*interQueryCacheValue, error) { + b, err := json.Marshal(c) + if err != nil { + return nil, err + } + return &interQueryCacheValue{Data: b}, nil +} + +func (*interQueryCacheData) SizeInBytes() int64 { + return 0 +} + +func (c 
*interQueryCacheData) Clone() (cache.InterQueryCacheValue, error) { + dup := make([]byte, len(c.RespBody)) + copy(dup, c.RespBody) + + return &interQueryCacheData{ + ExpiresAt: c.ExpiresAt, + RespBody: dup, + Status: c.Status, + StatusCode: c.StatusCode, + Headers: c.Headers.Clone(), + }, nil +} + +type responseHeaders struct { + etag string // identifier for a specific version of the response + lastModified string // date and time response was last modified as per origin server +} + +// deltaSeconds specifies a non-negative integer, representing +// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1 +type deltaSeconds int32 + +func parseResponseHeaders(headers http.Header) *responseHeaders { + result := responseHeaders{} + + result.etag = headers.Get("etag") + + result.lastModified = headers.Get("last-modified") + + return &result +} + +func revalidateCachedResponse(req *http.Request, client *http.Client, inputReqObj ast.Object, headers *responseHeaders) (*http.Response, bool, error) { + etag := headers.etag + lastModified := headers.lastModified + + if etag == "" && lastModified == "" { + return nil, false, nil + } + + cloneReq := req.Clone(req.Context()) + + if etag != "" { + cloneReq.Header.Set("if-none-match", etag) + } + + if lastModified != "" { + cloneReq.Header.Set("if-modified-since", lastModified) + } + + response, err := executeHTTPRequest(cloneReq, client, inputReqObj) + if err != nil { + return nil, false, err + } + + switch response.StatusCode { + case http.StatusOK: + return response, true, nil + + case http.StatusNotModified: + return response, false, nil + } + util.Close(response) + return nil, false, nil +} + +func canStore(headers http.Header) bool { + ccHeaders := parseCacheControlHeader(headers) + + // Check "no-store" cache directive + // The "no-store" response directive indicates that a cache MUST NOT + // store any part of either the immediate request or response. 
+ if _, ok := ccHeaders["no-store"]; ok { + return false + } + return true +} + +func getCurrentTime(bctx BuiltinContext) time.Time { + var current time.Time + + value, err := ast.JSON(bctx.Time.Value) + if err != nil { + return current + } + + valueNum, ok := value.(json.Number) + if !ok { + return current + } + + valueNumInt, err := valueNum.Int64() + if err != nil { + return current + } + + current = time.Unix(0, valueNumInt).UTC() + return current +} + +func parseCacheControlHeader(headers http.Header) map[string]string { + ccDirectives := map[string]string{} + ccHeader := headers.Get("cache-control") + + for part := range strings.SplitSeq(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + items := strings.Split(part, "=") + if len(items) != 2 { + continue + } + ccDirectives[strings.Trim(items[0], " ")] = strings.Trim(items[1], ",") + } else { + ccDirectives[part] = "" + } + } + + return ccDirectives +} + +func getResponseHeaderDate(headers http.Header) (date time.Time, err error) { + dateHeader := headers.Get("date") + if dateHeader == "" { + err = errors.New("no date header") + return + } + return http.ParseTime(dateHeader) +} + +func getResponseHeaderExpires(headers http.Header) time.Time { + expiresHeader := headers.Get("expires") + if expiresHeader == "" { + return time.Time{} + } + + date, err := http.ParseTime(expiresHeader) + if err != nil { + // servers can set `Expires: 0` which is an invalid date to indicate expired content + return time.Time{} + } + + return date +} + +// parseMaxAgeCacheDirective parses the max-age directive expressed in delta-seconds as per +// https://tools.ietf.org/html/rfc7234#section-1.2.1 +func parseMaxAgeCacheDirective(cc map[string]string) (deltaSeconds, error) { + maxAge, ok := cc["max-age"] + if !ok { + return deltaSeconds(-1), nil + } + + val, err := strconv.ParseUint(maxAge, 10, 32) + if err != nil { + if numError, ok := err.(*strconv.NumError); ok 
{ + if numError.Err == strconv.ErrRange { + return deltaSeconds(math.MaxInt32), nil + } + } + return deltaSeconds(-1), err + } + + if val > math.MaxInt32 { + return deltaSeconds(math.MaxInt32), nil + } + return deltaSeconds(val), nil +} + +func formatHTTPResponseToAST(resp *http.Response, forceJSONDecode, forceYAMLDecode bool) (ast.Value, []byte, error) { + resultRawBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, nil, err + } + + resultObj, err := prepareASTResult(resp.Header, forceJSONDecode, forceYAMLDecode, resultRawBody, resp.Status, resp.StatusCode) + if err != nil { + return nil, nil, err + } + + return resultObj, resultRawBody, nil +} + +func prepareASTResult(headers http.Header, forceJSONDecode, forceYAMLDecode bool, body []byte, status string, statusCode int) (ast.Value, error) { + var resultBody any + + // If the response body cannot be JSON/YAML decoded, + // an error will not be returned. Instead, the "body" field + // in the result will be null. + switch { + case forceJSONDecode || isJSONType(headers): + _ = util.UnmarshalJSON(body, &resultBody) + case forceYAMLDecode || isContentType(headers, "application/yaml", "application/x-yaml"): + _ = util.Unmarshal(body, &resultBody) + } + + result := make(map[string]any) + result["status"] = status + result["status_code"] = statusCode + result["body"] = resultBody + result["raw_body"] = string(body) + result["headers"] = getResponseHeaders(headers) + + resultObj, err := ast.InterfaceToValue(result) + if err != nil { + return nil, err + } + + return resultObj, nil +} + +func getResponseHeaders(headers http.Header) map[string]any { + respHeaders := map[string]any{} + for headerName, values := range headers { + var respValues []any + for _, v := range values { + respValues = append(respValues, v) + } + respHeaders[strings.ToLower(headerName)] = respValues + } + return respHeaders +} + +// httpRequestExecutor defines an interface for the http send cache +type httpRequestExecutor interface { + 
CheckCache() (ast.Value, error) + InsertIntoCache(value *http.Response) (ast.Value, error) + InsertErrorIntoCache(err error) + ExecuteHTTPRequest() (*http.Response, error) +} + +// newHTTPRequestExecutor returns a new HTTP request executor that wraps either an inter-query or +// intra-query cache implementation +func newHTTPRequestExecutor(bctx BuiltinContext, req ast.Object, key ast.Object) (httpRequestExecutor, error) { + useInterQueryCache, forceCacheParams, err := useInterQueryCache(req) + if err != nil { + return nil, handleHTTPSendErr(bctx, err) + } + + if useInterQueryCache && bctx.InterQueryBuiltinCache != nil { + return newInterQueryCache(bctx, req, key, forceCacheParams) + } + return newIntraQueryCache(bctx, req, key) +} + +type interQueryCache struct { + bctx BuiltinContext + req ast.Object + key ast.Object + httpReq *http.Request + httpClient *http.Client + forceJSONDecode bool + forceYAMLDecode bool + forceCacheParams *forceCacheParams +} + +func newInterQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) { + return &interQueryCache{bctx: bctx, req: req, key: key, forceCacheParams: forceCacheParams}, nil +} + +// CheckCache checks the cache for the value of the key set on this object +func (c *interQueryCache) CheckCache() (ast.Value, error) { + var err error + + // Checking the intra-query cache first ensures consistency of errors and HTTP responses within a query. 
+ resp, err := checkHTTPSendCache(c.bctx, c.key) + if err != nil { + return nil, err + } + if resp != nil { + return resp, nil + } + + c.forceJSONDecode, err = getBoolValFromReqObj(c.key, ast.InternedTerm("force_json_decode")) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, ast.InternedTerm("force_yaml_decode")) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + resp, err = c.checkHTTPSendInterQueryCache() + // Always insert the result of the inter-query cache into the intra-query cache, to maintain consistency within the same query. + if err != nil { + insertErrorIntoHTTPSendCache(c.bctx, c.key, err) + } + if resp != nil { + insertIntoHTTPSendCache(c.bctx, c.key, resp) + } + return resp, err +} + +// InsertIntoCache inserts the key set on this object into the cache with the given value +func (c *interQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { + result, respBody, err := formatHTTPResponseToAST(value, c.forceJSONDecode, c.forceYAMLDecode) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + // Always insert into the intra-query cache, to maintain consistency within the same query. + insertIntoHTTPSendCache(c.bctx, c.key, result) + + // We ignore errors when populating the inter-query cache, because we've already populated the intra-cache, + // and query consistency is our primary concern. 
+ _ = insertIntoHTTPSendInterQueryCache(c.bctx, c.key, value, respBody, c.forceCacheParams) + return result, nil +} + +func (c *interQueryCache) InsertErrorIntoCache(err error) { + insertErrorIntoHTTPSendCache(c.bctx, c.key, err) +} + +// ExecuteHTTPRequest executes a HTTP request +func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) { + var err error + c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.req) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + // Increment counter for actual network requests + c.bctx.Metrics.Counter(httpSendNetworkRequests).Incr() + + return executeHTTPRequest(c.httpReq, c.httpClient, c.req) +} + +type intraQueryCache struct { + bctx BuiltinContext + req ast.Object + key ast.Object +} + +func newIntraQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object) (*intraQueryCache, error) { + return &intraQueryCache{bctx: bctx, req: req, key: key}, nil +} + +// CheckCache checks the cache for the value of the key set on this object +func (c *intraQueryCache) CheckCache() (ast.Value, error) { + return checkHTTPSendCache(c.bctx, c.key) +} + +// InsertIntoCache inserts the key set on this object into the cache with the given value +func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { + forceJSONDecode, err := getBoolValFromReqObj(c.key, ast.InternedTerm("force_json_decode")) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + forceYAMLDecode, err := getBoolValFromReqObj(c.key, ast.InternedTerm("force_yaml_decode")) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + result, _, err := formatHTTPResponseToAST(value, forceJSONDecode, forceYAMLDecode) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + if cacheableCodes.Contains(ast.InternedTerm(value.StatusCode)) { + insertIntoHTTPSendCache(c.bctx, c.key, result) + } + + return result, nil +} + +func (c *intraQueryCache) InsertErrorIntoCache(err error) { + 
insertErrorIntoHTTPSendCache(c.bctx, c.key, err) +} + +// ExecuteHTTPRequest executes a HTTP request +func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { + httpReq, httpClient, err := createHTTPRequest(c.bctx, c.req) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + // Increment counter for actual network requests + c.bctx.Metrics.Counter(httpSendNetworkRequests).Incr() + + return executeHTTPRequest(httpReq, httpClient, c.req) +} + +func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) { + value, err := getBoolValFromReqObj(req, ast.InternedTerm("cache")) + if err != nil { + return false, nil, err + } + + valueForceCache, err := getBoolValFromReqObj(req, ast.InternedTerm("force_cache")) + if err != nil { + return false, nil, err + } + + if valueForceCache { + forceCacheParams, err := newForceCacheParams(req) + return true, forceCacheParams, err + } + + return value, nil, nil +} + +type forceCacheParams struct { + forceCacheDurationSeconds int32 +} + +func newForceCacheParams(req ast.Object) (*forceCacheParams, error) { + term := req.Get(ast.InternedTerm("force_cache_duration_seconds")) + if term == nil { + return nil, errors.New("'force_cache' set but 'force_cache_duration_seconds' parameter is missing") + } + + forceCacheDurationSeconds := term.String() + + value, err := strconv.ParseInt(forceCacheDurationSeconds, 10, 32) + if err != nil { + return nil, err + } + + return &forceCacheParams{forceCacheDurationSeconds: int32(value)}, nil +} + +func getRaiseErrorValue(req ast.Object) (bool, error) { + result := ast.Boolean(true) + var ok bool + if v := req.Get(ast.InternedTerm("raise_error")); v != nil { + if result, ok = v.Value.(ast.Boolean); !ok { + return false, errors.New("invalid value for raise_error field") + } + } + return bool(result), nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go new file mode 
100644 index 0000000000..5ad2abd8a8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go @@ -0,0 +1,7 @@ +//go:build !darwin + +package topdown + +func fixupDarwinGo118(x string, _ string) string { + return x +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go similarity index 82% rename from vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go index ff3058ef40..e2941cbfa1 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - package topdown func fixupDarwinGo118(x, y string) string { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/input.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/topdown/input.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/input.go index cb70aeb71e..ec37b36451 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/input.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go @@ -5,12 +5,12 @@ package topdown import ( - "fmt" + "errors" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) -var errBadPath = fmt.Errorf("bad document path") +var errBadPath = errors.New("bad document path") func mergeTermWithValues(exist *ast.Term, pairs [][2]*ast.Term) (*ast.Term, error) { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go new file mode 100644 index 0000000000..93da1d0022 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go @@ -0,0 +1,63 @@ +// 
Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import "github.com/open-policy-agent/opa/v1/metrics" + +const ( + evalOpPlug = "eval_op_plug" + evalOpResolve = "eval_op_resolve" + evalOpRuleIndex = "eval_op_rule_index" + evalOpBuiltinCall = "eval_op_builtin_call" + evalOpVirtualCacheHit = "eval_op_virtual_cache_hit" + evalOpVirtualCacheMiss = "eval_op_virtual_cache_miss" + evalOpBaseCacheHit = "eval_op_base_cache_hit" + evalOpBaseCacheMiss = "eval_op_base_cache_miss" + evalOpComprehensionCacheSkip = "eval_op_comprehension_cache_skip" + evalOpComprehensionCacheBuild = "eval_op_comprehension_cache_build" + evalOpComprehensionCacheHit = "eval_op_comprehension_cache_hit" + evalOpComprehensionCacheMiss = "eval_op_comprehension_cache_miss" + partialOpSaveUnify = "partial_op_save_unify" + partialOpSaveSetContains = "partial_op_save_set_contains" + partialOpSaveSetContainsRec = "partial_op_save_set_contains_rec" + partialOpCopyPropagation = "partial_op_copy_propagation" +) + +// Instrumentation implements helper functions to instrument query evaluation +// to diagnose performance issues. Instrumentation may be expensive in some +// cases, so it is disabled by default. +type Instrumentation struct { + m metrics.Metrics +} + +// NewInstrumentation returns a new Instrumentation object. Performance +// diagnostics recorded on this Instrumentation object will stored in m. 
+func NewInstrumentation(m metrics.Metrics) *Instrumentation { + return &Instrumentation{ + m: m, + } +} + +func (instr *Instrumentation) startTimer(name string) { + if instr == nil { + return + } + instr.m.Timer(name).Start() +} + +func (instr *Instrumentation) stopTimer(name string) { + if instr == nil { + return + } + delta := instr.m.Timer(name).Stop() + instr.m.Histogram(name).Update(delta) +} + +func (instr *Instrumentation) counterIncr(name string) { + if instr == nil { + return + } + instr.m.Counter(name).Incr() +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go new file mode 100644 index 0000000000..9bca11cbe4 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go @@ -0,0 +1,382 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "errors" + "fmt" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + + "github.com/open-policy-agent/opa/internal/edittree" +) + +func builtinJSONRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + // Expect an object and a string or array/set of strings + if _, err := builtins.ObjectOperand(operands[0].Value, 1); err != nil { + return err + } + + // Build a list of json pointers to remove + paths, err := getJSONPaths(operands[1].Value) + if err != nil { + return err + } + + newObj, err := jsonRemove(operands[0], ast.NewTerm(pathsToObject(paths))) + if err != nil { + return err + } + + if newObj == nil { + return nil + } + + return iter(newObj) +} + +// jsonRemove returns a new term that is the result of walking +// through a and omitting removing any values that are in b but +// have ast.Null values (ie leaf nodes for b). 
+func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) { + if b == nil { + // The paths diverged, return a + return a, nil + } + + var bObj ast.Object + switch bValue := b.Value.(type) { + case ast.Object: + bObj = bValue + case ast.Null: + // Means we hit a leaf node on "b", dont add the value for a + return nil, nil + default: + // The paths diverged, return a + return a, nil + } + + switch aValue := a.Value.(type) { + case ast.String, ast.Number, ast.Boolean, ast.Null: + return a, nil + case ast.Object: + newObj := ast.NewObjectWithCapacity(aValue.Len()) + err := aValue.Iter(func(k *ast.Term, v *ast.Term) error { + // recurse and add the diff of sub objects as needed + diffValue, err := jsonRemove(v, bObj.Get(k)) + if err != nil || diffValue == nil { + return err + } + newObj.Insert(k, diffValue) + return nil + }) + if err != nil { + return nil, err + } + return ast.NewTerm(newObj), nil + case ast.Set: + newSet := ast.NewSetWithCapacity(aValue.Len()) + err := aValue.Iter(func(v *ast.Term) error { + // recurse and add the diff of sub objects as needed + diffValue, err := jsonRemove(v, bObj.Get(v)) + if err != nil || diffValue == nil { + return err + } + newSet.Add(diffValue) + return nil + }) + if err != nil { + return nil, err + } + return ast.NewTerm(newSet), nil + case *ast.Array: + // When indexes are removed we shift left to close empty spots in the array + // as per the JSON patch spec. 
+ newArraySlice := make([]*ast.Term, 0, aValue.Len()) + for i := range aValue.Len() { + v := aValue.Elem(i) + // recurse and add the diff of sub objects as needed + // Note: Keys in b will be strings for the index, eg path /a/1/b => {"a": {"1": {"b": null}}} + diffValue, err := jsonRemove(v, bObj.Get(ast.InternedIntegerString(i))) + if err != nil { + return nil, err + } + if diffValue != nil { + newArraySlice = append(newArraySlice, diffValue) + } + } + return ast.ArrayTerm(newArraySlice...), nil + default: + return nil, fmt.Errorf("invalid value type %T", a) + } +} + +func builtinJSONFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + // Ensure we have the right parameters, expect an object and a string or array/set of strings + obj, err := builtins.ObjectOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // Build a list of filter strings + filters, err := getJSONPaths(operands[1].Value) + if err != nil { + return err + } + + // Actually do the filtering + filterObj := pathsToObject(filters) + r, err := obj.Filter(filterObj) + if err != nil { + return err + } + + return iter(ast.NewTerm(r)) +} + +func getJSONPaths(operand ast.Value) (paths []ast.Ref, err error) { + switch v := operand.(type) { + case *ast.Array: + paths = make([]ast.Ref, 0, v.Len()) + for i := range v.Len() { + filter, err := parsePath(v.Elem(i)) + if err != nil { + return nil, err + } + paths = append(paths, filter) + } + case ast.Set: + paths = make([]ast.Ref, 0, v.Len()) + for _, item := range v.Slice() { + filter, err := parsePath(item) + if err != nil { + return nil, err + } + paths = append(paths, filter) + } + default: + return nil, builtins.NewOperandTypeErr(2, v, "set", "array") + } + + return paths, nil +} + +// parsePath parses a JSON pointer path or array of path segments into an ast.Ref. 
+func parsePath(path *ast.Term) (ast.Ref, error) { + // paths can either be a `/` separated json path or + // an array or set of values + var pathSegments ast.Ref + switch p := path.Value.(type) { + case ast.String: + if p == "" { + return ast.InternedEmptyRefValue.(ast.Ref), nil + } + + s := strings.TrimLeft(string(p), "/") + n := strings.Count(s, "/") + 1 + + pathSegments = make(ast.Ref, 0, n) + + part, remaining, found := strings.Cut(s, "/") + unescaped := strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~") + pathSegments = append(pathSegments, ast.InternedTerm(unescaped)) + + for found { + part, remaining, found = strings.Cut(remaining, "/") + unescaped := strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~") + pathSegments = append(pathSegments, ast.InternedTerm(unescaped)) + } + case *ast.Array: + pathSegments = make(ast.Ref, 0, p.Len()) + for i := range p.Len() { + pathSegments = append(pathSegments, p.Elem(i)) + } + default: + return nil, builtins.NewOperandErr(2, + "must be one of {set, array} containing string paths or array of path segments but got "+ast.ValueName(p), + ) + } + + return pathSegments, nil +} + +func pathsToObject(paths []ast.Ref) ast.Object { + root := ast.NewObjectWithCapacity(len(paths)) + + for _, path := range paths { + node := root + done := false + + // If the path is an empty JSON path, skip all further processing. + if len(path) == 0 { + done = true + } + + // Otherwise, we should have 1+ path segments to work with. 
+ for i := 0; i < len(path)-1 && !done; i++ { + k := path[i] + child := node.Get(k) + + if child == nil { + obj := ast.NewObject() + node.Insert(k, ast.NewTerm(obj)) + node = obj + continue + } + + switch v := child.Value.(type) { + case ast.Null: + done = true + case ast.Object: + node = v + default: + panic("unreachable") + } + } + + if !done { + node.Insert(path[len(path)-1], ast.InternedNullTerm) + } + } + + return root +} + +func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { + et := edittree.EditTreeFromPool(source) + defer edittree.Dispose(et) + + for i := range operations.Len() { + object, ok := operations.Elem(i).Value.(ast.Object) + if !ok { + return nil, errors.New("must be an array of JSON-Patch objects, but at least one element is not an object") + } + + // Validate + if object.Get(ast.InternedTerm("path")) == nil { + return nil, errors.New("missing required attribute 'path'") + } + + opTerm := object.Get(ast.InternedTerm("op")) + if opTerm == nil { + return nil, errors.New("missing required attribute 'op'") + } + + opStr, ok := opTerm.Value.(ast.String) + if !ok { + return nil, errors.New("attribute 'op' must be a string but found: " + ast.ValueName(opTerm.Value)) + } + + path, err := parsePath(object.Get(ast.InternedTerm("path"))) + if err != nil { + return nil, err + } + + switch string(opStr) { + case "add": + value := object.Get(ast.InternedTerm("value")) + if value == nil { + return nil, errors.New("missing required attribute 'value'") + } + if _, err = et.InsertAtPath(path, value); err != nil { + return nil, err + } + case "remove": + if _, err = et.DeleteAtPath(path); err != nil { + return nil, err + } + case "replace": + if _, err = et.DeleteAtPath(path); err != nil { + return nil, err + } + value := object.Get(ast.InternedTerm("value")) + if value == nil { + return nil, errors.New("missing required attribute 'value'") + } + if _, err = et.InsertAtPath(path, value); err != nil { + return nil, err + } + case "move": 
+ fromValue := object.Get(ast.InternedTerm("from")) + if fromValue == nil { + return nil, errors.New("missing required attribute 'from'") + } + + from, err := parsePath(fromValue) + if err != nil { + return nil, err + } + chunk, err := et.RenderAtPath(from) + if err != nil { + return nil, err + } + if _, err = et.DeleteAtPath(from); err != nil { + return nil, err + } + if _, err = et.InsertAtPath(path, chunk); err != nil { + return nil, err + } + case "copy": + fromValue := object.Get(ast.InternedTerm("from")) + if fromValue == nil { + return nil, errors.New("missing required attribute 'from'") + } + from, err := parsePath(fromValue) + if err != nil { + return nil, err + } + chunk, err := et.RenderAtPath(from) + if err != nil { + return nil, err + } + if _, err = et.InsertAtPath(path, chunk); err != nil { + return nil, err + } + case "test": + chunk, err := et.RenderAtPath(path) + if err != nil { + return nil, err + } + value := object.Get(ast.InternedTerm("value")) + if value == nil { + return nil, errors.New("missing required attribute 'value'") + } + if !chunk.Equal(value) { + return nil, fmt.Errorf("value from EditTree != patch value.\n\nExpected: %v\n\nFound: %v", value, chunk) + } + default: + return nil, fmt.Errorf("unrecognized op: '%s'", string(opStr)) + } + } + + return et.Render(), nil +} + +func builtinJSONPatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + // Expect an array of operations. + operations, err := builtins.ArrayOperand(operands[1].Value, 2) + if err != nil { + return err + } + + // JSON patch supports arrays, objects as well as values as the target. 
+ patched, err := applyPatches(operands[0], operations) + if err != nil { + return err + } + return iter(patched) +} + +func init() { + for _, key := range []string{"op", "path", "from", "value", "add", "remove", "replace", "move", "copy", "test"} { + ast.InternStringTerm(key) + } + + RegisterBuiltinFunc(ast.JSONFilter.Name, builtinJSONFilter) + RegisterBuiltinFunc(ast.JSONRemove.Name, builtinJSONRemove) + RegisterBuiltinFunc(ast.JSONPatch.Name, builtinJSONPatch) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go index d319bc0b0d..e48719d145 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go @@ -8,8 +8,8 @@ import ( "encoding/json" "errors" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/gojsonschema" + "github.com/open-policy-agent/opa/v1/ast" ) // astValueToJSONSchemaLoader converts a value to JSON Loader. @@ -27,9 +27,9 @@ func astValueToJSONSchemaLoader(value ast.Value) (gojsonschema.JSONLoader, error return nil, errors.New("invalid JSON string") } loader = gojsonschema.NewStringLoader(string(x)) - case ast.Object: + case ast.Object, *ast.Array: // In case of object serialize it to JSON representation. 
- var data interface{} + var data any data, err = ast.JSON(value) if err != nil { return nil, err @@ -44,7 +44,7 @@ func astValueToJSONSchemaLoader(value ast.Value) (gojsonschema.JSONLoader, error } func newResultTerm(valid bool, data *ast.Term) *ast.Term { - return ast.ArrayTerm(ast.BooleanTerm(valid), data) + return ast.ArrayTerm(ast.InternedTerm(valid), data) } // builtinJSONSchemaVerify accepts 1 argument which can be string or object and checks if it is valid JSON schema. @@ -61,7 +61,7 @@ func builtinJSONSchemaVerify(_ BuiltinContext, operands []*ast.Term, iter func(* return iter(newResultTerm(false, ast.StringTerm("jsonschema: "+err.Error()))) } - return iter(newResultTerm(true, ast.NullTerm())) + return iter(newResultTerm(true, ast.InternedNullTerm)) } // builtinJSONMatchSchema accepts 2 arguments both can be string or object and verifies if the document matches the JSON schema. @@ -110,7 +110,7 @@ func builtinJSONMatchSchema(bctx BuiltinContext, operands []*ast.Term, iter func } // In case of validation errors produce Rego array of objects to describe the errors. 
- arr := ast.NewArray() + arr := ast.NewArrayWithCapacity(len(result.Errors())) for _, re := range result.Errors() { o := ast.NewObject( [...]*ast.Term{ast.StringTerm("error"), ast.StringTerm(re.String())}, diff --git a/vendor/github.com/open-policy-agent/opa/topdown/net.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go similarity index 90% rename from vendor/github.com/open-policy-agent/opa/topdown/net.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/net.go index 534520529a..6caa068b47 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/net.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go @@ -8,8 +8,8 @@ import ( "net" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type lookupIPAddrCacheKey string @@ -49,7 +49,7 @@ func builtinLookupIPAddr(bctx BuiltinContext, operands []*ast.Term, iter func(*a return err } - ret := ast.NewSet() + ret := ast.NewSetWithCapacity(len(addrs)) for _, a := range addrs { ret.Add(ast.StringTerm(a.String())) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go new file mode 100644 index 0000000000..fdf6444939 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go @@ -0,0 +1,202 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "errors" + "fmt" + "math/big" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +type randIntCachingKey string + +var ( + zero = big.NewInt(0) + one = big.NewInt(1) +) + +func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + if canGenerateCheapRange(operands) { + return generateCheapRange(operands, 1, iter) + } + + x, err := builtins.BigIntOperand(operands[0].Value, 1) + if err != nil { + return err + } + + y, err := builtins.BigIntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + ast, err := generateRange(bctx, x, y, one, "numbers.range") + if err != nil { + return err + } + + return iter(ast) +} + +func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + if canGenerateCheapRangeStep(operands) { + step, _ := builtins.IntOperand(operands[2].Value, 3) + if step <= 0 { + return errors.New("numbers.range_step: step must be a positive integer") + } + + return generateCheapRange(operands, step, iter) + } + + x, err := builtins.BigIntOperand(operands[0].Value, 1) + if err != nil { + return err + } + + y, err := builtins.BigIntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + step, err := builtins.BigIntOperand(operands[2].Value, 3) + if err != nil { + return err + } + + if step.Cmp(zero) <= 0 { + return errors.New("numbers.range_step: step must be a positive integer") + } + + ast, err := generateRange(bctx, x, y, step, "numbers.range_step") + if err != nil { + return err + } + + return iter(ast) +} + +func canGenerateCheapRange(operands []*ast.Term) bool { + x, err := builtins.IntOperand(operands[0].Value, 1) + if err != nil || !ast.HasInternedIntNumberTerm(x) { + return false + } + + y, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil || !ast.HasInternedIntNumberTerm(y) { + return false + } + + return true +} + +func 
canGenerateCheapRangeStep(operands []*ast.Term) bool { + if canGenerateCheapRange(operands) { + step, err := builtins.IntOperand(operands[2].Value, 3) + if err == nil && ast.HasInternedIntNumberTerm(step) { + return true + } + } + + return false +} + +func generateCheapRange(operands []*ast.Term, step int, iter func(*ast.Term) error) error { + x, err := builtins.IntOperand(operands[0].Value, 1) + if err != nil { + return err + } + + y, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + terms := make([]*ast.Term, 0, y+1) + + if x <= y { + for i := x; i <= y; i += step { + terms = append(terms, ast.InternedTerm(i)) + } + } else { + for i := x; i >= y; i -= step { + terms = append(terms, ast.InternedTerm(i)) + } + } + + return iter(ast.ArrayTerm(terms...)) +} + +func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, funcName string) (*ast.Term, error) { + cmp := x.Cmp(y) + + comp := func(i *big.Int, y *big.Int) bool { return i.Cmp(y) <= 0 } + iter := func(i *big.Int) *big.Int { return i.Add(i, step) } + + if cmp > 0 { + comp = func(i *big.Int, y *big.Int) bool { return i.Cmp(y) >= 0 } + iter = func(i *big.Int) *big.Int { return i.Sub(i, step) } + } + + result := ast.NewArray() + haltErr := Halt{ + Err: &Error{ + Code: CancelErr, + Message: funcName + ": timed out before generating all numbers in range", + }, + } + + for i := new(big.Int).Set(x); comp(i, y); i = iter(i) { + if bctx.Cancel != nil && bctx.Cancel.Cancelled() { + return nil, haltErr + } + result = result.Append(ast.NewTerm(builtins.IntToNumber(i))) + } + + return ast.NewTerm(result), nil +} + +func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + strOp, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + n, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + if n == 0 { + return iter(ast.InternedTerm(0)) + } + + if n < 0 { + 
n = -n + } + + key := randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n)) + + if val, ok := bctx.Cache.Get(key); ok { + return iter(val.(*ast.Term)) + } + + r, err := bctx.Rand() + if err != nil { + return err + } + result := ast.InternedTerm(r.Intn(n)) + bctx.Cache.Put(key, result) + + return iter(result) +} + +func init() { + RegisterBuiltinFunc(ast.NumbersRange.Name, builtinNumbersRange) + RegisterBuiltinFunc(ast.NumbersRangeStep.Name, builtinNumbersRangeStep) + RegisterBuiltinFunc(ast.RandIntn.Name, builtinRandIntn) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/object.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go similarity index 80% rename from vendor/github.com/open-policy-agent/opa/topdown/object.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/object.go index ba5d77ff37..fe5ccf093f 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/object.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go @@ -5,9 +5,9 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/ref" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinObjectUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -21,6 +21,16 @@ func builtinObjectUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T return err } + if objA.Len() == 0 { + return iter(operands[1]) + } + if objB.Len() == 0 { + return iter(operands[0]) + } + if objA.Compare(objB) == 0 { + return iter(operands[0]) + } + r := mergeWithOverwrite(objA, objB) return iter(ast.NewTerm(r)) @@ -42,17 +52,22 @@ func builtinObjectUnionN(_ BuiltinContext, operands []*ast.Term, iter func(*ast. 
// Example: // Input: [{"a": {"b": 2}}, {"a": 4}, {"a": {"c": 3}}] // Want Output: {"a": {"c": 3}} - result := ast.NewObject() - frozenKeys := map[*ast.Term]struct{}{} - for i := arr.Len() - 1; i >= 0; i-- { + + // First pass: count total keys for pre-allocation + totalSize := 0 + for i := range arr.Len() { o, ok := arr.Elem(i).Value.(ast.Object) if !ok { return builtins.NewOperandElementErr(1, arr, arr.Elem(i).Value, "object") } + totalSize += o.Len() + } + + result := ast.NewObjectWithCapacity(totalSize) + frozenKeys := make(map[*ast.Term]struct{}, totalSize) + for i := arr.Len() - 1; i >= 0; i-- { + o := arr.Elem(i).Value.(ast.Object) // Already validated above mergewithOverwriteInPlace(result, o, frozenKeys) - if err != nil { - return err - } } return iter(ast.NewTerm(result)) @@ -70,7 +85,9 @@ func builtinObjectRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast. if err != nil { return err } - r := ast.NewObject() + + // Pre-allocate with obj size (upper bound for result) + r := ast.NewObjectWithCapacity(obj.Len()) obj.Foreach(func(key *ast.Term, value *ast.Term) { if !keysToRemove.Contains(key) { r.Insert(key, value) @@ -93,9 +110,10 @@ func builtinObjectFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast. 
return err } - filterObj := ast.NewObject() + // Pre-allocate with keys size (upper bound for filter object) + filterObj := ast.NewObjectWithCapacity(keys.Len()) keys.Foreach(func(key *ast.Term) { - filterObj.Insert(key, ast.NullTerm()) + filterObj.Insert(key, ast.InternedNullTerm) }) // Actually do the filtering @@ -114,8 +132,8 @@ func builtinObjectGet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter } // if the get key is not an array, attempt to get the top level key for the operand value in the object - path, err := builtins.ArrayOperand(operands[1].Value, 2) - if err != nil { + path, ok := operands[1].Value.(*ast.Array) + if !ok { if ret := object.Get(operands[1]); ret != nil { return iter(ret) } @@ -143,38 +161,32 @@ func builtinObjectKeys(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te if err != nil { return err } + if object.Len() == 0 { + return iter(ast.InternedEmptySet) + } - keys := ast.SetTerm(object.Keys()...) - - return iter(keys) + return iter(ast.SetTerm(object.Keys()...)) } // getObjectKeysParam returns a set of key values -// from a supplied ast array, object, set value +// from a supplied ast array, object, set value. +// The returned set must not be mutated. For Set +// inputs, it may be the original. func getObjectKeysParam(arrayOrSet ast.Value) (ast.Set, error) { - keys := ast.NewSet() - switch v := arrayOrSet.(type) { case *ast.Array: - _ = v.Iter(func(f *ast.Term) error { - keys.Add(f) - return nil - }) + keys := ast.NewSetWithCapacity(v.Len()) + v.Foreach(keys.Add) + return keys, nil case ast.Set: - _ = v.Iter(func(f *ast.Term) error { - keys.Add(f) - return nil - }) + // Return directly. Callers only use this for Contains() checks + // without mutating the set. 
+ return v, nil case ast.Object: - _ = v.Iter(func(k *ast.Term, _ *ast.Term) error { - keys.Add(k) - return nil - }) - default: - return nil, builtins.NewOperandTypeErr(2, arrayOrSet, "object", "set", "array") + return ast.NewSet(v.Keys()...), nil } - return keys, nil + return nil, builtins.NewOperandTypeErr(2, arrayOrSet, "object", "set", "array") } func mergeWithOverwrite(objA, objB ast.Object) ast.Object { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go new file mode 100644 index 0000000000..464e0141a2 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go @@ -0,0 +1,60 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +func builtinRegoParseModule(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + filename, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + input, err := builtins.StringOperand(operands[1].Value, 1) + if err != nil { + return err + } + + // FIXME: Use configured rego-version? + module, err := ast.ParseModule(string(filename), string(input)) + if err != nil { + return err + } + + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(module); err != nil { + return err + } + + term, err := ast.ParseTerm(buf.String()) + if err != nil { + return err + } + + return iter(term) +} + +func registerRegoMetadataBuiltinFunction(builtin *ast.Builtin) { + f := func(BuiltinContext, []*ast.Term, func(*ast.Term) error) error { + // The compiler should replace all usage of this function, so the only way to get here is within a query; + // which cannot define rules. 
+ return fmt.Errorf("the %s function must only be called within the scope of a rule", builtin.Name) + } + RegisterBuiltinFunc(builtin.Name, f) +} + +func init() { + RegisterBuiltinFunc(ast.RegoParseModule.Name, builtinRegoParseModule) + registerRegoMetadataBuiltinFunction(ast.RegoMetadataChain) + registerRegoMetadataBuiltinFunction(ast.RegoMetadataRule) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go new file mode 100644 index 0000000000..cd36b87b17 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go @@ -0,0 +1,157 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "fmt" + "math/big" + "strings" + "unicode" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +const ( + none uint64 = 1 << (10 * iota) + ki + mi + gi + ti + pi + ei + + kb uint64 = 1000 + mb = kb * 1000 + gb = mb * 1000 + tb = gb * 1000 + pb = tb * 1000 + eb = pb * 1000 +) + +func parseNumBytesError(msg string) error { + return fmt.Errorf("%s: %s", ast.UnitsParseBytes.Name, msg) +} + +func errBytesUnitNotRecognized(unit string) error { + return parseNumBytesError(fmt.Sprintf("byte unit %s not recognized", unit)) +} + +var ( + errBytesValueNoAmount = parseNumBytesError("no byte amount provided") + errBytesValueNumConv = parseNumBytesError("could not parse byte amount to a number") + errBytesValueIncludesSpaces = parseNumBytesError("spaces not allowed in resource strings") +) + +func builtinNumBytes(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var m big.Float + + raw, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + s := formatString(raw) + + if strings.Contains(s, " ") { + return 
errBytesValueIncludesSpaces + } + + num, unit := extractNumAndUnit(s) + if num == "" { + return errBytesValueNoAmount + } + + switch unit { + case "": + m.SetUint64(none) + case "kb", "k": + m.SetUint64(kb) + case "kib", "ki": + m.SetUint64(ki) + case "mb", "m": + m.SetUint64(mb) + case "mib", "mi": + m.SetUint64(mi) + case "gb", "g": + m.SetUint64(gb) + case "gib", "gi": + m.SetUint64(gi) + case "tb", "t": + m.SetUint64(tb) + case "tib", "ti": + m.SetUint64(ti) + case "pb", "p": + m.SetUint64(pb) + case "pib", "pi": + m.SetUint64(pi) + case "eb", "e": + m.SetUint64(eb) + case "eib", "ei": + m.SetUint64(ei) + default: + return errBytesUnitNotRecognized(unit) + } + + numFloat, ok := new(big.Float).SetString(num) + if !ok { + return errBytesValueNumConv + } + + var total big.Int + numFloat.Mul(numFloat, &m).Int(&total) + return iter(ast.NewTerm(builtins.IntToNumber(&total))) +} + +// Makes the string lower case and removes quotation marks +func formatString(s ast.String) string { + str := string(s) + lower := strings.ToLower(str) + return strings.ReplaceAll(lower, "\"", "") +} + +// Splits the string into a number string à la "10" or "10.2" and a unit +// string à la "gb" or "MiB" or "foo". Either can be an empty string +// (error handling is provided elsewhere). +func extractNumAndUnit(s string) (string, string) { + isNum := func(r rune) bool { + return unicode.IsDigit(r) || r == '.' + } + + firstNonNumIdx := -1 + for idx := 0; idx < len(s); idx++ { + r := rune(s[idx]) + // Identify the first non-numeric character, marking the boundary between the number and the unit. 
+ if !isNum(r) && r != 'e' && r != 'E' && r != '+' && r != '-' { + firstNonNumIdx = idx + break + } + if r == 'e' || r == 'E' { + // Check if the next character is a valid digit or +/- for scientific notation + if idx == len(s)-1 || (!unicode.IsDigit(rune(s[idx+1])) && rune(s[idx+1]) != '+' && rune(s[idx+1]) != '-') { + firstNonNumIdx = idx + break + } + // Skip the next character if it is '+' or '-' + if idx+1 < len(s) && (s[idx+1] == '+' || s[idx+1] == '-') { + idx++ + } + } + } + + if firstNonNumIdx == -1 { // only digits, '.', or valid scientific notation + return s, "" + } + if firstNonNumIdx == 0 { // only units (starts with non-digit) + return "", s + } + + // Return the number and the rest as the unit + return s[:firstNonNumIdx], s[firstNonNumIdx:] +} + +func init() { + RegisterBuiltinFunc(ast.UnitsParseBytes.Name, builtinNumBytes) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/topdown/parse_units.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go index daf240214c..44aec86299 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go @@ -10,8 +10,8 @@ import ( "math/big" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) // Binary Si unit constants are borrowed from topdown/parse_bytes @@ -50,7 +50,7 @@ func builtinUnits(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e // We remove escaped quotes from strings here to retain parity with units.parse_bytes. 
s := string(raw) - s = strings.Replace(s, "\"", "", -1) + s = strings.ReplaceAll(s, "\"", "") if strings.Contains(s, " ") { return errIncludesSpaces diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go new file mode 100644 index 0000000000..acf55f0f3f --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go @@ -0,0 +1,97 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "fmt" + "io" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/print" +) + +func NewPrintHook(w io.Writer) print.Hook { + return printHook{w: w} +} + +type printHook struct { + w io.Writer +} + +func (h printHook) Print(_ print.Context, msg string) error { + _, err := fmt.Fprintln(h.w, msg) + return err +} + +func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + if bctx.PrintHook == nil { + return iter(nil) + } + + arr, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + buf := make([]string, arr.Len()) + + err = builtinPrintCrossProductOperands(bctx.Location, buf, arr, 0, func(buf []string) error { + pctx := print.Context{ + Context: bctx.Context, + Location: bctx.Location, + } + return bctx.PrintHook.Print(pctx, strings.Join(buf, " ")) + }) + if err != nil { + return err + } + + return iter(nil) +} + +func builtinPrintCrossProductOperands(loc *ast.Location, buf []string, operands *ast.Array, i int, f func([]string) error) error { + if i >= operands.Len() { + return f(buf) + } + + operand := operands.Elem(i) + + // We allow primitives ... 
+ switch x := operand.Value.(type) { + case ast.String: + buf[i] = string(x) + return builtinPrintCrossProductOperands(loc, buf, operands, i+1, f) + case ast.Number, ast.Boolean, ast.Null: + buf[i] = x.String() + return builtinPrintCrossProductOperands(loc, buf, operands, i+1, f) + } + + // ... but all other operand types must be sets. + xs, ok := operand.Value.(ast.Set) + if !ok { + return Halt{Err: internalErr(loc, "illegal argument type: "+ast.ValueName(operand.Value))} + } + + if xs.Len() == 0 { + buf[i] = "" + return builtinPrintCrossProductOperands(loc, buf, operands, i+1, f) + } + + return xs.Iter(func(x *ast.Term) error { + switch v := x.Value.(type) { + case ast.String: + buf[i] = string(v) + default: + buf[i] = v.String() + } + return builtinPrintCrossProductOperands(loc, buf, operands, i+1, f) + }) +} + +func init() { + RegisterBuiltinFunc(ast.InternalPrint.Name, builtinPrint) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go new file mode 100644 index 0000000000..ce684ae945 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go @@ -0,0 +1,21 @@ +package print + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// Context provides the Hook implementation context about the print() call. +type Context struct { + Context context.Context // request context passed when query executed + Location *ast.Location // location of print call +} + +// Hook defines the interface that callers can implement to receive print +// statement outputs. If the hook returns an error, it will be surfaced if +// strict builtin error checking is enabled (otherwise, it will not halt +// execution.) 
+type Hook interface { + Print(Context, string) error +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/providers.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go similarity index 78% rename from vendor/github.com/open-policy-agent/opa/topdown/providers.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go index 77db917982..29d721e4b2 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/providers.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go @@ -9,17 +9,12 @@ import ( "net/url" "time" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) -var awsRequiredConfigKeyNames = ast.NewSet( - ast.StringTerm("aws_service"), - ast.StringTerm("aws_access_key"), - ast.StringTerm("aws_secret_access_key"), - ast.StringTerm("aws_region"), -) +var awsRequiredConfigKeyNames ast.Set func stringFromTerm(t *ast.Term) string { if v, ok := t.Value.(ast.String); ok { @@ -103,7 +98,7 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a if err != nil { return err } - service := stringFromTerm(awsConfigObj.Get(ast.StringTerm("aws_service"))) + service := stringFromTerm(awsConfigObj.Get(ast.InternedTerm("aws_service"))) awsCreds := aws.CredentialsFromObject(awsConfigObj) // Timestamp for signing. @@ -119,9 +114,6 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a } signingTimestamp = time.Unix(0, ts) - if err != nil { - return err - } // Make sure our required keys exist! // This check is stricter than required, but better to break here than downstream. @@ -133,11 +125,12 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a // Prepare required fields from the HTTP request object. 
var theURL *url.URL var method string - reqURL := reqObj.Get(ast.StringTerm("url")) - reqMethod := reqObj.Get(ast.StringTerm("method")) + reqURL := reqObj.Get(ast.InternedTerm("url")) + keyMethod := ast.InternedTerm("method") + reqMethod := reqObj.Get(keyMethod) headers := ast.NewObject() - headersTerm := reqObj.Get(ast.StringTerm("headers")) + headersTerm := reqObj.Get(ast.InternedTerm("headers")) if headersTerm != nil { var ok bool headers, ok = headersTerm.Value.(ast.Object) @@ -149,10 +142,10 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a // Check types on the request parameters. invalidParameters := ast.NewSet() if _, ok := reqURL.Value.(ast.String); !ok { - invalidParameters.Add(ast.StringTerm("url")) + invalidParameters.Add(ast.InternedTerm("url")) } if _, ok := reqMethod.Value.(ast.String); !ok { - invalidParameters.Add(ast.StringTerm("method")) + invalidParameters.Add(keyMethod) } if invalidParameters.Len() > 0 { return builtins.NewOperandErr(1, "invalid values for required request parameters(s): %v", invalidParameters) @@ -164,8 +157,8 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a } method = stringFromTerm(reqMethod) - bodyTerm := reqObj.Get(ast.StringTerm("body")) - rawBodyTerm := reqObj.Get(ast.StringTerm("raw_body")) + bodyTerm := reqObj.Get(ast.InternedTerm("body")) + rawBodyTerm := reqObj.Get(ast.InternedTerm("raw_body")) body, err := getReqBodyBytes(bodyTerm, rawBodyTerm) if err != nil { return err @@ -176,7 +169,7 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a // if payload signing config is set, pass it down to the signing method disablePayloadSigning := false - t := awsConfigObj.Get(ast.StringTerm("disable_payload_signing")) + t := awsConfigObj.Get(ast.InternedTerm("disable_payload_signing")) if t != nil { if v, ok := t.Value.(ast.Boolean); ok { disablePayloadSigning = bool(v) @@ -191,24 +184,37 @@ func builtinAWSSigV4SignReq(_ 
BuiltinContext, operands []*ast.Term, iter func(*a for k, v := range headersMap { // objectToMap doesn't support arrays if len(v) == 1 { - signedHeadersObj.Insert(ast.StringTerm(k), ast.StringTerm(v[0])) + signedHeadersObj.Insert(ast.InternedTerm(k), ast.StringTerm(v[0])) } } // Set authorization header - signedHeadersObj.Insert(ast.StringTerm("Authorization"), ast.StringTerm(authHeader)) + signedHeadersObj.Insert(ast.InternedTerm("Authorization"), ast.StringTerm(authHeader)) // set aws signature headers for k, v := range awsHeadersMap { - signedHeadersObj.Insert(ast.StringTerm(k), ast.StringTerm(v)) + signedHeadersObj.Insert(ast.InternedTerm(k), ast.StringTerm(v)) } // Create new request object with updated headers. out := reqObj.Copy() - out.Insert(ast.StringTerm("headers"), ast.NewTerm(signedHeadersObj)) + out.Insert(ast.InternedTerm("headers"), ast.NewTerm(signedHeadersObj)) return iter(ast.NewTerm(out)) } func init() { + for _, key := range []string{ + "aws_service", "aws_access_key", "aws_secret_access_key", "aws_region", "disable_payload_signing", + } { + ast.InternStringTerm(key) + } + + awsRequiredConfigKeyNames = ast.NewSet( + ast.InternedTerm("aws_service"), + ast.InternedTerm("aws_access_key"), + ast.InternedTerm("aws_secret_access_key"), + ast.InternedTerm("aws_region"), + ) + RegisterBuiltinFunc(ast.ProvidersAWSSignReqObj.Name, builtinAWSSigV4SignReq) } diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go new file mode 100644 index 0000000000..a65f8a312c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go @@ -0,0 +1,640 @@ +package topdown + +import ( + "context" + "crypto/rand" + "io" + "sort" + "time" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/storage" + 
"github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/copypropagation" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" +) + +// QueryResultSet represents a collection of results returned by a query. +type QueryResultSet []QueryResult + +// QueryResult represents a single result returned by a query. The result +// contains bindings for all variables that appear in the query. +type QueryResult map[ast.Var]*ast.Term + +// Query provides a configurable interface for performing query evaluation. +type Query struct { + seed io.Reader + time time.Time + cancel Cancel + query ast.Body + queryCompiler ast.QueryCompiler + compiler *ast.Compiler + store storage.Store + txn storage.Transaction + input *ast.Term + external *resolverTrie + tracers []QueryTracer + plugTraceVars bool + unknowns []*ast.Term + partialNamespace string + skipSaveNamespace bool + metrics metrics.Metrics + instr *Instrumentation + disableInlining []ast.Ref + shallowInlining bool + nondeterministicBuiltins bool + genvarprefix string + runtime *ast.Term + builtins map[string]*Builtin + indexing bool + earlyExit bool + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + ndBuiltinCache builtins.NDBCache + strictBuiltinErrors bool + builtinErrorList *[]Error + strictObjects bool + roundTripper CustomizeRoundTripper + printHook print.Hook + tracingOpts tracing.Options + virtualCache VirtualCache + baseCache BaseCache +} + +// Builtin represents a built-in function that queries can call. +type Builtin struct { + Decl *ast.Builtin + Func BuiltinFunc +} + +// NewQuery returns a new Query object that can be run. 
+func NewQuery(query ast.Body) *Query { + return &Query{ + query: query, + genvarprefix: ast.WildcardPrefix, + indexing: true, + earlyExit: true, + } +} + +// WithQueryCompiler sets the queryCompiler used for the query. +func (q *Query) WithQueryCompiler(queryCompiler ast.QueryCompiler) *Query { + q.queryCompiler = queryCompiler + return q +} + +// WithCompiler sets the compiler to use for the query. +func (q *Query) WithCompiler(compiler *ast.Compiler) *Query { + q.compiler = compiler + return q +} + +// WithStore sets the store to use for the query. +func (q *Query) WithStore(store storage.Store) *Query { + q.store = store + return q +} + +// WithTransaction sets the transaction to use for the query. All queries +// should be performed over a consistent snapshot of the storage layer. +func (q *Query) WithTransaction(txn storage.Transaction) *Query { + q.txn = txn + return q +} + +// WithCancel sets the cancellation object to use for the query. Set this if +// you need to abort queries based on a deadline. This is optional. +func (q *Query) WithCancel(cancel Cancel) *Query { + q.cancel = cancel + return q +} + +// WithInput sets the input object to use for the query. References rooted at +// input will be evaluated against this value. This is optional. +func (q *Query) WithInput(input *ast.Term) *Query { + q.input = input + return q +} + +// WithTracer adds a query tracer to use during evaluation. This is optional. +// +// Deprecated: Use WithQueryTracer instead. +func (q *Query) WithTracer(tracer Tracer) *Query { + qt, ok := tracer.(QueryTracer) + if !ok { + qt = WrapLegacyTracer(tracer) + } + return q.WithQueryTracer(qt) +} + +// WithQueryTracer adds a query tracer to use during evaluation. This is optional. +// Disabled QueryTracers will be ignored. 
+func (q *Query) WithQueryTracer(tracer QueryTracer) *Query { + if tracer == nil || !tracer.Enabled() { + return q + } + + q.tracers = append(q.tracers, tracer) + + // If *any* of the tracers require local variable metadata we need to + // enabled plugging local trace variables. + conf := tracer.Config() + if conf.PlugLocalVars { + q.plugTraceVars = true + } + + return q +} + +// WithMetrics sets the metrics collection to add evaluation metrics to. This +// is optional. +func (q *Query) WithMetrics(m metrics.Metrics) *Query { + q.metrics = m + return q +} + +// WithInstrumentation sets the instrumentation configuration to enable on the +// evaluation process. By default, instrumentation is turned off. +func (q *Query) WithInstrumentation(instr *Instrumentation) *Query { + q.instr = instr + return q +} + +// WithUnknowns sets the initial set of variables or references to treat as +// unknown during query evaluation. This is required for partial evaluation. +func (q *Query) WithUnknowns(terms []*ast.Term) *Query { + q.unknowns = terms + return q +} + +// WithPartialNamespace sets the namespace to use for supporting rules +// generated as part of the partial evaluation process. The ns value must be a +// valid package path component. +func (q *Query) WithPartialNamespace(ns string) *Query { + q.partialNamespace = ns + return q +} + +// WithSkipPartialNamespace disables namespacing of saved support rules that are generated +// from the original policy (rules which are completely synthetic are still namespaced.) +func (q *Query) WithSkipPartialNamespace(yes bool) *Query { + q.skipSaveNamespace = yes + return q +} + +// WithDisableInlining adds a set of paths to the query that should be excluded from +// inlining. Inlining during partial evaluation can be expensive in some cases +// (e.g., when a cross-product is computed.) Disabling inlining avoids expensive +// computation at the cost of generating support rules. 
+func (q *Query) WithDisableInlining(paths []ast.Ref) *Query { + q.disableInlining = paths + return q +} + +// WithShallowInlining disables aggressive inlining performed during partial evaluation. +// When shallow inlining is enabled rules that depend (transitively) on unknowns are not inlined. +// Only rules/values that are completely known will be inlined. +func (q *Query) WithShallowInlining(yes bool) *Query { + q.shallowInlining = yes + return q +} + +// WithRuntime sets the runtime data to execute the query with. The runtime data +// can be returned by the `opa.runtime` built-in function. +func (q *Query) WithRuntime(runtime *ast.Term) *Query { + q.runtime = runtime + return q +} + +// WithBuiltins adds a set of built-in functions that can be called by the +// query. +func (q *Query) WithBuiltins(builtins map[string]*Builtin) *Query { + q.builtins = builtins + return q +} + +// WithIndexing will enable or disable using rule indexing for the evaluation +// of the query. The default is enabled. +func (q *Query) WithIndexing(enabled bool) *Query { + q.indexing = enabled + return q +} + +// WithEarlyExit will enable or disable using 'early exit' for the evaluation +// of the query. The default is enabled. +func (q *Query) WithEarlyExit(enabled bool) *Query { + q.earlyExit = enabled + return q +} + +// WithSeed sets a reader that will seed randomization required by built-in functions. +// If a seed is not provided crypto/rand.Reader is used. +func (q *Query) WithSeed(r io.Reader) *Query { + q.seed = r + return q +} + +// WithTime sets the time that will be returned by the time.now_ns() built-in function. +func (q *Query) WithTime(x time.Time) *Query { + q.time = x + return q +} + +// WithInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize. 
+func (q *Query) WithInterQueryBuiltinCache(c cache.InterQueryCache) *Query { + q.interQueryBuiltinCache = c + return q +} + +// WithInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize. +func (q *Query) WithInterQueryBuiltinValueCache(c cache.InterQueryValueCache) *Query { + q.interQueryBuiltinValueCache = c + return q +} + +// WithNDBuiltinCache sets the non-deterministic builtin cache. +func (q *Query) WithNDBuiltinCache(c builtins.NDBCache) *Query { + q.ndBuiltinCache = c + return q +} + +// WithStrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors. +func (q *Query) WithStrictBuiltinErrors(yes bool) *Query { + q.strictBuiltinErrors = yes + return q +} + +// WithBuiltinErrorList supplies a pointer to an Error slice to store built-in function errors +// encountered during evaluation. This error slice can be inspected after evaluation to determine +// which built-in function errors occurred. +func (q *Query) WithBuiltinErrorList(list *[]Error) *Query { + q.builtinErrorList = list + return q +} + +// WithResolver configures an external resolver to use for the given ref. +func (q *Query) WithResolver(ref ast.Ref, r resolver.Resolver) *Query { + if q.external == nil { + q.external = newResolverTrie() + } + q.external.Put(ref, r) + return q +} + +// WithHTTPRoundTripper configures a custom HTTP transport for built-in functions that make HTTP requests. +func (q *Query) WithHTTPRoundTripper(t CustomizeRoundTripper) *Query { + q.roundTripper = t + return q +} + +func (q *Query) WithPrintHook(h print.Hook) *Query { + q.printHook = h + return q +} + +// WithDistributedTracingOpts sets the options to be used by distributed tracing. +func (q *Query) WithDistributedTracingOpts(tr tracing.Options) *Query { + q.tracingOpts = tr + return q +} + +// WithStrictObjects tells the evaluator to avoid the "lazy object" optimization +// applied when reading objects from the store. 
It will result in higher memory +// usage and should only be used temporarily while adjusting code that breaks +// because of the optimization. +func (q *Query) WithStrictObjects(yes bool) *Query { + q.strictObjects = yes + return q +} + +// WithVirtualCache sets the VirtualCache to use during evaluation. This is +// optional, and if not set, the default cache is used. +func (q *Query) WithVirtualCache(vc VirtualCache) *Query { + q.virtualCache = vc + return q +} + +// WithBaseCache sets the BaseCache to use during evaluation. This is +// optional, and if not set, the default cache is used. +func (q *Query) WithBaseCache(bc BaseCache) *Query { + q.baseCache = bc + return q +} + +// WithNondeterministicBuiltins causes non-deterministic builtins to be evalued +// during partial evaluation. This is needed to pull in external data, or validate +// a JWT, during PE, so that the result informs what queries are returned. +func (q *Query) WithNondeterministicBuiltins(yes bool) *Query { + q.nondeterministicBuiltins = yes + return q +} + +// PartialRun executes partial evaluation on the query with respect to unknown +// values. Partial evaluation attempts to evaluate as much of the query as +// possible without requiring values for the unknowns set on the query. The +// result of partial evaluation is a new set of queries that can be evaluated +// once the unknown value is known. In addition to new queries, partial +// evaluation may produce additional support modules that should be used in +// conjunction with the partially evaluated queries. 
+func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support []*ast.Module, err error) { + if q.partialNamespace == "" { + q.partialNamespace = "partial" // lazily initialize partial namespace + } + if q.seed == nil { + q.seed = rand.Reader + } + if q.time.IsZero() { + q.time = time.Now() + } + if q.metrics == nil { + q.metrics = metrics.New() + } + + f := &queryIDFactory{} + b := newBindings(0, q.instr) + + var vc VirtualCache + if q.virtualCache != nil { + vc = q.virtualCache + } else { + vc = NewVirtualCache() + } + + var bc BaseCache + if q.baseCache != nil { + bc = q.baseCache + } else { + bc = newBaseCache() + } + + e := &eval{ + ctx: ctx, + metrics: q.metrics, + seed: q.seed, + timeStart: q.time.UnixNano(), + cancel: q.cancel, + query: q.query, + queryCompiler: q.queryCompiler, + queryIDFact: f, + queryID: f.Next(), + bindings: b, + compiler: q.compiler, + store: q.store, + baseCache: bc, + txn: q.txn, + input: q.input, + external: q.external, + tracers: q.tracers, + traceEnabled: len(q.tracers) > 0, + plugTraceVars: q.plugTraceVars, + instr: q.instr, + builtins: q.builtins, + builtinCache: builtins.Cache{}, + interQueryBuiltinCache: q.interQueryBuiltinCache, + interQueryBuiltinValueCache: q.interQueryBuiltinValueCache, + ndBuiltinCache: q.ndBuiltinCache, + virtualCache: vc, + saveSet: newSaveSet(q.unknowns, b, q.instr), + saveStack: newSaveStack(), + saveSupport: newSaveSupport(), + saveNamespace: ast.InternedTerm(q.partialNamespace), + skipSaveNamespace: q.skipSaveNamespace, + inliningControl: &inliningControl{ + shallow: q.shallowInlining, + nondeterministicBuiltins: q.nondeterministicBuiltins, + }, + genvarprefix: q.genvarprefix, + runtime: q.runtime, + indexing: q.indexing, + earlyExit: q.earlyExit, + builtinErrors: &builtinErrors{}, + printHook: q.printHook, + strictObjects: q.strictObjects, + } + + if len(q.disableInlining) > 0 { + e.inliningControl.PushDisable(q.disableInlining, false) + } + + e.caller = e + 
q.metrics.Timer(metrics.RegoPartialEval).Start() + defer q.metrics.Timer(metrics.RegoPartialEval).Stop() + + livevars := ast.NewVarSet() + for _, t := range q.unknowns { + switch v := t.Value.(type) { + case ast.Var: + livevars.Add(v) + case ast.Ref: + livevars.Add(v[0].Value.(ast.Var)) + } + } + + ast.WalkVars(q.query, func(x ast.Var) bool { + if !x.IsGenerated() { + livevars.Add(x) + } + return false + }) + + p := copypropagation.New(livevars).WithCompiler(q.compiler) + + err = e.Run(func(e *eval) error { + + // Build output from saved expressions. + body := ast.NewBody() + + for _, elem := range e.saveStack.Stack[len(e.saveStack.Stack)-1] { + body.Append(elem.Plug(e.bindings)) + } + + // Include bindings as exprs so that when caller evals the result, they + // can obtain values for the vars in their query. + bindingExprs := []*ast.Expr{} + _ = e.bindings.Iter(e.bindings, func(a, b *ast.Term) error { + bindingExprs = append(bindingExprs, ast.Equality.Expr(a, b)) + return nil + }) // cannot return error + + // Sort binding expressions so that results are deterministic. + sort.Slice(bindingExprs, func(i, j int) bool { + return bindingExprs[i].Compare(bindingExprs[j]) < 0 + }) + + for i := range bindingExprs { + body.Append(bindingExprs[i]) + } + + // Skip this rule body if it fails to type-check. + // Type-checking failure means the rule body will never succeed. + if !e.compiler.PassesTypeCheck(body) { + return nil + } + + if !q.shallowInlining { + body = applyCopyPropagation(p, e.instr, body) + } + + partials = append(partials, body) + return nil + }) + + support = e.saveSupport.List() + + if len(e.builtinErrors.errs) > 0 { + if q.strictBuiltinErrors { + err = e.builtinErrors.errs[0] + } else if q.builtinErrorList != nil { + // If a builtinErrorList has been supplied, we must use pointer indirection + // to append to it. 
builtinErrorList is a slice pointer so that errors can be + // appended to it without returning a new slice and changing the interface + // of PartialRun. + for _, err := range e.builtinErrors.errs { + if tdError, ok := err.(*Error); ok { + *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError) + } else { + *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{ + Code: BuiltinErr, + Message: err.Error(), + }) + } + } + } + } + + for i, m := range support { + if regoVersion := q.compiler.DefaultRegoVersion(); regoVersion != ast.RegoUndefined { + ast.SetModuleRegoVersion(m, q.compiler.DefaultRegoVersion()) + } + + sort.Slice(support[i].Rules, func(j, k int) bool { + return support[i].Rules[j].Compare(support[i].Rules[k]) < 0 + }) + } + + return partials, support, err +} + +// Run is a wrapper around Iter that accumulates query results and returns them +// in one shot. +func (q *Query) Run(ctx context.Context) (QueryResultSet, error) { + qrs := QueryResultSet{} + return qrs, q.Iter(ctx, func(qr QueryResult) error { + qrs = append(qrs, qr) + return nil + }) +} + +// Iter executes the query and invokes the iter function with query results +// produced by evaluating the query. 
+func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { + // Query evaluation must not be allowed if the compiler has errors and is in an undefined, possibly inconsistent state + if q.compiler != nil && len(q.compiler.Errors) > 0 { + return &Error{ + Code: InternalErr, + Message: "compiler has errors", + } + } + + if q.seed == nil { + q.seed = rand.Reader + } + if q.time.IsZero() { + q.time = time.Now() + } + if q.metrics == nil { + q.metrics = metrics.New() + } + + f := &queryIDFactory{} + + var vc VirtualCache + if q.virtualCache != nil { + vc = q.virtualCache + } else { + vc = NewVirtualCache() + } + + var bc BaseCache + if q.baseCache != nil { + bc = q.baseCache + } else { + bc = newBaseCache() + } + + e := &eval{ + ctx: ctx, + metrics: q.metrics, + seed: q.seed, + timeStart: q.time.UnixNano(), + cancel: q.cancel, + query: q.query, + queryCompiler: q.queryCompiler, + queryIDFact: f, + queryID: f.Next(), + bindings: newBindings(0, q.instr), + compiler: q.compiler, + store: q.store, + baseCache: bc, + txn: q.txn, + input: q.input, + external: q.external, + tracers: q.tracers, + traceEnabled: len(q.tracers) > 0, + plugTraceVars: q.plugTraceVars, + instr: q.instr, + builtins: q.builtins, + builtinCache: builtins.Cache{}, + interQueryBuiltinCache: q.interQueryBuiltinCache, + interQueryBuiltinValueCache: q.interQueryBuiltinValueCache, + ndBuiltinCache: q.ndBuiltinCache, + virtualCache: vc, + genvarprefix: q.genvarprefix, + runtime: q.runtime, + indexing: q.indexing, + earlyExit: q.earlyExit, + builtinErrors: &builtinErrors{}, + printHook: q.printHook, + tracingOpts: q.tracingOpts, + strictObjects: q.strictObjects, + roundTripper: q.roundTripper, + } + e.caller = e + q.metrics.Timer(metrics.RegoQueryEval).Start() + err := e.Run(func(e *eval) error { + qr := QueryResult{} + _ = e.bindings.Iter(nil, func(k, v *ast.Term) error { + qr[k.Value.(ast.Var)] = v + return nil + }) // cannot return error + return iter(qr) + }) + + if 
len(e.builtinErrors.errs) > 0 { + if q.strictBuiltinErrors { + err = e.builtinErrors.errs[0] + } else if q.builtinErrorList != nil { + // If a builtinErrorList has been supplied, we must use pointer indirection + // to append to it. builtinErrorList is a slice pointer so that errors can be + // appended to it without returning a new slice and changing the interface + // of Iter. + for _, err := range e.builtinErrors.errs { + if tdError, ok := err.(*Error); ok { + *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError) + } else { + *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{ + Code: BuiltinErr, + Message: err.Error(), + }) + } + } + } + } + + q.metrics.Timer(metrics.RegoQueryEval).Stop() + return err +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go similarity index 97% rename from vendor/github.com/open-policy-agent/opa/topdown/reachable.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go index 8d61018e76..1c31019db9 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) // Helper: sets of vertices can be represented as Arrays or Sets. diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go new file mode 100644 index 0000000000..0313452033 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go @@ -0,0 +1,321 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "fmt" + "regexp" + "regexp/syntax" + "sync" + + gintersect "github.com/yashtewari/glob-intersection" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +const ( + regexCacheMaxSize = 100 + regexInterQueryValueCacheHits = "rego_builtin_regex_interquery_value_cache_hits" +) + +var ( + regexpCacheLock = sync.RWMutex{} + regexpCache = make(map[string]*regexp.Regexp) +) + +func builtinRegexIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + if s, err := builtins.StringOperand(operands[0].Value, 1); err == nil { + if _, err = syntax.Parse(string(s), syntax.Perl); err == nil { + return iter(ast.InternedTerm(true)) + } + } + + return iter(ast.InternedTerm(false)) +} + +func builtinRegexMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + re, err := getRegexp(bctx, string(s1)) + if err != nil { + return err + } + return iter(ast.InternedTerm(re.MatchString(string(s2)))) +} + +func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + pattern, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + match, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + start, err := builtins.StringOperand(operands[2].Value, 3) + if err != nil { + return err + } + end, err := builtins.StringOperand(operands[3].Value, 4) + if err != nil { + return err + } + if len(start) != 1 { + return fmt.Errorf("start delimiter has to be exactly one character long but is %d long", len(start)) + } + if len(end) != 1 { + return fmt.Errorf("end delimiter has to be exactly one character long but is %d long", len(start)) + } + re, err := 
getRegexpTemplate(string(pattern), string(start)[0], string(end)[0]) + if err != nil { + return err + } + return iter(ast.InternedTerm(re.MatchString(string(match)))) +} + +func builtinRegexSplit(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + re, err := getRegexp(bctx, string(s1)) + if err != nil { + return err + } + + elems := re.Split(string(s2), -1) + arr := make([]*ast.Term, len(elems)) + for i := range elems { + arr[i] = ast.StringTerm(elems[i]) + } + return iter(ast.ArrayTerm(arr...)) +} + +func getRegexp(bctx BuiltinContext, pat string) (*regexp.Regexp, error) { + if bctx.InterQueryBuiltinValueCache != nil { + // TODO: Use named cache + var key ast.Value = ast.String(pat) + val, ok := bctx.InterQueryBuiltinValueCache.Get(key) + if ok { + res, valid := val.(*regexp.Regexp) + if !valid { + // The cache key may exist for a different value type (eg. glob). + // In this case, we calculate the regex and return the result w/o updating the cache. + return regexp.Compile(pat) + } + + bctx.Metrics.Counter(regexInterQueryValueCacheHits).Incr() + return res, nil + } + + re, err := regexp.Compile(pat) + if err != nil { + return nil, err + } + bctx.InterQueryBuiltinValueCache.Insert(key, re) + return re, nil + } + + regexpCacheLock.RLock() + re, ok := regexpCache[pat] + numCached := len(regexpCache) + regexpCacheLock.RUnlock() + if !ok { + var err error + re, err = regexp.Compile(pat) + if err != nil { + return nil, err + } + + regexpCacheLock.Lock() + if numCached >= regexCacheMaxSize { + // Delete a (semi-)random key to make room for the new one. 
+ for k := range regexpCache { + delete(regexpCache, k) + break + } + } + regexpCache[pat] = re + regexpCacheLock.Unlock() + } + return re, nil +} + +func getRegexpTemplate(pat string, delimStart, delimEnd byte) (*regexp.Regexp, error) { + regexpCacheLock.RLock() + re, ok := regexpCache[pat] + regexpCacheLock.RUnlock() + if !ok { + var err error + re, err = compileRegexTemplate(pat, delimStart, delimEnd) + if err != nil { + return nil, err + } + regexpCacheLock.Lock() + regexpCache[pat] = re + regexpCacheLock.Unlock() + } + return re, nil +} + +func builtinGlobsMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + ne, err := gintersect.NonEmpty(string(s1), string(s2)) + if err != nil { + return err + } + return iter(ast.InternedTerm(ne)) +} + +func builtinRegexFind(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + n, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return err + } + re, err := getRegexp(bctx, string(s1)) + if err != nil { + return err + } + + elems := re.FindAllString(string(s2), n) + arr := make([]*ast.Term, len(elems)) + for i := range elems { + arr[i] = ast.StringTerm(elems[i]) + } + return iter(ast.ArrayTerm(arr...)) +} + +func builtinRegexFindAllStringSubmatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + n, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return 
err + } + + re, err := getRegexp(bctx, string(s1)) + if err != nil { + return err + } + matches := re.FindAllStringSubmatch(string(s2), n) + + outer := make([]*ast.Term, len(matches)) + for i := range matches { + inner := make([]*ast.Term, len(matches[i])) + for j := range matches[i] { + inner[j] = ast.StringTerm(matches[i][j]) + } + outer[i] = ast.ArrayTerm(inner...) + } + + return iter(ast.ArrayTerm(outer...)) +} + +func builtinRegexReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + base, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + pattern, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + value, err := builtins.StringOperand(operands[2].Value, 3) + if err != nil { + return err + } + + re, err := getRegexp(bctx, string(pattern)) + if err != nil { + return err + } + + // If no cancellation context, use the fast path + if bctx.Cancel == nil { + res := re.ReplaceAllString(string(base), string(value)) + if res == string(base) { + return iter(operands[0]) + } + return iter(ast.InternedTerm(res)) + } + + // Use sink writer for cancellation-aware replacement + sink := newSink(ast.RegexReplace.Name, len(base), bctx.Cancel) + src := []byte(base) + repl := []byte(value) + + // Find all matches at once to preserve anchor behavior: replace("foo", "^[a-z]", "F") => "Foo" + allMatches := re.FindAllSubmatchIndex(src, -1) + + lastEnd := 0 + for _, match := range allMatches { + if _, err := sink.Write(src[lastEnd:match[0]]); err != nil { + return err + } + + if _, err := sink.Write(re.Expand(nil, repl, src, match)); err != nil { + return err + } + + lastEnd = match[1] + } + + if _, err := sink.Write(src[lastEnd:]); err != nil { + return err + } + + res := sink.String() + if res == string(base) { + return iter(operands[0]) + } + + return iter(ast.InternedTerm(res)) +} + +func init() { + RegisterBuiltinFunc(ast.RegexIsValid.Name, builtinRegexIsValid) + 
RegisterBuiltinFunc(ast.RegexMatch.Name, builtinRegexMatch) + RegisterBuiltinFunc(ast.RegexMatchDeprecated.Name, builtinRegexMatch) + RegisterBuiltinFunc(ast.RegexSplit.Name, builtinRegexSplit) + RegisterBuiltinFunc(ast.GlobsMatch.Name, builtinGlobsMatch) + RegisterBuiltinFunc(ast.RegexTemplateMatch.Name, builtinRegexMatchTemplate) + RegisterBuiltinFunc(ast.RegexFind.Name, builtinRegexFind) + RegisterBuiltinFunc(ast.RegexFindAllStringSubmatch.Name, builtinRegexFindAllStringSubmatch) + RegisterBuiltinFunc(ast.RegexReplace.Name, builtinRegexReplace) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go similarity index 99% rename from vendor/github.com/open-policy-agent/opa/topdown/regex_template.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go index 4bcddc060b..a1d946fd59 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go @@ -45,7 +45,7 @@ import ( func delimiterIndices(s string, delimiterStart, delimiterEnd byte) ([]int, error) { var level, idx int idxs := make([]int, 0) - for i := 0; i < len(s); i++ { + for i := range len(s) { switch s[i] { case delimiterStart: if level++; level == 1 { diff --git a/vendor/github.com/open-policy-agent/opa/topdown/resolver.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go similarity index 83% rename from vendor/github.com/open-policy-agent/opa/topdown/resolver.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go index 5ed6c1e443..6688c83061 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/resolver.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go @@ -5,9 +5,9 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/resolver" + 
"github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/resolver" ) type resolverTrie struct { @@ -35,6 +35,10 @@ func (t *resolverTrie) Put(ref ast.Ref, r resolver.Resolver) { func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) { e.metrics.Timer(metrics.RegoExternalResolve).Start() defer e.metrics.Timer(metrics.RegoExternalResolve).Stop() + + if t == nil { + return nil, nil + } node := t for i, t := range ref { child, ok := node.children[t.Value] @@ -48,7 +52,11 @@ func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) { Input: e.input, Metrics: e.metrics, } - e.traceWasm(e.query[e.index], &in.Ref) + if e.traceEnabled { + // avoid leaking pointer if trace is disabled + cpy := in.Ref + e.traceWasm(e.query[e.index], &cpy) + } if e.data != nil { return nil, errInScopeWithStmt } @@ -75,7 +83,10 @@ func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) { func (t *resolverTrie) mktree(e *eval, in resolver.Input) (ast.Value, error) { if t.r != nil { - e.traceWasm(e.query[e.index], &in.Ref) + if e.traceEnabled { + cpy := in.Ref + e.traceWasm(e.query[e.index], &cpy) + } if e.data != nil { return nil, errInScopeWithStmt } @@ -88,7 +99,7 @@ func (t *resolverTrie) mktree(e *eval, in resolver.Input) (ast.Value, error) { } return result.Value, nil } - obj := ast.NewObject() + obj := ast.NewObjectWithCapacity(len(t.children)) for k, child := range t.children { v, err := child.mktree(e, resolver.Input{Ref: append(in.Ref, ast.NewTerm(k)), Input: in.Input, Metrics: in.Metrics}) if err != nil { diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go new file mode 100644 index 0000000000..2bbfb43f39 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go @@ -0,0 +1,130 @@ +// Copyright 2018 The OPA Authors. All rights reserved. 
+// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "errors" + "fmt" + + "github.com/open-policy-agent/opa/v1/ast" +) + +var nothingResolver ast.Resolver = illegalResolver{} + +func builtinOPARuntime(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error { + + if bctx.Runtime == nil { + return iter(ast.InternedEmptyObject) + } + + if bctx.Runtime.Get(ast.InternedTerm("config")) != nil { + iface, err := ast.ValueToInterface(bctx.Runtime.Value, nothingResolver) + if err != nil { + return err + } + if object, ok := iface.(map[string]any); ok { + if cfgRaw, ok := object["config"]; ok { + if config, ok := cfgRaw.(map[string]any); ok { + configPurged, err := activeConfig(config) + if err != nil { + return err + } + object["config"] = configPurged + value, err := ast.InterfaceToValue(object) + if err != nil { + return err + } + return iter(ast.NewTerm(value)) + } + } + } + } + + return iter(bctx.Runtime) +} + +func init() { + RegisterBuiltinFunc(ast.OPARuntime.Name, builtinOPARuntime) +} + +func activeConfig(config map[string]any) (any, error) { + + if config["services"] != nil { + err := removeServiceCredentials(config["services"]) + if err != nil { + return nil, err + } + } + + if config["keys"] != nil { + err := removeCryptoKeys(config["keys"]) + if err != nil { + return nil, err + } + } + + return config, nil +} + +func removeServiceCredentials(x any) error { + + switch x := x.(type) { + case []any: + for _, v := range x { + err := removeKey(v, "credentials") + if err != nil { + return err + } + } + + case map[string]any: + for _, v := range x { + err := removeKey(v, "credentials") + if err != nil { + return err + } + } + default: + return fmt.Errorf("illegal service config type: %T", x) + } + + return nil +} + +func removeCryptoKeys(x any) error { + + switch x := x.(type) { + case map[string]any: + for _, v := range x { + err := removeKey(v, "key", "private_key") + if 
err != nil { + return err + } + } + default: + return fmt.Errorf("illegal keys config type: %T", x) + } + + return nil +} + +func removeKey(x any, keys ...string) error { + val, ok := x.(map[string]any) + if !ok { + return errors.New("type assertion error") + } + + for _, key := range keys { + delete(val, key) + } + + return nil +} + +type illegalResolver struct{} + +func (illegalResolver) Resolve(ref ast.Ref) (any, error) { + return nil, fmt.Errorf("illegal value: %v", ref) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/save.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go similarity index 86% rename from vendor/github.com/open-policy-agent/opa/topdown/save.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/save.go index 0468692cc6..47bf7521b4 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/save.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go @@ -1,11 +1,13 @@ package topdown import ( + "cmp" "container/list" "fmt" + "slices" "strings" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // saveSet contains a stack of terms that are considered 'unknown' during @@ -355,17 +357,23 @@ func splitPackageAndRule(path ast.Ref) (ast.Ref, ast.Ref) { // being saved. This check allows the evaluator to evaluate statements // completely during partial evaluation as long as they do not depend on any // kind of unknown value or statements that would generate saves. 
-func saveRequired(c *ast.Compiler, ic *inliningControl, icIgnoreInternal bool, ss *saveSet, b *bindings, x interface{}, rec bool) bool { +func saveRequired(c *ast.Compiler, ic *inliningControl, icIgnoreInternal bool, ss *saveSet, b *bindings, x any, rec bool) bool { var found bool - vis := ast.NewGenericVisitor(func(node interface{}) bool { + vis := ast.NewGenericVisitor(func(node any) bool { if found { return found } switch node := node.(type) { case *ast.Expr: - found = len(node.With) > 0 || ignoreExprDuringPartial(node) + found = len(node.With) > 0 + if found { + return found + } + if !ic.nondeterministicBuiltins { // skip evaluating non-det builtins for PE + found = ignoreExprDuringPartial(node) + } case *ast.Term: switch v := node.Value.(type) { case ast.Var: @@ -412,35 +420,56 @@ func ignoreDuringPartial(bi *ast.Builtin) bool { // Note(philipc): We keep this legacy check around to avoid breaking // existing library users. //nolint:staticcheck // We specifically ignore our own linter warning here. - for _, ignore := range ast.IgnoreDuringPartialEval { - if bi == ignore { - return true - } - } - // Otherwise, ensure all non-deterministic builtins are thrown out. 
- return bi.Nondeterministic + return cmp.Or(slices.Contains(ast.IgnoreDuringPartialEval, bi), bi.Nondeterministic) } type inliningControl struct { - shallow bool - disable []disableInliningFrame + shallow bool + disable []disableInliningFrame + nondeterministicBuiltins bool // evaluate non-det builtins during PE (if args are known) } type disableInliningFrame struct { internal bool refs []ast.Ref + v ast.Var } -func (i *inliningControl) PushDisable(refs []ast.Ref, internal bool) { +func (i *inliningControl) PushDisable(x any, internal bool) { if i == nil { return } + + switch x := x.(type) { + case []ast.Ref: + i.PushDisableRefs(x, internal) + case ast.Var: + i.PushDisableVar(x, internal) + } +} + +func (i *inliningControl) PushDisableRefs(refs []ast.Ref, internal bool) { + if i == nil { + return + } + i.disable = append(i.disable, disableInliningFrame{ internal: internal, refs: refs, }) } +func (i *inliningControl) PushDisableVar(v ast.Var, internal bool) { + if i == nil { + return + } + + i.disable = append(i.disable, disableInliningFrame{ + internal: internal, + v: v, + }) +} + func (i *inliningControl) PopDisable() { if i == nil { return @@ -448,10 +477,26 @@ func (i *inliningControl) PopDisable() { i.disable = i.disable[:len(i.disable)-1] } -func (i *inliningControl) Disabled(ref ast.Ref, ignoreInternal bool) bool { +func (i *inliningControl) Disabled(x any, ignoreInternal bool) bool { if i == nil { return false } + + switch x := x.(type) { + case ast.Ref: + return i.DisabledRef(x, ignoreInternal) + case ast.Var: + return i.DisabledVar(x, ignoreInternal) + } + + return false +} + +func (i *inliningControl) DisabledRef(ref ast.Ref, ignoreInternal bool) bool { + if i == nil { + return false + } + for _, frame := range i.disable { if !frame.internal || !ignoreInternal { for _, other := range frame.refs { @@ -463,3 +508,16 @@ func (i *inliningControl) Disabled(ref ast.Ref, ignoreInternal bool) bool { } return false } + +func (i *inliningControl) DisabledVar(v 
ast.Var, ignoreInternal bool) bool { + if i == nil { + return false + } + + for _, frame := range i.disable { + if (!frame.internal || !ignoreInternal) && frame.v.Equal(v) { + return true + } + } + return false +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go new file mode 100644 index 0000000000..1b2ac79038 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go @@ -0,0 +1,50 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "fmt" + + "github.com/open-policy-agent/opa/internal/semver" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + versionStringA, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + versionStringB, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + versionA, err := semver.Parse(string(versionStringA)) + if err != nil { + return fmt.Errorf("operand 1: string %s is not a valid SemVer", versionStringA) + } + versionB, err := semver.Parse(string(versionStringB)) + if err != nil { + return fmt.Errorf("operand 2: string %s is not a valid SemVer", versionStringB) + } + + return iter(ast.InternedTerm(versionA.Compare(versionB))) +} + +func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + versionString, err := builtins.StringOperand(operands[0].Value, 1) + if err == nil { + _, err = semver.Parse(string(versionString)) + } + + return iter(ast.InternedTerm(err == nil)) +} + +func init() { + RegisterBuiltinFunc(ast.SemVerCompare.Name, builtinSemVerCompare) + RegisterBuiltinFunc(ast.SemVerIsValid.Name, 
builtinSemVerIsValid) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go new file mode 100644 index 0000000000..6ee467efc8 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go @@ -0,0 +1,106 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +// Deprecated: deprecated in v0.4.2 in favour of minus/infix "-" operation. +func builtinSetDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + s1, err := builtins.SetOperand(operands[0].Value, 1) + if err != nil { + return err + } + + s2, err := builtins.SetOperand(operands[1].Value, 2) + if err != nil { + return err + } + + return iter(ast.NewTerm(s1.Diff(s2))) +} + +// builtinSetIntersection returns the intersection of the given input sets +func builtinSetIntersection(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + inputSet, err := builtins.SetOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // empty input set + if inputSet.Len() == 0 { + return iter(ast.InternedEmptySet) + } + + var result ast.Set + + err = inputSet.Iter(func(x *ast.Term) error { + n, err := builtins.SetOperand(x.Value, 1) + if err != nil { + return err + } + + if result == nil { + result = n + } else { + result = result.Intersect(n) + } + return nil + }) + if err != nil { + return err + } + return iter(ast.NewTerm(result)) +} + +// builtinSetUnion returns the union of the given input sets +func builtinSetUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + // The set union logic here is manually inlined on purpose. 
By lifting + // this logic up a level and not doing pairwise set unions, we avoid + // many heap allocations. We also pre-allocate the result set by first + // counting total elements across all input sets. + inputSet, err := builtins.SetOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // First pass: count total elements for pre-allocation + totalSize := 0 + err = inputSet.Iter(func(x *ast.Term) error { + item, err := builtins.SetOperand(x.Value, 1) + if err != nil { + return err + } + totalSize += item.Len() + return nil + }) + if err != nil { + return err + } + + // Pre-allocate result set with estimated capacity + result := ast.NewSetWithCapacity(totalSize) + + err = inputSet.Iter(func(x *ast.Term) error { + item, _ := builtins.SetOperand(x.Value, 1) // error checked above + item.Foreach(result.Add) + return nil + }) + if err != nil { + return err + } + + return iter(ast.NewTerm(result)) +} + +func init() { + RegisterBuiltinFunc(ast.SetDiff.Name, builtinSetDiff) + RegisterBuiltinFunc(ast.Intersection.Name, builtinSetIntersection) + RegisterBuiltinFunc(ast.Union.Name, builtinSetUnion) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/sink.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/sink.go new file mode 100644 index 0000000000..15208086b2 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/sink.go @@ -0,0 +1,73 @@ +package topdown + +import ( + "bytes" + "io" +) + +var _ io.Writer = (*sinkW)(nil) + +type sinkWriter interface { + io.Writer + String() string + Grow(int) + WriteByte(byte) error + WriteString(string) (int, error) +} + +type sinkW struct { + buf *bytes.Buffer + cancel Cancel + err error +} + +func newSink(name string, hint int, c Cancel) sinkWriter { + b := &bytes.Buffer{} + if hint > 0 { + b.Grow(hint) + } + + if c == nil { + return b + } + + return &sinkW{ + cancel: c, + buf: b, + err: Halt{ + Err: &Error{ + Code: CancelErr, + Message: name + ": timed out before finishing", + }, + 
}, + } +} + +func (sw *sinkW) Grow(n int) { + sw.buf.Grow(n) +} + +func (sw *sinkW) Write(bs []byte) (int, error) { + if sw.cancel.Cancelled() { + return 0, sw.err + } + return sw.buf.Write(bs) +} + +func (sw *sinkW) WriteByte(b byte) error { + if sw.cancel.Cancelled() { + return sw.err + } + return sw.buf.WriteByte(b) +} + +func (sw *sinkW) WriteString(s string) (int, error) { + if sw.cancel.Cancelled() { + return 0, sw.err + } + return sw.buf.WriteString(s) +} + +func (sw *sinkW) String() string { + return sw.buf.String() +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go new file mode 100644 index 0000000000..27c841102b --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go @@ -0,0 +1,816 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "errors" + "fmt" + "math/big" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/tchap/go-patricia/v2/patricia" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" +) + +func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + a, b := operands[0].Value, operands[1].Value + + var strs []string + switch a := a.(type) { + case ast.String: + strs = []string{string(a)} + case *ast.Array, ast.Set: + var err error + strs, err = builtins.StringSliceOperand(a, 1) + if err != nil { + return err + } + default: + return builtins.NewOperandTypeErr(1, a, "string", "set", "array") + } + + var prefixes []string + switch b := b.(type) { + case ast.String: + prefixes = []string{string(b)} + case *ast.Array, ast.Set: + var err error + prefixes, err = builtins.StringSliceOperand(b, 2) + if err != nil { + return err + } + 
default: + return builtins.NewOperandTypeErr(2, b, "string", "set", "array") + } + + return iter(ast.InternedTerm(anyStartsWithAny(strs, prefixes))) +} + +func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + a, b := operands[0].Value, operands[1].Value + + var strsReversed []string + switch a := a.(type) { + case ast.String: + strsReversed = []string{reverseString(string(a))} + case *ast.Array, ast.Set: + strs, err := builtins.StringSliceOperand(a, 1) + if err != nil { + return err + } + strsReversed = make([]string, len(strs)) + for i := range strs { + strsReversed[i] = reverseString(strs[i]) + } + default: + return builtins.NewOperandTypeErr(1, a, "string", "set", "array") + } + + var suffixesReversed []string + switch b := b.(type) { + case ast.String: + suffixesReversed = []string{reverseString(string(b))} + case *ast.Array, ast.Set: + suffixes, err := builtins.StringSliceOperand(b, 2) + if err != nil { + return err + } + suffixesReversed = make([]string, len(suffixes)) + for i := range suffixes { + suffixesReversed[i] = reverseString(suffixes[i]) + } + default: + return builtins.NewOperandTypeErr(2, b, "string", "set", "array") + } + + return iter(ast.InternedTerm(anyStartsWithAny(strsReversed, suffixesReversed))) +} + +func anyStartsWithAny(strs []string, prefixes []string) bool { + if len(strs) == 0 || len(prefixes) == 0 { + return false + } + if len(strs) == 1 && len(prefixes) == 1 { + return strings.HasPrefix(strs[0], prefixes[0]) + } + + trie := patricia.NewTrie() + for i := range strs { + trie.Insert([]byte(strs[i]), true) + } + + for i := range prefixes { + if trie.MatchSubtree([]byte(prefixes[i])) { + return true + } + } + + return false +} + +func builtinFormatInt(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + input, err := builtins.NumberOperand(operands[0].Value, 1) + if err != nil { + return err + } + + base, err := builtins.NumberOperand(operands[1].Value, 2) + if 
err != nil { + return err + } + + var format string + switch base { + case ast.Number("2"): + format = "%b" + case ast.Number("8"): + format = "%o" + case ast.Number("10"): + if i, ok := input.Int(); ok { + return iter(ast.InternedIntegerString(i)) + } + format = "%d" + case ast.Number("16"): + format = "%x" + default: + return builtins.NewOperandEnumErr(2, "2", "8", "10", "16") + } + + f := builtins.NumberToFloat(input) + i, _ := f.Int(nil) + + return iter(ast.InternedTerm(fmt.Sprintf(format, i))) +} + +func builtinConcat(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + join, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // fast path for empty or single string array/set, allocates no memory + if term, ok := zeroOrOneStringTerm(operands[1].Value); ok { + return iter(term) + } + + sb := newSink(ast.Concat.Name, 0, bctx.Cancel) + + // NOTE(anderseknert): + // More or less Go's strings.Join implementation, but where we avoid + // creating an intermediate []string slice to pass to that function, + // as that's expensive (3.5x more space allocated). Instead we build + // the string directly using the sink to concatenate the string + // values from the array/set with the separator. 
+ n := 0 + switch b := operands[1].Value.(type) { + case *ast.Array: + l := b.Len() + for i := range l { + s, ok := b.Elem(i).Value.(ast.String) + if !ok { + return builtins.NewOperandElementErr(2, b, b.Elem(i).Value, "string") + } + n += len(s) + } + sep := string(join) + n += len(sep) * (l - 1) + sb.Grow(n) + if _, err := sb.WriteString(string(b.Elem(0).Value.(ast.String))); err != nil { + return err + } + if sep == "" { + for i := 1; i < l; i++ { + if _, err := sb.WriteString(string(b.Elem(i).Value.(ast.String))); err != nil { + return err + } + } + } else if len(sep) == 1 { + // when the separator is a single byte, sb.WriteByte is substantially faster + bsep := sep[0] + for i := 1; i < l; i++ { + if err := sb.WriteByte(bsep); err != nil { + return err + } + if _, err := sb.WriteString(string(b.Elem(i).Value.(ast.String))); err != nil { + return err + } + } + } else { + // for longer separators, there is no such difference between WriteString and Write + for i := 1; i < l; i++ { + if _, err := sb.WriteString(sep); err != nil { + return err + } + if _, err := sb.WriteString(string(b.Elem(i).Value.(ast.String))); err != nil { + return err + } + } + } + return iter(ast.InternedTerm(sb.String())) + case ast.Set: + for _, v := range b.Slice() { + s, ok := v.Value.(ast.String) + if !ok { + return builtins.NewOperandElementErr(2, b, v.Value, "string") + } + n += len(s) + } + sep := string(join) + l := b.Len() + n += len(sep) * (l - 1) + sb.Grow(n) + for i, v := range b.Slice() { + if _, err := sb.WriteString(string(v.Value.(ast.String))); err != nil { + return err + } + if i < l-1 { + if _, err := sb.WriteString(sep); err != nil { + return err + } + } + } + return iter(ast.InternedTerm(sb.String())) + } + + return builtins.NewOperandTypeErr(2, operands[1].Value, "set", "array") +} + +func zeroOrOneStringTerm(a ast.Value) (*ast.Term, bool) { + switch b := a.(type) { + case *ast.Array: + if b.Len() == 0 { + return ast.InternedEmptyString, true + } + if b.Len() == 1 { + e 
:= b.Elem(0) + if _, ok := e.Value.(ast.String); ok { + return e, true + } + } + case ast.Set: + if b.Len() == 0 { + return ast.InternedEmptyString, true + } + if b.Len() == 1 { + e := b.Slice()[0] + if _, ok := e.Value.(ast.String); ok { + return e, true + } + } + } + return nil, false +} + +func runesEqual(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +func builtinIndexOf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + base, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + search, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + if len(string(search)) == 0 { + return errors.New("empty search character") + } + + if isASCII(string(base)) && isASCII(string(search)) { + // this is a false positive in the indexAlloc rule that thinks + // we're converting byte arrays to strings + //nolint:gocritic + return iter(ast.InternedTerm(strings.Index(string(base), string(search)))) + } + + baseRunes := []rune(string(base)) + searchRunes := []rune(string(search)) + searchLen := len(searchRunes) + + for i, r := range baseRunes { + if len(baseRunes) >= i+searchLen { + if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { + return iter(ast.InternedTerm(i)) + } + } else { + break + } + } + + return iter(ast.InternedTerm(-1)) +} + +func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + base, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + search, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + if len(string(search)) == 0 { + return errors.New("empty search character") + } + + baseRunes := []rune(string(base)) + searchRunes := []rune(string(search)) + searchLen := len(searchRunes) + + var arr []*ast.Term + for i, r := range 
baseRunes { + if len(baseRunes) >= i+searchLen { + if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { + arr = append(arr, ast.InternedTerm(i)) + } + } else { + break + } + } + + return iter(ast.ArrayTerm(arr...)) +} + +func builtinSubstring(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + base, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + startIndex, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + length, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return err + } + + if startIndex < 0 { + return errors.New("negative offset") + } + + sbase := string(base) + if sbase == "" { + return iter(ast.InternedEmptyString) + } + + // Optimized path for the likely common case of ASCII strings. + // This allocates less memory and runs in about 1/3 the time. + if isASCII(sbase) { + if startIndex >= len(sbase) { + return iter(ast.InternedEmptyString) + } + + if length < 0 { + return iter(ast.InternedTerm(sbase[startIndex:])) + } + + if startIndex == 0 && length >= len(sbase) { + return iter(operands[0]) + } + + upto := min(len(sbase), startIndex+length) + return iter(ast.InternedTerm(sbase[startIndex:upto])) + } + + if startIndex == 0 && length >= utf8.RuneCountInString(sbase) { + return iter(operands[0]) + } + + runes := []rune(base) + + if startIndex >= len(runes) { + return iter(ast.InternedEmptyString) + } + + var s string + if length < 0 { + s = string(runes[startIndex:]) + } else { + upto := min(len(runes), startIndex+length) + s = string(runes[startIndex:upto]) + } + + return iter(ast.InternedTerm(s)) +} + +func isASCII(s string) bool { + for i := range len(s) { + if s[i] > unicode.MaxASCII { + return false + } + } + return true +} + +func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + 
return err + } + + substr, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + return iter(ast.InternedTerm(strings.Contains(string(s), string(substr)))) +} + +func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + substr, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + baseTerm := string(s) + searchTerm := string(substr) + count := strings.Count(baseTerm, searchTerm) + + return iter(ast.InternedTerm(count)) +} + +func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + prefix, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + return iter(ast.InternedTerm(strings.HasPrefix(string(s), string(prefix)))) +} + +func builtinEndsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + suffix, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + return iter(ast.InternedTerm(strings.HasSuffix(string(s), string(suffix)))) +} + +func builtinLower(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + arg := string(s) + low := strings.ToLower(arg) + + if arg == low { + return iter(operands[0]) + } + + return iter(ast.InternedTerm(low)) +} + +func builtinUpper(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + arg := string(s) + upp := strings.ToUpper(arg) + + if arg == upp { + return 
iter(operands[0]) + } + + return iter(ast.InternedTerm(upp)) +} + +func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + d, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + text, delim := string(s), string(d) + if !strings.Contains(text, delim) { + return iter(ast.ArrayTerm(operands[0])) + } + + return iter(ast.ArrayTerm(util.SplitMap(text, delim, ast.InternedTerm)...)) +} + +func builtinReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + old, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + n, err := builtins.StringOperand(operands[2].Value, 3) + if err != nil { + return err + } + + sink := newSink(ast.Replace.Name, len(s), bctx.Cancel) + replacer := strings.NewReplacer(string(old), string(n)) + if _, err := replacer.WriteString(sink, string(s)); err != nil { + return err + } + replaced := sink.String() + if replaced == string(s) { + return iter(operands[0]) + } + + return iter(ast.InternedTerm(replaced)) +} + +func builtinReplaceN(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + patterns, err := builtins.ObjectOperand(operands[0].Value, 1) + if err != nil { + return err + } + + s, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + keys := util.SortedFunc(patterns.Keys(), ast.TermValueCompare) + pairs := make([]string, 0, len(keys)*2) + + for _, k := range keys { + keyVal, ok := k.Value.(ast.String) + if !ok { + return builtins.NewOperandErr(1, "non-string key found in pattern object") + } + strVal, ok := patterns.Get(k).Value.(ast.String) + if !ok { + return builtins.NewOperandErr(1, "non-string value found in pattern object") + } + pairs = 
append(pairs, string(keyVal), string(strVal)) + } + + sink := newSink(ast.ReplaceN.Name, len(s), bctx.Cancel) + replacer := strings.NewReplacer(pairs...) + if _, err := replacer.WriteString(sink, string(s)); err != nil { + return err + } + return iter(ast.InternedTerm(sink.String())) +} + +func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + c, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + str := string(s) + trimmed := strings.Trim(str, string(c)) + if trimmed == str { + return iter(operands[0]) + } + + return iter(ast.InternedTerm(trimmed)) +} + +func builtinTrimLeft(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + c, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.TrimLeft(string(s), string(c)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.InternedTerm(trimmed)) +} + +func builtinTrimPrefix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + pre, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.TrimPrefix(string(s), string(pre)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.InternedTerm(trimmed)) +} + +func builtinTrimRight(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + c, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.TrimRight(string(s), string(c)) + if trimmed == string(s) { 
+ return iter(operands[0]) + } + + return iter(ast.InternedTerm(trimmed)) +} + +func builtinTrimSuffix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + suf, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.TrimSuffix(string(s), string(suf)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.InternedTerm(trimmed)) +} + +func builtinTrimSpace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + trimmed := strings.TrimSpace(string(s)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.InternedTerm(trimmed)) +} + +func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + astArr, ok := operands[1].Value.(*ast.Array) + if !ok { + return builtins.NewOperandTypeErr(2, operands[1].Value, "array") + } + + // Optimized path for where sprintf is used as a "to_string" function for + // a single integer, i.e. sprintf("%d", [x]) where x is an integer. 
+ if s == "%d" && astArr.Len() == 1 { + if n, ok := astArr.Elem(0).Value.(ast.Number); ok { + if i, ok := n.Int(); ok { + if interned := ast.InternedIntegerString(i); interned != nil { + return iter(interned) + } + return iter(ast.StringTerm(strconv.Itoa(i))) + } + } + } + + args := make([]any, astArr.Len()) + + for i := range args { + switch v := astArr.Elem(i).Value.(type) { + case ast.Number: + if n, ok := v.Int(); ok { + args[i] = n + } else if b, ok := new(big.Int).SetString(v.String(), 10); ok { + args[i] = b + } else if f, ok := v.Float64(); ok { + args[i] = f + } else { + args[i] = v.String() + } + case ast.String: + args[i] = string(v) + default: + args[i] = astArr.Elem(i).String() + } + } + + return iter(ast.InternedTerm(fmt.Sprintf(string(s), args...))) +} + +func builtinReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + return iter(ast.InternedTerm(reverseString(string(s)))) +} + +func reverseString(str string) string { + var buf []byte + var arr [255]byte + size := len(str) + + if size < 255 { + buf = arr[:size:size] + } else { + buf = make([]byte, size) + } + + for start := 0; start < size; { + r, n := utf8.DecodeRuneInString(str[start:]) + start += n + utf8.EncodeRune(buf[size-start:], r) + } + + return string(buf) +} + +func init() { + RegisterBuiltinFunc(ast.FormatInt.Name, builtinFormatInt) + RegisterBuiltinFunc(ast.Concat.Name, builtinConcat) + RegisterBuiltinFunc(ast.IndexOf.Name, builtinIndexOf) + RegisterBuiltinFunc(ast.IndexOfN.Name, builtinIndexOfN) + RegisterBuiltinFunc(ast.Substring.Name, builtinSubstring) + RegisterBuiltinFunc(ast.Contains.Name, builtinContains) + RegisterBuiltinFunc(ast.StringCount.Name, builtinStringCount) + RegisterBuiltinFunc(ast.StartsWith.Name, builtinStartsWith) + RegisterBuiltinFunc(ast.EndsWith.Name, builtinEndsWith) + RegisterBuiltinFunc(ast.Upper.Name, builtinUpper) + 
RegisterBuiltinFunc(ast.Lower.Name, builtinLower) + RegisterBuiltinFunc(ast.Split.Name, builtinSplit) + RegisterBuiltinFunc(ast.Replace.Name, builtinReplace) + RegisterBuiltinFunc(ast.ReplaceN.Name, builtinReplaceN) + RegisterBuiltinFunc(ast.Trim.Name, builtinTrim) + RegisterBuiltinFunc(ast.TrimLeft.Name, builtinTrimLeft) + RegisterBuiltinFunc(ast.TrimPrefix.Name, builtinTrimPrefix) + RegisterBuiltinFunc(ast.TrimRight.Name, builtinTrimRight) + RegisterBuiltinFunc(ast.TrimSuffix.Name, builtinTrimSuffix) + RegisterBuiltinFunc(ast.TrimSpace.Name, builtinTrimSpace) + RegisterBuiltinFunc(ast.Sprintf.Name, builtinSprintf) + RegisterBuiltinFunc(ast.AnyPrefixMatch.Name, builtinAnyPrefixMatch) + RegisterBuiltinFunc(ast.AnySuffixMatch.Name, builtinAnySuffixMatch) + RegisterBuiltinFunc(ast.StringReverse.Name, builtinReverse) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/subset.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go similarity index 82% rename from vendor/github.com/open-policy-agent/opa/topdown/subset.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go index 7b152a5ef9..d50dc2db77 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/subset.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func bothObjects(t1, t2 *ast.Term) (bool, ast.Object, ast.Object) { @@ -88,9 +88,8 @@ func arraySet(t1, t2 *ast.Term) (bool, *ast.Array, ast.Set) { // associated with a key. 
func objectSubset(super ast.Object, sub ast.Object) bool { var superTerm *ast.Term - isSubset := true - sub.Until(func(key, subTerm *ast.Term) bool { + notSubset := sub.Until(func(key, subTerm *ast.Term) bool { // This really wants to be a for loop, hence the somewhat // weird internal structure. However, using Until() in this // was is a performance optimization, as it avoids performing @@ -98,10 +97,9 @@ func objectSubset(super ast.Object, sub ast.Object) bool { superTerm = super.Get(key) - // subTerm is can't be nil because we got it from Until(), so + // subTerm can't be nil because we got it from Until(), so // we only need to verify that super is non-nil. if superTerm == nil { - isSubset = false return true // break, not a subset } @@ -114,58 +112,39 @@ func objectSubset(super ast.Object, sub ast.Object) bool { // them normally. If only one term is an object, then we // do a normal comparison which will come up false. if ok, superObj, subObj := bothObjects(superTerm, subTerm); ok { - if !objectSubset(superObj, subObj) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !objectSubset(superObj, subObj) } if ok, superSet, subSet := bothSets(superTerm, subTerm); ok { - if !setSubset(superSet, subSet) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !setSubset(superSet, subSet) } if ok, superArray, subArray := bothArrays(superTerm, subTerm); ok { - if !arraySubset(superArray, subArray) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !arraySubset(superArray, subArray) } // We have already checked for exact equality, as well as for // all of the types of nested subsets we care about, so if we // get here it means this isn't a subset. - isSubset = false return true // break, not a subset }) - return isSubset + return !notSubset } // setSubset implements the subset operation on sets. 
// // Unlike in the object case, this is not recursive, we just compare values -// using ast.Set.Contains() because we have no well defined way to "match up" +// using ast.Set.Contains() because we have no well-defined way to "match up" // objects that are in different sets. func setSubset(super ast.Set, sub ast.Set) bool { - isSubset := true - sub.Until(func(t *ast.Term) bool { - if !super.Contains(t) { - isSubset = false - return true + for _, elem := range sub.Slice() { + if !super.Contains(elem) { + return false } - return false - }) + } - return isSubset + return true } // arraySubset implements the subset operation on arrays. @@ -197,12 +176,12 @@ func arraySubset(super, sub *ast.Array) bool { return false } - subElem := sub.Elem(subCursor) superElem := super.Elem(superCursor + subCursor) if superElem == nil { return false } + subElem := sub.Elem(subCursor) if superElem.Value.Compare(subElem.Value) == 0 { subCursor++ } else { @@ -237,22 +216,22 @@ func builtinObjectSubset(_ BuiltinContext, operands []*ast.Term, iter func(*ast. if ok, superObj, subObj := bothObjects(superTerm, subTerm); ok { // Both operands are objects. - return iter(ast.BooleanTerm(objectSubset(superObj, subObj))) + return iter(ast.InternedTerm(objectSubset(superObj, subObj))) } if ok, superSet, subSet := bothSets(superTerm, subTerm); ok { // Both operands are sets. - return iter(ast.BooleanTerm(setSubset(superSet, subSet))) + return iter(ast.InternedTerm(setSubset(superSet, subSet))) } if ok, superArray, subArray := bothArrays(superTerm, subTerm); ok { // Both operands are sets. 
- return iter(ast.BooleanTerm(arraySubset(superArray, subArray))) + return iter(ast.InternedTerm(arraySubset(superArray, subArray))) } if ok, superArray, subSet := arraySet(superTerm, subTerm); ok { // Super operand is array and sub operand is set - return iter(ast.BooleanTerm(arraySetSubset(superArray, subSet))) + return iter(ast.InternedTerm(arraySetSubset(superArray, subSet))) } return builtins.ErrOperand("both arguments object.subset must be of the same type or array and set") diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go new file mode 100644 index 0000000000..524c5bde0d --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go @@ -0,0 +1,45 @@ +package topdown + +import ( + "bytes" + "strings" + "text/template" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +func renderTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + preContentTerm, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + templateVariablesTerm, err := builtins.ObjectOperand(operands[1].Value, 2) + if err != nil { + return err + } + + var templateVariables map[string]any + + if err := ast.As(templateVariablesTerm, &templateVariables); err != nil { + return err + } + + tmpl, err := template.New("template").Parse(string(preContentTerm)) + if err != nil { + return err + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, templateVariables); err != nil { + return err + } + + res := strings.ReplaceAll(buf.String(), "", "") + return iter(ast.StringTerm(res)) +} + +func init() { + RegisterBuiltinFunc(ast.RenderTemplate.Name, renderTemplate) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/template_string.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/template_string.go new file mode 100644 index 0000000000..0e705b81bf 
--- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/template_string.go @@ -0,0 +1,45 @@ +// Copyright 2025 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +func builtinTemplateString(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + arr, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + buf := make([]string, arr.Len()) + + var count int + err = builtinPrintCrossProductOperands(bctx.Location, buf, arr, 0, func(buf []string) error { + count += 1 + // Precautionary run-time assertion that template-strings can't produce multiple outputs; e.g. for custom relation type built-ins not known at compile-time. + if count > 1 { + return Halt{Err: &Error{ + Code: ConflictErr, + Location: bctx.Location, + Message: "template-strings must not produce multiple outputs", + }} + } + return nil + }) + + if err != nil { + return err + } + + return iter(ast.StringTerm(strings.Join(buf, ""))) +} + +func init() { + RegisterBuiltinFunc(ast.InternalTemplateString.Name, builtinTemplateString) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go new file mode 100644 index 0000000000..02958d2264 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/test.go @@ -0,0 +1,30 @@ +// Copyright 2025 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import "github.com/open-policy-agent/opa/v1/ast" + +const TestCaseOp Op = "TestCase" + +func builtinTestCase(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + e := &Event{ + Op: TestCaseOp, + QueryID: bctx.QueryID, + Node: ast.NewExpr([]*ast.Term{ + ast.NewTerm(ast.InternalTestCase.Ref()), + ast.NewTerm(operands[0].Value), + }), + } + + for _, tracer := range bctx.QueryTracers { + tracer.TraceEvent(*e) + } + + return iter(ast.BooleanTerm(true)) +} + +func init() { + RegisterBuiltinFunc(ast.InternalTestCase.Name, builtinTestCase) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go new file mode 100644 index 0000000000..16eae3e0bd --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go @@ -0,0 +1,341 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "encoding/json" + "errors" + "math" + "math/big" + "strconv" + "sync" + "time" + _ "time/tzdata" // this is needed to have LoadLocation when no filesystem tzdata is available + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +var tzCache map[string]*time.Location +var tzCacheMutex *sync.Mutex + +// 1677-09-21T00:12:43.145224192-00:00 +var minDateAllowedForNsConversion = time.Unix(0, math.MinInt64) + +// 2262-04-11T23:47:16.854775807-00:00 +var maxDateAllowedForNsConversion = time.Unix(0, math.MaxInt64) + +func toSafeUnixNano(t time.Time, iter func(*ast.Term) error) error { + if t.Before(minDateAllowedForNsConversion) || t.After(maxDateAllowedForNsConversion) { + return errors.New("time outside of valid range") + } + + return iter(ast.NewTerm(ast.Number(int64ToJSONNumber(t.UnixNano())))) +} + +func builtinTimeNowNanos(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error { + return iter(bctx.Time) +} + +func builtinTimeParseNanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + format, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + value, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + formatStr := string(format) + // look for the formatStr in our acceptedTimeFormats and + // use the constant instead if it matches + if f, ok := acceptedTimeFormats[formatStr]; ok { + formatStr = f + } + result, err := time.Parse(formatStr, string(value)) + if err != nil { + return err + } + + return toSafeUnixNano(result, iter) +} + +func builtinTimeParseRFC3339Nanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + value, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + result, err := time.Parse(time.RFC3339, string(value)) + if err != nil { + return err + } + + return toSafeUnixNano(result, 
iter) +} +func builtinParseDurationNanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + duration, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + value, err := time.ParseDuration(string(duration)) + if err != nil { + return err + } + return iter(ast.NumberTerm(int64ToJSONNumber(int64(value)))) +} + +// Represent exposed constants for formatting from the stdlib time pkg +var acceptedTimeFormats = map[string]string{ + "ANSIC": time.ANSIC, + "UnixDate": time.UnixDate, + "RubyDate": time.RubyDate, + "RFC822": time.RFC822, + "RFC822Z": time.RFC822Z, + "RFC850": time.RFC850, + "RFC1123": time.RFC1123, + "RFC1123Z": time.RFC1123Z, + "RFC3339": time.RFC3339, + "RFC3339Nano": time.RFC3339Nano, +} + +func builtinFormat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, layout, err := tzTime(operands[0].Value) + if err != nil { + return err + } + // Using RFC3339Nano time formatting as default + if layout == "" { + layout = time.RFC3339Nano + } else if layoutStr, ok := acceptedTimeFormats[layout]; ok { + // if we can find a constant specified, use the constant + layout = layoutStr + } + // otherwise try to treat the fmt string as a datetime fmt string + + timestamp := t.Format(layout) + return iter(ast.StringTerm(timestamp)) +} + +func builtinDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + year, month, day := t.Date() + + return iter(ast.ArrayTerm(ast.InternedTerm(year), ast.InternedTerm(int(month)), ast.InternedTerm(day))) +} + +func builtinClock(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + hour, minute, second := t.Clock() + result := ast.NewArray(ast.InternedTerm(hour), ast.InternedTerm(minute), ast.InternedTerm(second)) + return iter(ast.NewTerm(result)) 
+} + +func builtinWeekday(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + weekday := t.Weekday().String() + return iter(ast.StringTerm(weekday)) +} + +func builtinAddDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + + years, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + months, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return err + } + + days, err := builtins.IntOperand(operands[3].Value, 4) + if err != nil { + return err + } + + result := t.AddDate(years, months, days) + + return toSafeUnixNano(result, iter) +} + +func builtinDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t1, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + t2, _, err := tzTime(operands[1].Value) + if err != nil { + return err + } + + // The following implementation of this function is taken + // from https://github.com/icza/gox licensed under Apache 2.0. + // The only modification made is to variable names. 
+ // + // For details, see https://stackoverflow.com/a/36531443/1705598 + // + // Copyright 2021 icza + // BEGIN REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT + if t1.Location() != t2.Location() { + t2 = t2.In(t1.Location()) + } + if t1.After(t2) { + t1, t2 = t2, t1 + } + y1, M1, d1 := t1.Date() + y2, M2, d2 := t2.Date() + + h1, m1, s1 := t1.Clock() + h2, m2, s2 := t2.Clock() + + year := y2 - y1 + month := int(M2 - M1) + day := d2 - d1 + hour := h2 - h1 + min := m2 - m1 + sec := s2 - s1 + + // Normalize negative values + if sec < 0 { + sec += 60 + min-- + } + if min < 0 { + min += 60 + hour-- + } + if hour < 0 { + hour += 24 + day-- + } + if day < 0 { + // Days in month: + t := time.Date(y1, M1, 32, 0, 0, 0, 0, time.UTC) + day += 32 - t.Day() + month-- + } + if month < 0 { + month += 12 + year-- + } + // END REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT + + return iter(ast.ArrayTerm(ast.InternedTerm(year), ast.InternedTerm(month), ast.InternedTerm(day), + ast.InternedTerm(hour), ast.InternedTerm(min), ast.InternedTerm(sec))) +} + +func tzTime(a ast.Value) (t time.Time, lay string, err error) { + var nVal ast.Value + loc := time.UTC + layout := "" + switch va := a.(type) { + case *ast.Array: + if va.Len() == 0 { + return time.Time{}, layout, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]") + } + + nVal, err = builtins.NumberOperand(va.Elem(0).Value, 1) + if err != nil { + return time.Time{}, layout, err + } + + if va.Len() > 1 { + tzVal, err := builtins.StringOperand(va.Elem(1).Value, 1) + if err != nil { + return time.Time{}, layout, err + } + + tzName := string(tzVal) + + switch tzName { + case "", "UTC": + // loc is already UTC + + case "Local": + loc = time.Local + + default: + var ok bool + + tzCacheMutex.Lock() + loc, ok = tzCache[tzName] + + if !ok { + loc, err = time.LoadLocation(tzName) + if err != nil { + tzCacheMutex.Unlock() + return time.Time{}, layout, err + } + tzCache[tzName] = loc + } + tzCacheMutex.Unlock() + 
} + } + + if va.Len() > 2 { + lay, err := builtins.StringOperand(va.Elem(2).Value, 1) + if err != nil { + return time.Time{}, layout, err + } + layout = string(lay) + } + + case ast.Number: + nVal = a + + default: + return time.Time{}, layout, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]") + } + + value, err := builtins.NumberOperand(nVal, 1) + if err != nil { + return time.Time{}, layout, err + } + + f := builtins.NumberToFloat(value) + i64, acc := f.Int64() + if acc != big.Exact { + return time.Time{}, layout, errors.New("timestamp too big") + } + + t = time.Unix(0, i64).In(loc) + + return t, layout, nil +} + +func int64ToJSONNumber(i int64) json.Number { + return json.Number(strconv.FormatInt(i, 10)) +} + +func init() { + RegisterBuiltinFunc(ast.NowNanos.Name, builtinTimeNowNanos) + RegisterBuiltinFunc(ast.ParseRFC3339Nanos.Name, builtinTimeParseRFC3339Nanos) + RegisterBuiltinFunc(ast.ParseNanos.Name, builtinTimeParseNanos) + RegisterBuiltinFunc(ast.ParseDurationNanos.Name, builtinParseDurationNanos) + RegisterBuiltinFunc(ast.Format.Name, builtinFormat) + RegisterBuiltinFunc(ast.Date.Name, builtinDate) + RegisterBuiltinFunc(ast.Clock.Name, builtinClock) + RegisterBuiltinFunc(ast.Weekday.Name, builtinWeekday) + RegisterBuiltinFunc(ast.AddDate.Name, builtinAddDate) + RegisterBuiltinFunc(ast.Diff.Name, builtinDiff) + tzCacheMutex = &sync.Mutex{} + tzCache = make(map[string]*time.Location) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go new file mode 100644 index 0000000000..bb2c9e1c1c --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go @@ -0,0 +1,1316 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/hmac" + "crypto/rsa" + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "hash" + "math/big" + "strconv" + "strings" + + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/jwx/v3/jws/jwsbb" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" +) + +const headerJwt = "JWT" + +// JSONWebToken represent the 3 parts (header, payload & signature) of +// +// a JWT in Base64. +type JSONWebToken struct { + header string + payload string + signature string + decodedHeader ast.Object +} + +// decodeHeader populates the decodedHeader field. +func (token *JSONWebToken) decodeHeader() error { + result, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.header)) + if err != nil { + return fmt.Errorf("JWT header had invalid encoding: %w", err) + } + decodedHeader, err := validateJWTHeader(string(result.Value.(ast.String))) + if err != nil { + return err + } + token.decodedHeader = decodedHeader + return nil +} + +// Implements JWT decoding/validation based on RFC 7519 Section 7.2: +// https://tools.ietf.org/html/rfc7519#section-7.2 +// It does no data validation, it merely checks that the given string +// represents a structurally valid JWT. It supports JWTs using JWS compact +// serialization. 
+func builtinJWTDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + token, err := decodeJWT(operands[0].Value) + if err != nil { + return err + } + + if err = token.decodeHeader(); err != nil { + return err + } + + p, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + if err != nil { + return fmt.Errorf("JWT payload had invalid encoding: %v", err) + } + + if cty := token.decodedHeader.Get(ast.InternedTerm("cty")); cty != nil { + ctyVal := string(cty.Value.(ast.String)) + // It is possible for the contents of a token to be another + // token as a result of nested signing or encryption. To handle + // the case where we are given a token such as this, we check + // the content type and recurse on the payload if the content + // is "JWT". + // When the payload is itself another encoded JWT, then its + // contents are quoted (behavior of https://jwt.io/). To fix + // this, remove leading and trailing quotes. + if ctyVal == headerJwt { + p, err = getResult(builtinTrim, p, ast.StringTerm(`"'`)) + if err != nil { + panic("not reached") + } + result, err := getResult(builtinJWTDecode, p) + if err != nil { + return err + } + return iter(result) + } + } + + payload, err := extractJSONObject(string(p.Value.(ast.String))) + if err != nil { + return err + } + + s, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.signature)) + if err != nil { + return fmt.Errorf("JWT signature had invalid encoding: %v", err) + } + sign := hex.EncodeToString([]byte(s.Value.(ast.String))) + + arr := []*ast.Term{ + ast.NewTerm(token.decodedHeader), + ast.NewTerm(payload), + ast.StringTerm(sign), + } + + return iter(ast.ArrayTerm(arr...)) +} + +// Implements RS256 JWT signature verification +func builtinJWTVerifyRS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, 
signature []byte) error { + return rsa.VerifyPKCS1v15( + publicKey, + crypto.SHA256, + digest, + signature) + }) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +// Implements RS384 JWT signature verification +func builtinJWTVerifyRS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { + return rsa.VerifyPKCS1v15( + publicKey, + crypto.SHA384, + digest, + signature) + }) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +// Implements RS512 JWT signature verification +func builtinJWTVerifyRS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { + return rsa.VerifyPKCS1v15( + publicKey, + crypto.SHA512, + digest, + signature) + }) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +// Implements PS256 JWT signature verification +func builtinJWTVerifyPS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { + return rsa.VerifyPSS( + publicKey, + crypto.SHA256, + digest, + signature, + nil) + }) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +// Implements PS384 JWT signature verification +func builtinJWTVerifyPS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { + return 
rsa.VerifyPSS( + publicKey, + crypto.SHA384, + digest, + signature, + nil) + }) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +// Implements PS512 JWT signature verification +func builtinJWTVerifyPS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { + return rsa.VerifyPSS( + publicKey, + crypto.SHA512, + digest, + signature, + nil) + }) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +// Implements RSA JWT signature verification. +func builtinJWTVerifyRSA(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (bool, error) { + return builtinJWTVerify(bctx, jwt, keyStr, hasher, func(publicKey any, digest []byte, signature []byte) error { + publicKeyRsa, ok := publicKey.(*rsa.PublicKey) + if !ok { + return errors.New("incorrect public key type") + } + return verify(publicKeyRsa, digest, signature) + }) +} + +// Implements ES256 JWT signature verification. 
+func builtinJWTVerifyES256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha256.New, verifyES) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +// Implements ES384 JWT signature verification +func builtinJWTVerifyES384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha512.New384, verifyES) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +// Implements ES512 JWT signature verification +func builtinJWTVerifyES512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha512.New, verifyES) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +func verifyES(publicKey any, digest []byte, signature []byte) (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("ECDSA signature verification error: %v", r) + } + }() + publicKeyEcdsa, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return errors.New("incorrect public key type") + } + r, s := &big.Int{}, &big.Int{} + n := len(signature) / 2 + r.SetBytes(signature[:n]) + s.SetBytes(signature[n:]) + if ecdsa.Verify(publicKeyEcdsa, digest, r, s) { + return nil + } + return errors.New("ECDSA signature verification error") +} + +// Implements EdDSA JWT signature verification +func builtinJWTVerifyEdDSA(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, nil, verifyEd25519) + if err == nil { + return iter(ast.InternedTerm(result)) + } + return err +} + +func verifyEd25519(publicKey any, digest []byte, signature []byte) (err error) { + defer func() { + if r := recover(); r != nil { + err = 
fmt.Errorf("EdDSA signature verification error: %v", r) + } + }() + publicKeyEcdsa, ok := publicKey.(ed25519.PublicKey) + if !ok { + return errors.New("incorrect public key type") + } + if ed25519.Verify(publicKeyEcdsa, digest, signature) { + return nil + } + return errors.New("ECDSA signature verification error") +} + +type verificationKey struct { + alg string + kid string + key any +} + +// getKeysFromCertOrJWK returns the public key found in a X.509 certificate or JWK key(s). +// A valid PEM block is never valid JSON (and vice versa), hence can try parsing both. +// When provided a JWKS, each key additionally likely contains a key ID and the key algorithm. +func getKeysFromCertOrJWK(certificate string) ([]verificationKey, error) { + if block, rest := pem.Decode([]byte(certificate)); block != nil { + if len(rest) > 0 { + return nil, errors.New("extra data after a PEM certificate block") + } + + if block.Type == blockTypeCertificate { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse a PEM certificate: %w", err) + } + return []verificationKey{{key: cert.PublicKey}}, nil + } + + if block.Type == "PUBLIC KEY" { + key, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse a PEM public key: %w", err) + } + + return []verificationKey{{key: key}}, nil + } + + return nil, errors.New("failed to extract a Key from the PEM certificate") + } + + jwks, err := jwk.ParseString(certificate) + if err != nil { + return nil, fmt.Errorf("failed to parse a JWK key (set): %w", err) + } + + keys := make([]verificationKey, 0, jwks.Len()) + for i := range jwks.Len() { + k, ok := jwks.Key(i) + if !ok { + continue + } + var key any + if err := jwk.Export(k, &key); err != nil { + return nil, err + } + var alg string + if algInterface, ok := k.Algorithm(); ok { + alg = algInterface.String() + } + + // Skip keys with unknown/unsupported algorithms + if alg != "" { + if _, ok := 
tokenAlgorithms[alg]; !ok { + continue + } + } + + var kid string + if kidValue, ok := k.KeyID(); ok { + kid = kidValue + } + + keys = append(keys, verificationKey{ + alg: alg, + kid: kid, + key: key, + }) + } + + return keys, nil +} + +func getKeyByKid(kid string, keys []verificationKey) *verificationKey { + for _, key := range keys { + if key.kid == kid { + return &key + } + } + return nil +} + +// Implements JWT signature verification. +func builtinJWTVerify(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hasher func() hash.Hash, verify func(publicKey any, digest []byte, signature []byte) error) (bool, error) { + if found, _, _, valid := getTokenFromCache(bctx, jwt, keyStr); found { + return valid, nil + } + + token, err := decodeJWT(jwt) + if err != nil { + return false, err + } + + s, err := builtins.StringOperand(keyStr, 2) + if err != nil { + return false, err + } + + keys, err := getKeysFromCertOrJWK(string(s)) + if err != nil { + return false, err + } + + signature, err := token.decodeSignature() + if err != nil { + return false, err + } + + err = token.decodeHeader() + if err != nil { + return false, err + } + header, err := parseTokenHeader(token) + if err != nil { + return false, err + } + + done := func(valid bool) (bool, error) { + putTokenInCache(bctx, jwt, keyStr, nil, nil, valid) + return valid, nil + } + + // Validate the JWT signature + + // First, check if there's a matching key ID (`kid`) in both token header and key(s). + // If a match is found, verify using only that key. Only applicable when a JWKS was provided. 
+ if header.kid != "" { + if key := getKeyByKid(header.kid, keys); key != nil { + err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), signature) + + return done(err == nil) + } + } + + // If no key ID matched, try to verify using any key in the set + // If an alg is present in both the JWT header and the key, skip verification unless they match + for _, key := range keys { + if key.alg == "" { + // No algorithm provided for the key - this is likely a certificate and not a JWKS, so + // we'll need to verify to find out + err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), signature) + if err == nil { + return done(true) + } + } else { + if header.alg != key.alg { + continue + } + err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), signature) + if err == nil { + return done(true) + } + } + } + + // None of the keys worked, return false + return done(false) +} + +// Implements HS256 (secret) JWT signature verification +func builtinJWTVerifyHS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha256.New, iter) +} + +// Implements HS384 JWT signature verification +func builtinJWTVerifyHS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha512.New384, iter) +} + +// Implements HS512 JWT signature verification +func builtinJWTVerifyHS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha512.New, iter) +} + +func builtinJWTVerifyHS(bctx BuiltinContext, operands []*ast.Term, hashF func() hash.Hash, iter func(*ast.Term) error) error { + jwt, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // Process Secret input + astSecret, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + if 
found, _, _, valid := getTokenFromCache(bctx, jwt, astSecret); found { + return iter(ast.InternedTerm(valid)) + } + + // Decode the JSON Web Token + token, err := decodeJWT(jwt) + if err != nil { + return err + } + + secret := string(astSecret) + + mac := hmac.New(hashF, []byte(secret)) + _, err = mac.Write([]byte(token.header + "." + token.payload)) + if err != nil { + return err + } + + signature, err := token.decodeSignature() + if err != nil { + return err + } + + valid := hmac.Equal(signature, mac.Sum(nil)) + + putTokenInCache(bctx, jwt, astSecret, nil, nil, valid) + + return iter(ast.InternedTerm(valid)) +} + +// -- Full JWT verification and decoding -- + +// Verification constraints. See tokens_test.go for unit tests. + +// tokenConstraints holds decoded JWT verification constraints. +type tokenConstraints struct { + // The set of asymmetric keys we can verify with. + keys []verificationKey + + // The single symmetric key we will verify with. + secret string + + // The algorithm that must be used to verify. + // If "", any algorithm is acceptable. + alg string + + // The required issuer. + // If "", any issuer is acceptable. + iss string + + // The required audience. + // If "", no audience is acceptable. + aud string + + // The time to validate against, or -1 if no constraint set. + // (If unset, the current time will be used.) + time float64 +} + +// tokenConstraintHandler is the handler type for JWT verification constraints. +type tokenConstraintHandler func(value ast.Value, parameters *tokenConstraints) error + +// tokenConstraintTypes maps known JWT verification constraints to handlers. 
+var tokenConstraintTypes = map[string]tokenConstraintHandler{ + "cert": tokenConstraintCert, + "secret": func(value ast.Value, constraints *tokenConstraints) error { + return tokenConstraintString("secret", value, &constraints.secret) + }, + "alg": func(value ast.Value, constraints *tokenConstraints) error { + return tokenConstraintString("alg", value, &constraints.alg) + }, + "iss": func(value ast.Value, constraints *tokenConstraints) error { + return tokenConstraintString("iss", value, &constraints.iss) + }, + "aud": func(value ast.Value, constraints *tokenConstraints) error { + return tokenConstraintString("aud", value, &constraints.aud) + }, + "time": tokenConstraintTime, +} + +// tokenConstraintCert handles the `cert` constraint. +func tokenConstraintCert(value ast.Value, constraints *tokenConstraints) error { + s, ok := value.(ast.String) + if !ok { + return errors.New("cert constraint: must be a string") + } + + keys, err := getKeysFromCertOrJWK(string(s)) + if err != nil { + return err + } + + constraints.keys = keys + return nil +} + +// tokenConstraintTime handles the `time` constraint. +func tokenConstraintTime(value ast.Value, constraints *tokenConstraints) error { + t, err := timeFromValue(value) + if err != nil { + return err + } + constraints.time = t + return nil +} + +func timeFromValue(value ast.Value) (float64, error) { + time, ok := value.(ast.Number) + if !ok { + return 0, errors.New("token time constraint: must be a number") + } + timeFloat, ok := time.Float64() + if !ok { + return 0, errors.New("token time constraint: unvalid float64") + } + if timeFloat < 0 { + return 0, errors.New("token time constraint: must not be negative") + } + return timeFloat, nil +} + +// tokenConstraintString handles string constraints. 
+func tokenConstraintString(name string, value ast.Value, where *string) error { + av, ok := value.(ast.String) + if !ok { + return fmt.Errorf("%s constraint: must be a string", name) + } + *where = string(av) + return nil +} + +// parseTokenConstraints parses the constraints argument. +func parseTokenConstraints(o ast.Object, wallclock *ast.Term) (*tokenConstraints, error) { + constraints := tokenConstraints{ + time: -1, + } + if err := o.Iter(func(k *ast.Term, v *ast.Term) error { + name := string(k.Value.(ast.String)) + handler, ok := tokenConstraintTypes[name] + if ok { + return handler(v.Value, &constraints) + } + // Anything unknown is rejected. + return fmt.Errorf("unknown token validation constraint: %s", name) + }); err != nil { + return nil, err + } + if constraints.time == -1 { // no time provided in constraint object + t, err := timeFromValue(wallclock.Value) + if err != nil { + return nil, err + } + constraints.time = t + } + return &constraints, nil +} + +// validate validates the constraints argument. +func (constraints *tokenConstraints) validate() error { + keys := 0 + if constraints.keys != nil { + keys++ + } + if constraints.secret != "" { + keys++ + } + if keys > 1 { + return errors.New("duplicate key constraints") + } + if keys < 1 { + return errors.New("no key constraint") + } + return nil +} + +// verify verifies a JWT using the constraints and the algorithm from the header +func (constraints *tokenConstraints) verify(kid, alg, header, payload string, signature []byte) error { + // Construct the payload + plaintext := append(append([]byte(header), '.'), []byte(payload)...) 
+ + // If we're configured with asymmetric key(s) then only trust that + if constraints.keys != nil { + if kid != "" { + if key := getKeyByKid(kid, constraints.keys); key != nil { + err := jwsbb.Verify(key.key, alg, plaintext, signature) + if err != nil { + return errSignatureNotVerified + } + return nil + } + } + + verified := false + for _, key := range constraints.keys { + if key.alg == "" { + err := jwsbb.Verify(key.key, alg, plaintext, signature) + if err == nil { + verified = true + break + } + } else { + if alg != key.alg { + continue + } + err := jwsbb.Verify(key.key, alg, plaintext, signature) + if err == nil { + verified = true + break + } + } + } + + if !verified { + return errSignatureNotVerified + } + return nil + } + if constraints.secret != "" { + err := jwsbb.Verify([]byte(constraints.secret), alg, plaintext, signature) + if err != nil { + return errSignatureNotVerified + } + return nil + } + // (*tokenConstraints)validate() should prevent this happening + return errors.New("unexpectedly found no keys to trust") +} + +// validAudience checks the audience of the JWT. +// It returns true if it meets the constraints and false otherwise. +func (constraints *tokenConstraints) validAudience(aud ast.Value) bool { + s, ok := aud.(ast.String) + if ok { + return string(s) == constraints.aud + } + a, ok := aud.(*ast.Array) + if !ok { + return false + } + return a.Until(func(elem *ast.Term) bool { + if s, ok := elem.Value.(ast.String); ok { + return string(s) == constraints.aud + } + return false + }) +} + +// JWT algorithms + +// tokenAlgorithms is the known JWT algorithms +var tokenAlgorithms = map[string]struct{}{ + "RS256": {}, + "RS384": {}, + "RS512": {}, + "PS256": {}, + "PS384": {}, + "PS512": {}, + "ES256": {}, + "ES384": {}, + "ES512": {}, + "HS256": {}, + "HS384": {}, + "HS512": {}, + "EdDSA": {}, +} + +// errSignatureNotVerified is returned when a signature cannot be verified. 
+var errSignatureNotVerified = errors.New("signature not verified") + +// JWT header parsing and parameters. See tokens_test.go for unit tests. + +// tokenHeaderType represents a recognized JWT header field +// tokenHeader is a parsed JWT header +type tokenHeader struct { + alg string + kid string + typ string + cty string + crit map[string]bool + unknown []string +} + +// tokenHeaderHandler handles a JWT header parameters +type tokenHeaderHandler func(header *tokenHeader, value ast.Value) error + +// tokenHeaderTypes maps known JWT header parameters to handlers +var tokenHeaderTypes = map[string]tokenHeaderHandler{ + "alg": func(header *tokenHeader, value ast.Value) error { + return tokenHeaderString("alg", &header.alg, value) + }, + "kid": func(header *tokenHeader, value ast.Value) error { + return tokenHeaderString("kid", &header.kid, value) + }, + "typ": func(header *tokenHeader, value ast.Value) error { + return tokenHeaderString("typ", &header.typ, value) + }, + "cty": func(header *tokenHeader, value ast.Value) error { + return tokenHeaderString("cty", &header.cty, value) + }, + "crit": tokenHeaderCrit, +} + +// tokenHeaderCrit handles the 'crit' header parameter +func tokenHeaderCrit(header *tokenHeader, value ast.Value) error { + v, ok := value.(*ast.Array) + if !ok { + return errors.New("crit: must be a list") + } + header.crit = map[string]bool{} + _ = v.Iter(func(elem *ast.Term) error { + tv, ok := elem.Value.(ast.String) + if !ok { + return errors.New("crit: must be a list of strings") + } + header.crit[string(tv)] = true + return nil + }) + if len(header.crit) == 0 { + return errors.New("crit: must be a nonempty list") // 'MUST NOT' use the empty list + } + return nil +} + +// tokenHeaderString handles string-format JWT header parameters +func tokenHeaderString(name string, where *string, value ast.Value) error { + v, ok := value.(ast.String) + if !ok { + return fmt.Errorf("%s: must be a string", name) + } + *where = string(v) + return nil +} + +// 
parseTokenHeader parses the JWT header. +func parseTokenHeader(token *JSONWebToken) (*tokenHeader, error) { + header := tokenHeader{ + unknown: []string{}, + } + if err := token.decodedHeader.Iter(func(k *ast.Term, v *ast.Term) error { + ks := string(k.Value.(ast.String)) + handler, ok := tokenHeaderTypes[ks] + if !ok { + header.unknown = append(header.unknown, ks) + return nil + } + return handler(&header, v.Value) + }); err != nil { + return nil, err + } + return &header, nil +} + +// validTokenHeader returns true if the JOSE header is valid, otherwise false. +func (header *tokenHeader) valid() bool { + // RFC7515 s4.1.1 alg MUST be present + if header.alg == "" { + return false + } + // RFC7515 4.1.11 JWS is invalid if there is a critical parameter that we did not recognize + for _, u := range header.unknown { + if header.crit[u] { + return false + } + } + return true +} + +func commonBuiltinJWTEncodeSign(bctx BuiltinContext, inputHeaders, jwsPayload, jwkSrc []byte, iter func(*ast.Term) error) error { + keys, err := jwk.Parse(jwkSrc) + if err != nil { + return err + } + + if keys.Len() == 0 { + return errors.New("no keys found in JWK set") + } + + key, ok := keys.Key(0) + if !ok { + return errors.New("failed to get first key from JWK set") + } + + // Parse headers to get algorithm. + headers := jwsbb.HeaderParse(inputHeaders) + algStr, err := jwsbb.HeaderGetString(headers, "alg") + if err != nil { + return fmt.Errorf("missing or invalid 'alg' header: %w", err) + } + // Make sure the algorithm is supported. 
+ _, ok = tokenAlgorithms[algStr] + if !ok { + return fmt.Errorf("unknown JWS algorithm: %s", algStr) + } + + typ, err := jwsbb.HeaderGetString(headers, "typ") + if (err != nil || typ == headerJwt) && !json.Valid(jwsPayload) { + return errors.New("type is JWT but payload is not JSON") + } + + payload := jwsbb.SignBuffer(nil, inputHeaders, jwsPayload, base64.RawURLEncoding, true) + + signature, err := jwsbb.Sign(key, algStr, payload, bctx.Seed) + if err != nil { + return err + } + + jwsCompact := string(payload) + "." + base64.RawURLEncoding.EncodeToString(signature) + + return iter(ast.StringTerm(jwsCompact)) +} + +func builtinJWTEncodeSign(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + inputHeadersAsJSON, err := ast.JSON(operands[0].Value) + if err != nil { + return fmt.Errorf("failed to prepare JWT headers for marshalling: %v", err) + } + + inputHeadersBs, err := json.Marshal(inputHeadersAsJSON) + if err != nil { + return fmt.Errorf("failed to marshal JWT headers: %v", err) + } + + payloadAsJSON, err := ast.JSON(operands[1].Value) + if err != nil { + return fmt.Errorf("failed to prepare JWT payload for marshalling: %v", err) + } + + payloadBs, err := json.Marshal(payloadAsJSON) + if err != nil { + return fmt.Errorf("failed to marshal JWT payload: %v", err) + } + + signatureAsJSON, err := ast.JSON(operands[2].Value) + if err != nil { + return fmt.Errorf("failed to prepare JWT signature for marshalling: %v", err) + } + + signatureBs, err := json.Marshal(signatureAsJSON) + if err != nil { + return fmt.Errorf("failed to marshal JWT signature: %v", err) + } + + return commonBuiltinJWTEncodeSign( + bctx, + inputHeadersBs, + payloadBs, + signatureBs, + iter, + ) +} + +func builtinJWTEncodeSignRaw(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + jwkSrc, err := builtins.StringOperand(operands[2].Value, 3) + if err != nil { + return err + } + inputHeaders, err := builtins.StringOperand(operands[0].Value, 1) 
+ if err != nil { + return err + } + jwsPayload, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + return commonBuiltinJWTEncodeSign(bctx, []byte(inputHeaders), []byte(jwsPayload), []byte(jwkSrc), iter) +} + +// Implements full JWT decoding, validation and verification. +func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + // io.jwt.decode_verify(string, constraints, [valid, header, payload]) + // + // If valid is true then the signature verifies and all constraints are met. + // If valid is false then either the signature did not verify or some constrain + // was not met. + // + // Decoding errors etc are returned as errors. + a := operands[0].Value + + b, err := builtins.ObjectOperand(operands[1].Value, 2) + if err != nil { + return err + } + + unverified := ast.ArrayTerm( + ast.InternedTerm(false), + ast.InternedEmptyObject, + ast.InternedEmptyObject, + ) + constraints, err := parseTokenConstraints(b, bctx.Time) + if err != nil { + return err + } + if err := constraints.validate(); err != nil { + return err + } + var token *JSONWebToken + var payload ast.Object + var header ast.Object + + // FIXME: optimize + k, _ := b.Filter(ast.NewObject( + ast.Item(ast.InternedTerm("secret"), ast.InternedEmptyObject), + ast.Item(ast.InternedTerm("cert"), ast.InternedEmptyObject), + )) + + if found, th, tp, validSignature := getTokenFromCache(bctx, a, k); found { + if !validSignature { + // For the given token and key(s), the signature is invalid + return iter(unverified) + } + + if th != nil && tp != nil { + header = th + payload = tp + } else { + // Cache entry was created by one of the other built-ins that doesn't decode header/payload + + if token, err = decodeJWT(a); err != nil { + return err + } + + header = token.decodedHeader + + p, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + if err != nil { + return fmt.Errorf("JWT payload had invalid encoding: 
%v", err) + } + + payload, err = extractJSONObject(string(p.Value.(ast.String))) + if err != nil { + return err + } + + putTokenInCache(bctx, a, k, header, payload, true) + } + } else { + var p *ast.Term + + for { + // RFC7519 7.2 #1-2 split into parts + if token, err = decodeJWT(a); err != nil { + return err + } + + // RFC7519 7.2 #3, #4, #6 + if err := token.decodeHeader(); err != nil { + return err + } + + // RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields + header, err := parseTokenHeader(token) + if err != nil { + return err + } + + if !header.valid() { + return iter(unverified) + } + + // Check constraints that impact signature verification. + if constraints.alg != "" && constraints.alg != header.alg { + return iter(unverified) + } + + // RFC7159 7.2 #7 verify the signature + signature, err := token.decodeSignature() + if err != nil { + return err + } + + if err := constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil { + if err == errSignatureNotVerified { + putTokenInCache(bctx, a, k, nil, nil, false) + return iter(unverified) + } + return err + } + + // RFC7159 7.2 #9-10 decode the payload + p, err = getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + if err != nil { + return fmt.Errorf("JWT payload had invalid encoding: %v", err) + } + + // RFC7159 7.2 #8 and 5.2 cty + if strings.EqualFold(header.cty, headerJwt) { + // Nested JWT, go round again with payload as first argument + a = p.Value + continue + } + + // Non-nested JWT (or we've reached the bottom of the nesting). 
+ break + } + + payload, err = extractJSONObject(string(p.Value.(ast.String))) + if err != nil { + return err + } + + header = token.decodedHeader + + putTokenInCache(bctx, a, k, header, payload, true) + } + + // Check registered claim names against constraints or environment + // RFC7159 4.1.1 iss + if constraints.iss != "" { + if iss := payload.Get(ast.InternedTerm("iss")); iss != nil { + issVal := string(iss.Value.(ast.String)) + if constraints.iss != issVal { + return iter(unverified) + } + } else { + return iter(unverified) + } + } + // RFC7159 4.1.3 aud + if aud := payload.Get(ast.InternedTerm("aud")); aud != nil { + if !constraints.validAudience(aud.Value) { + return iter(unverified) + } + } else { + if constraints.aud != "" { + return iter(unverified) + } + } + // RFC7159 4.1.4 exp + if exp := payload.Get(ast.InternedTerm("exp")); exp != nil { + switch v := exp.Value.(type) { + case ast.Number: + // constraints.time is in nanoseconds but exp Value is in seconds + compareTime := ast.FloatNumberTerm(constraints.time / 1000000000) + if ast.Compare(compareTime, v) != -1 { + return iter(unverified) + } + default: + return errors.New("exp value must be a number") + } + } + // RFC7159 4.1.5 nbf + if nbf := payload.Get(ast.InternedTerm("nbf")); nbf != nil { + switch v := nbf.Value.(type) { + case ast.Number: + // constraints.time is in nanoseconds but nbf Value is in seconds + compareTime := ast.Number(strconv.FormatFloat(constraints.time/1000000000, 'g', -1, 64)) + if compareTime.Compare(v) == -1 { + return iter(unverified) + } + default: + return errors.New("nbf value must be a number") + } + } + + verified := ast.ArrayTerm( + ast.InternedTerm(true), + ast.NewTerm(header), + ast.NewTerm(payload), + ) + return iter(verified) +} + +// -- Utilities -- + +func decodeJWT(a ast.Value) (*JSONWebToken, error) { + // Parse the JSON Web Token + astEncode, err := builtins.StringOperand(a, 1) + if err != nil { + return nil, err + } + + encoding := string(astEncode) + if 
!strings.Contains(encoding, ".") { + return nil, errors.New("encoded JWT had no period separators") + } + + parts := strings.Split(encoding, ".") + if len(parts) != 3 { + return nil, fmt.Errorf("encoded JWT must have 3 sections, found %d", len(parts)) + } + + return &JSONWebToken{header: parts[0], payload: parts[1], signature: parts[2]}, nil +} + +func (token *JSONWebToken) decodeSignature() ([]byte, error) { + decodedSignature, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.signature)) + if err != nil { + return nil, err + } + + signatureBs, err := builtins.StringOperandByteSlice(decodedSignature.Value, 1) + if err != nil { + return nil, err + } + return signatureBs, nil +} + +// Extract, validate and return the JWT header as an ast.Object. +func validateJWTHeader(h string) (ast.Object, error) { + header, err := extractJSONObject(h) + if err != nil { + return nil, fmt.Errorf("bad JWT header: %v", err) + } + + // There are two kinds of JWT tokens, a JSON Web Signature (JWS) and + // a JSON Web Encryption (JWE). The latter is very involved, and we + // won't support it for now. + // This code checks which kind of JWT we are dealing with according to + // RFC 7516 Section 9: https://tools.ietf.org/html/rfc7516#section-9 + if header.Get(ast.InternedTerm("enc")) != nil { + return nil, errors.New("JWT is a JWE object, which is not supported") + } + + return header, nil +} + +func extractJSONObject(s string) (ast.Object, error) { + // XXX: This code relies on undocumented behavior of Go's + // json.Unmarshal using the last occurrence of duplicate keys in a JSON + // Object. If duplicate keys are present in a JWT, the last must be + // used or the token rejected. Since detecting duplicates is tantamount + // to parsing it ourselves, we're relying on the Go implementation + // using the last occurring instance of the key, which is the behavior + // as of Go 1.8.1. 
+ v, err := getResult(builtinJSONUnmarshal, ast.StringTerm(s)) + if err != nil { + return nil, fmt.Errorf("invalid JSON: %v", err) + } + + o, ok := v.Value.(ast.Object) + if !ok { + return nil, errors.New("decoded JSON type was not an Object") + } + + return o, nil +} + +// getInputSha returns the SHA checksum of the input +func getInputSHA(input []byte, h func() hash.Hash) []byte { + if h == nil { + return input + } + + hasher := h() + hasher.Write(input) + return hasher.Sum(nil) +} + +type jwtCacheEntry struct { + payload ast.Object + header ast.Object + validSignature bool +} + +const tokenCacheName = "io_jwt" + +func getTokenFromCache(bctx BuiltinContext, serializedJwt ast.Value, publicKey ast.Value) (bool, ast.Object, ast.Object, bool) { + if bctx.InterQueryBuiltinValueCache == nil { + return false, nil, nil, false + } + + c := bctx.InterQueryBuiltinValueCache.GetCache(tokenCacheName) + if c == nil { + return false, nil, nil, false + } + + key := createTokenCacheKey(serializedJwt, publicKey) + + entry, ok := c.Get(key) + if !ok { + return false, nil, nil, false + } + + if jwtEntry, ok := entry.(jwtCacheEntry); ok { + return true, jwtEntry.header, jwtEntry.payload, jwtEntry.validSignature + } + + return false, nil, nil, false +} + +func putTokenInCache(bctx BuiltinContext, serializedJwt ast.Value, publicKey ast.Value, header ast.Object, payload ast.Object, validSignature bool) { + if bctx.InterQueryBuiltinValueCache == nil { + return + } + + c := bctx.InterQueryBuiltinValueCache.GetCache(tokenCacheName) + if c == nil { + return + } + + key := createTokenCacheKey(serializedJwt, publicKey) + + c.Insert(key, jwtCacheEntry{header: header, payload: payload, validSignature: validSignature}) +} + +func createTokenCacheKey(serializedJwt ast.Value, publicKey ast.Value) ast.Value { + // We need to create a key that is unique to the serialized JWT (for lookup) and the public key used to verify it, + // so that we don't get a misleading cached validation result for a 
different, invalid key. + return ast.NewArray(ast.NewTerm(serializedJwt), ast.NewTerm(publicKey)) +} + +func init() { + // By default, the JWT cache is disabled. + disabled := true + var tokenCache = cache.NamedValueCacheConfig{ + Disabled: &disabled, + } + cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(tokenCacheName, &tokenCache) + + RegisterBuiltinFunc(ast.JWTDecode.Name, builtinJWTDecode) + RegisterBuiltinFunc(ast.JWTVerifyRS256.Name, builtinJWTVerifyRS256) + RegisterBuiltinFunc(ast.JWTVerifyRS384.Name, builtinJWTVerifyRS384) + RegisterBuiltinFunc(ast.JWTVerifyRS512.Name, builtinJWTVerifyRS512) + RegisterBuiltinFunc(ast.JWTVerifyPS256.Name, builtinJWTVerifyPS256) + RegisterBuiltinFunc(ast.JWTVerifyPS384.Name, builtinJWTVerifyPS384) + RegisterBuiltinFunc(ast.JWTVerifyPS512.Name, builtinJWTVerifyPS512) + RegisterBuiltinFunc(ast.JWTVerifyES256.Name, builtinJWTVerifyES256) + RegisterBuiltinFunc(ast.JWTVerifyES384.Name, builtinJWTVerifyES384) + RegisterBuiltinFunc(ast.JWTVerifyES512.Name, builtinJWTVerifyES512) + RegisterBuiltinFunc(ast.JWTVerifyEdDSA.Name, builtinJWTVerifyEdDSA) + RegisterBuiltinFunc(ast.JWTVerifyHS256.Name, builtinJWTVerifyHS256) + RegisterBuiltinFunc(ast.JWTVerifyHS384.Name, builtinJWTVerifyHS384) + RegisterBuiltinFunc(ast.JWTVerifyHS512.Name, builtinJWTVerifyHS512) + RegisterBuiltinFunc(ast.JWTDecodeVerify.Name, builtinJWTDecodeVerify) + RegisterBuiltinFunc(ast.JWTEncodeSignRaw.Name, builtinJWTEncodeSignRaw) + RegisterBuiltinFunc(ast.JWTEncodeSign.Name, builtinJWTEncodeSign) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go new file mode 100644 index 0000000000..52451dc6bd --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go @@ -0,0 +1,897 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "bytes" + "fmt" + "io" + "slices" + "strings" + + iStrs "github.com/open-policy-agent/opa/internal/strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +const ( + minLocationWidth = 5 // len("query") + maxIdealLocationWidth = 64 + columnPadding = 4 + maxExprVarWidth = 32 + maxPrettyExprVarWidth = 64 +) + +// Op defines the types of tracing events. +type Op string + +const ( + // EnterOp is emitted when a new query is about to be evaluated. + EnterOp Op = "Enter" + + // ExitOp is emitted when a query has evaluated to true. + ExitOp Op = "Exit" + + // EvalOp is emitted when an expression is about to be evaluated. + EvalOp Op = "Eval" + + // RedoOp is emitted when an expression, rule, or query is being re-evaluated. + RedoOp Op = "Redo" + + // SaveOp is emitted when an expression is saved instead of evaluated + // during partial evaluation. + SaveOp Op = "Save" + + // FailOp is emitted when an expression evaluates to false. + FailOp Op = "Fail" + + // DuplicateOp is emitted when a query has produced a duplicate value. The search + // will stop at the point where the duplicate was emitted and backtrack. + DuplicateOp Op = "Duplicate" + + // NoteOp is emitted when an expression invokes a tracing built-in function. + NoteOp Op = "Note" + + // IndexOp is emitted during an expression evaluation to represent lookup + // matches. + IndexOp Op = "Index" + + // WasmOp is emitted when resolving a ref using an external + // Resolver. + WasmOp Op = "Wasm" + + // UnifyOp is emitted when two terms are unified. Node will be set to an + // equality expression with the two terms. This Node will not have location + // info. + UnifyOp Op = "Unify" + FailedAssertionOp Op = "FailedAssertion" +) + +// VarMetadata provides some user facing information about +// a variable in some policy. 
+type VarMetadata struct { + Name ast.Var `json:"name"` + Location *ast.Location `json:"location"` +} + +// Event contains state associated with a tracing event. +type Event struct { + Op Op // Identifies type of event. + Node ast.Node // Contains AST node relevant to the event. + Location *ast.Location // The location of the Node this event relates to. + QueryID uint64 // Identifies the query this event belongs to. + ParentID uint64 // Identifies the parent query this event belongs to. + Locals *ast.ValueMap // Contains local variable bindings from the query context. Nil if variables were not included in the trace event. + LocalMetadata map[ast.Var]VarMetadata // Contains metadata for the local variable bindings. Nil if variables were not included in the trace event. + Message string // Contains message for Note events. + Ref *ast.Ref // Identifies the subject ref for the event. Only applies to Index and Wasm operations. + + input *ast.Term + bindings *bindings + localVirtualCacheSnapshot *ast.ValueMap +} + +func (evt *Event) WithInput(input *ast.Term) *Event { + evt.input = input + return evt +} + +// HasRule returns true if the Event contains an ast.Rule. +func (evt *Event) HasRule() bool { + _, ok := evt.Node.(*ast.Rule) + return ok +} + +// HasBody returns true if the Event contains an ast.Body. +func (evt *Event) HasBody() bool { + _, ok := evt.Node.(ast.Body) + return ok +} + +// HasExpr returns true if the Event contains an ast.Expr. +func (evt *Event) HasExpr() bool { + _, ok := evt.Node.(*ast.Expr) + return ok +} + +// Equal returns true if this event is equal to the other event. 
+func (evt *Event) Equal(other *Event) bool { + if evt.Op != other.Op { + return false + } + if evt.QueryID != other.QueryID { + return false + } + if evt.ParentID != other.ParentID { + return false + } + if !evt.equalNodes(other) { + return false + } + return evt.Locals.Equal(other.Locals) +} + +func (evt *Event) String() string { + return fmt.Sprintf("%v %v %v (qid=%v, pqid=%v)", evt.Op, evt.Node, evt.Locals, evt.QueryID, evt.ParentID) +} + +// Input returns the input object as it was at the event. +func (evt *Event) Input() *ast.Term { + return evt.input +} + +// Plug plugs event bindings into the provided ast.Term. Because bindings are mutable, this only makes sense to do when +// the event is emitted rather than on recorded trace events as the bindings are going to be different by then. +func (evt *Event) Plug(term *ast.Term) *ast.Term { + return evt.bindings.Plug(term) +} + +func (evt *Event) equalNodes(other *Event) bool { + switch a := evt.Node.(type) { + case ast.Body: + if b, ok := other.Node.(ast.Body); ok { + return a.Equal(b) + } + case *ast.Rule: + if b, ok := other.Node.(*ast.Rule); ok { + return a.Equal(b) + } + case *ast.Expr: + if b, ok := other.Node.(*ast.Expr); ok { + return a.Equal(b) + } + case nil: + return other.Node == nil + } + return false +} + +// Tracer defines the interface for tracing in the top-down evaluation engine. +// +// Deprecated: Use QueryTracer instead. +type Tracer interface { + Enabled() bool + Trace(*Event) +} + +// QueryTracer defines the interface for tracing in the top-down evaluation engine. +// The implementation can provide additional configuration to modify the tracing +// behavior for query evaluations. +type QueryTracer interface { + Enabled() bool + TraceEvent(Event) + Config() TraceConfig +} + +// TraceConfig defines some common configuration for Tracer implementations +type TraceConfig struct { + PlugLocalVars bool // Indicate whether to plug local variable bindings before calling into the tracer. 
+} + +// legacyTracer Implements the QueryTracer interface by wrapping an older Tracer instance. +type legacyTracer struct { + t Tracer +} + +func (l *legacyTracer) Enabled() bool { + return l.t.Enabled() +} + +func (*legacyTracer) Config() TraceConfig { + return TraceConfig{ + PlugLocalVars: true, // For backwards compatibility old tracers will plug local variables + } +} + +func (l *legacyTracer) TraceEvent(evt Event) { + l.t.Trace(&evt) +} + +// WrapLegacyTracer will create a new QueryTracer which wraps an +// older Tracer instance. +func WrapLegacyTracer(tracer Tracer) QueryTracer { + return &legacyTracer{t: tracer} +} + +// BufferTracer implements the Tracer and QueryTracer interface by +// simply buffering all events received. +type BufferTracer []*Event + +// NewBufferTracer returns a new BufferTracer. +func NewBufferTracer() *BufferTracer { + return &BufferTracer{} +} + +// Enabled always returns true if the BufferTracer is instantiated. +func (b *BufferTracer) Enabled() bool { + return b != nil +} + +// Trace adds the event to the buffer. +// +// Deprecated: Use TraceEvent instead. +func (b *BufferTracer) Trace(evt *Event) { + *b = append(*b, evt) +} + +// TraceEvent adds the event to the buffer. +func (b *BufferTracer) TraceEvent(evt Event) { + *b = append(*b, &evt) +} + +// Config returns the Tracers standard configuration +func (*BufferTracer) Config() TraceConfig { + return TraceConfig{PlugLocalVars: true} +} + +// PrettyTrace pretty prints the trace to the writer. 
func PrettyTrace(w io.Writer, trace []*Event) {
	PrettyTraceWithOpts(w, trace, PrettyTraceOptions{})
}

// PrettyTraceWithLocation prints the trace to the writer and includes location information
func PrettyTraceWithLocation(w io.Writer, trace []*Event) {
	PrettyTraceWithOpts(w, trace, PrettyTraceOptions{Locations: true})
}

// PrettyTraceOptions controls which columns PrettyTraceWithOpts renders for
// each trace event.
type PrettyTraceOptions struct {
	Locations      bool // Include location information
	ExprVariables  bool // Include variables found in the expression
	LocalVariables bool // Include all local variables
}

// traceRow is one row of the rendered trace table; each element is a cell.
type traceRow []string

// add appends a cell to the row.
func (r *traceRow) add(s string) {
	*r = append(*r, s)
}

// traceTable accumulates rows and tracks the maximum width seen per column so
// the table can be written with aligned columns.
type traceTable struct {
	rows      []traceRow
	maxWidths []int
}

// add appends a row and updates the per-column maximum widths.
func (t *traceTable) add(row traceRow) {
	t.rows = append(t.rows, row)
	for i := range row {
		if i >= len(t.maxWidths) {
			t.maxWidths = append(t.maxWidths, len(row[i]))
		} else if len(row[i]) > t.maxWidths[i] {
			t.maxWidths[i] = len(row[i])
		}
	}
}

// write renders the accumulated rows to w. Every column except the last is
// left-aligned and padded to its maximum width plus padding; the last column
// is written as-is to avoid trailing whitespace.
func (t *traceTable) write(w io.Writer, padding int) {
	for _, row := range t.rows {
		for i, cell := range row {
			width := t.maxWidths[i] + padding
			if i < len(row)-1 {
				_, _ = fmt.Fprintf(w, "%-*s ", width, cell)
			} else {
				_, _ = fmt.Fprintf(w, "%s", cell)
			}
		}
		_, _ = fmt.Fprintln(w)
	}
}

// PrettyTraceWithOpts pretty prints the trace to the writer, including the
// columns selected by opts (locations, expression variables, local variables).
func PrettyTraceWithOpts(w io.Writer, trace []*Event, opts PrettyTraceOptions) {
	depths := depths{}

	// FIXME: Can we shorten each location as we process each trace event instead of beforehand?
	filePathAliases, _ := getShortenedFileNames(trace)

	table := traceTable{}

	for _, event := range trace {
		depth := depths.GetOrSet(event.QueryID, event.ParentID)
		row := traceRow{}

		if opts.Locations {
			location := formatLocation(event, filePathAliases)
			row.add(location)
		}

		row.add(formatEvent(event, depth))

		if opts.ExprVariables {
			vars := exprLocalVars(event)
			keys := sortedKeys(vars)

			buf := new(bytes.Buffer)
			buf.WriteString("{")
			for i, k := range keys {
				if i > 0 {
					buf.WriteString(", ")
				}
				_, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(vars.Get(k).String(), maxExprVarWidth))
			}
			buf.WriteString("}")
			row.add(buf.String())
		}

		if opts.LocalVariables {
			if locals := event.Locals; locals != nil {
				keys := sortedKeys(locals)

				buf := new(bytes.Buffer)
				buf.WriteString("{")
				for i, k := range keys {
					if i > 0 {
						buf.WriteString(", ")
					}
					_, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(locals.Get(k).String(), maxExprVarWidth))
				}
				buf.WriteString("}")
				row.add(buf.String())
			} else {
				row.add("{}")
			}
		}

		table.add(row)
	}

	table.write(w, columnPadding)
}

// sortedKeys returns the keys of vm sorted by their string representation.
func sortedKeys(vm *ast.ValueMap) []ast.Value {
	keys := make([]ast.Value, 0, vm.Len())
	vm.Iter(func(k, _ ast.Value) bool {
		keys = append(keys, k)
		return false
	})
	slices.SortFunc(keys, func(a, b ast.Value) int {
		return strings.Compare(a.String(), b.String())
	})
	return keys
}

// exprLocalVars collects the values of local variables referenced by the
// event's node. For rule events, only variables in the rule head are
// considered; for all other nodes the local virtual-cache snapshot is merged
// in as well.
func exprLocalVars(e *Event) *ast.ValueMap {
	vars := ast.NewValueMap()

	findVars := func(term *ast.Term) bool {
		if name, ok := term.Value.(ast.Var); ok {
			if meta, ok := e.LocalMetadata[name]; ok {
				if val := e.Locals.Get(name); val != nil {
					vars.Put(meta.Name, val)
				}
			}
		}
		return false
	}

	if r, ok := e.Node.(*ast.Rule); ok {
		// We're only interested in vars in the head, not the body
		ast.WalkTerms(r.Head, findVars)
		return vars
	}

	// The local cache snapshot only contains a snapshot for those refs present in the event node,
	// so they can all be added to the vars map.
	e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool {
		vars.Put(k, v)
		return false
	})

	ast.WalkTerms(e.Node, findVars)

	return vars
}

// formatEvent renders one event as a single line of text, indented according
// to depth. Note events show their message; other events show the rule path,
// ref, or rewritten node.
func formatEvent(event *Event, depth int) string {
	padding := formatEventPadding(event, depth)
	if event.Op == NoteOp {
		return fmt.Sprintf("%v%v %q", padding, event.Op, event.Message)
	}

	var details any
	if node, ok := event.Node.(*ast.Rule); ok {
		details = ast.RulePath(node)
	} else if event.Ref != nil {
		details = event.Ref
	} else {
		details = rewrite(event).Node
	}

	template := "%v%v %v"
	opts := []any{padding, event.Op, details}

	if event.Message != "" {
		template += " %v"
		opts = append(opts, event.Message)
	}

	return fmt.Sprintf(template, opts...)
}

// formatEventPadding returns the "| " indentation prefix for the event.
func formatEventPadding(event *Event, depth int) string {
	spaces := formatEventSpaces(event, depth)
	if spaces > 1 {
		return strings.Repeat("| ", spaces-1)
	}
	return ""
}

// formatEventSpaces computes the indentation level for an event: Enter events
// and non-expression Redo events print at the query's own depth, everything
// else one level deeper.
func formatEventSpaces(event *Event, depth int) int {
	switch event.Op {
	case EnterOp:
		return depth
	case RedoOp:
		if _, ok := event.Node.(*ast.Expr); !ok {
			return depth
		}
	}
	return depth + 1
}

// getShortenedFileNames will return a map of file paths to shortened aliases
// that were found in the trace.
It also returns the longest location expected +func getShortenedFileNames(trace []*Event) (map[string]string, int) { + // Get a deduplicated list of all file paths + // and the longest file path size + fpAliases := map[string]string{} + var canShorten []string + longestLocation := 0 + for _, event := range trace { + if event.Location != nil { + if event.Location.File != "" { + // length of ":" + curLen := len(event.Location.File) + numDigits10(event.Location.Row) + 1 + if curLen > longestLocation { + longestLocation = curLen + } + + if _, ok := fpAliases[event.Location.File]; ok { + continue + } + + canShorten = append(canShorten, event.Location.File) + + // Default to just alias their full path + fpAliases[event.Location.File] = event.Location.File + } else { + // length of ":" + curLen := minLocationWidth + numDigits10(event.Location.Row) + 1 + if curLen > longestLocation { + longestLocation = curLen + } + } + } + } + + if len(canShorten) > 0 && longestLocation > maxIdealLocationWidth { + fpAliases, longestLocation = iStrs.TruncateFilePaths(maxIdealLocationWidth, longestLocation, canShorten...) + } + + return fpAliases, longestLocation +} + +func numDigits10(n int) int { + if n < 10 { + return 1 + } + return numDigits10(n/10) + 1 +} + +func formatLocation(event *Event, fileAliases map[string]string) string { + + location := event.Location + if location == nil { + return "" + } + + if location.File == "" { + return fmt.Sprintf("query:%v", location.Row) + } + + return fmt.Sprintf("%v:%v", fileAliases[location.File], location.Row) +} + +// depths is a helper for computing the depth of an event. Events within the +// same query all have the same depth. The depth of query is +// depth(parent(query))+1. 
+type depths map[uint64]int + +func (ds depths) GetOrSet(qid uint64, pqid uint64) int { + depth := ds[qid] + if depth == 0 { + depth = ds[pqid] + depth++ + ds[qid] = depth + } + return depth +} + +func builtinTrace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + str, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return handleBuiltinErr(ast.Trace.Name, bctx.Location, err) + } + + if !bctx.TraceEnabled { + return iter(ast.InternedTerm(true)) + } + + evt := Event{ + Op: NoteOp, + Location: bctx.Location, + QueryID: bctx.QueryID, + ParentID: bctx.ParentID, + Message: string(str), + } + + for i := range bctx.QueryTracers { + bctx.QueryTracers[i].TraceEvent(evt) + } + + return iter(ast.InternedTerm(true)) +} + +func rewrite(event *Event) *Event { + + cpy := *event + + var node ast.Node + + switch v := event.Node.(type) { + case *ast.Expr: + expr := v.Copy() + + // Hide generated local vars in 'key' position that have not been + // rewritten. 
+ if ev, ok := v.Terms.(*ast.Every); ok { + if kv, ok := ev.Key.Value.(ast.Var); ok { + if rw, ok := cpy.LocalMetadata[kv]; !ok || rw.Name.IsGenerated() { + expr.Terms.(*ast.Every).Key = nil + } + } + } + node = expr + case ast.Body: + node = v.Copy() + case *ast.Rule: + node = v.Copy() + } + + _, _ = ast.TransformVars(node, func(v ast.Var) (ast.Value, error) { + if meta, ok := cpy.LocalMetadata[v]; ok { + return meta.Name, nil + } + return v, nil + }) + + cpy.Node = node + + return &cpy +} + +type varInfo struct { + VarMetadata + val ast.Value + exprLoc *ast.Location + col int // 0-indexed column +} + +func (v varInfo) Value() string { + if v.val != nil { + return v.val.String() + } + return "undefined" +} + +func (v varInfo) Title() string { + if v.exprLoc != nil && v.exprLoc.Text != nil { + return string(v.exprLoc.Text) + } + return string(v.Name) +} + +func padLocationText(loc *ast.Location) string { + if loc == nil { + return "" + } + + text := string(loc.Text) + + if loc.Col == 0 { + return text + } + + buf := new(bytes.Buffer) + j := 0 + for i := 1; i < loc.Col; i++ { + if len(loc.Tabs) > 0 && j < len(loc.Tabs) && loc.Tabs[j] == i { + buf.WriteString("\t") + j++ + } else { + buf.WriteString(" ") + } + } + + buf.WriteString(text) + return buf.String() +} + +type PrettyEventOpts struct { + PrettyVars bool +} + +func walkTestTerms(x any, f func(*ast.Term) bool) { + var vis *ast.GenericVisitor + vis = ast.NewGenericVisitor(func(x any) bool { + switch x := x.(type) { + case ast.Call: + for _, t := range x[1:] { + vis.Walk(t) + } + return true + case *ast.Expr: + if x.IsCall() { + for _, o := range x.Operands() { + vis.Walk(o) + } + for i := range x.With { + vis.Walk(x.With[i]) + } + return true + } + case *ast.Term: + return f(x) + case *ast.With: + vis.Walk(x.Value) + return true + } + return false + }) + vis.Walk(x) +} + +func PrettyEvent(w io.Writer, e *Event, opts PrettyEventOpts) error { + if !opts.PrettyVars { + _, _ = fmt.Fprintln(w, 
padLocationText(e.Location)) + return nil + } + + buf := new(bytes.Buffer) + exprVars := map[string]varInfo{} + + findVars := func(unknownAreUndefined bool) func(term *ast.Term) bool { + return func(term *ast.Term) bool { + if term.Location == nil { + return false + } + + switch v := term.Value.(type) { + case *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension: + // we don't report on the internals of a comprehension, as it's already evaluated, and we won't have the local vars. + return true + case ast.Var: + var info *varInfo + if meta, ok := e.LocalMetadata[v]; ok { + info = &varInfo{ + VarMetadata: meta, + val: e.Locals.Get(v), + exprLoc: term.Location, + } + } else if unknownAreUndefined { + info = &varInfo{ + VarMetadata: VarMetadata{Name: v}, + exprLoc: term.Location, + col: term.Location.Col, + } + } + + if info != nil { + if v, exists := exprVars[info.Title()]; !exists || v.val == nil { + if term.Location != nil { + info.col = term.Location.Col + } + exprVars[info.Title()] = *info + } + } + } + return false + } + } + + expr, ok := e.Node.(*ast.Expr) + if !ok || expr == nil { + return nil + } + + base := expr.BaseCogeneratedExpr() + exprText := padLocationText(base.Location) + buf.WriteString(exprText) + + e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { + var info *varInfo + switch k := k.(type) { + case ast.Ref: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k[0].Location, + col: k[0].Location.Col, + } + case *ast.ArrayComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k.Term.Location, + col: k.Term.Location.Col, + } + case *ast.SetComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k.Term.Location, + col: k.Term.Location.Col, + } + case *ast.ObjectComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: 
k.Key.Location, + col: k.Key.Location.Col, + } + } + + if info != nil { + exprVars[info.Title()] = *info + } + + return false + }) + + // If the expression is negated, we can't confidently assert that vars with unknown values are 'undefined', + // since the compiler might have opted out of the necessary rewrite. + walkTestTerms(expr, findVars(!expr.Negated)) + coExprs := expr.CogeneratedExprs() + for _, coExpr := range coExprs { + // Only the current "co-expr" can have undefined vars, if we don't know the value for a var in any other co-expr, + // it's unknown, not undefined. A var can be unknown if it hasn't been assigned a value yet, because the co-expr + // hasn't been evaluated yet (the fail happened before it). + walkTestTerms(coExpr, findVars(false)) + } + + printPrettyVars(buf, exprVars) + _, _ = fmt.Fprint(w, buf.String()) + return nil +} + +func printPrettyVars(w *bytes.Buffer, exprVars map[string]varInfo) { + containsTabs := false + varRows := make(map[int]any) + for _, info := range exprVars { + if len(info.exprLoc.Tabs) > 0 { + containsTabs = true + } + varRows[info.exprLoc.Row] = nil + } + + if containsTabs && len(varRows) > 1 { + // We can't (currently) reliably point to var locations when they are on different rows that contain tabs. + // So we'll just print them in alphabetical order instead. 
+ byName := make([]varInfo, 0, len(exprVars)) + for _, info := range exprVars { + byName = append(byName, info) + } + slices.SortStableFunc(byName, func(a, b varInfo) int { + return strings.Compare(a.Title(), b.Title()) + }) + + w.WriteString("\n\nWhere:\n") + for _, info := range byName { + fmt.Fprintf(w, "\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth)) + } + + return + } + + byCol := make([]varInfo, 0, len(exprVars)) + for _, info := range exprVars { + byCol = append(byCol, info) + } + slices.SortFunc(byCol, func(a, b varInfo) int { + // sort first by column, then by reverse row (to present vars in the same order they appear in the expr) + if a.col == b.col { + if a.exprLoc.Row == b.exprLoc.Row { + return strings.Compare(a.Title(), b.Title()) + } + return b.exprLoc.Row - a.exprLoc.Row + } + return a.col - b.col + }) + + if len(byCol) == 0 { + return + } + + w.WriteString("\n") + printArrows(w, byCol, -1) + for i := len(byCol) - 1; i >= 0; i-- { + w.WriteString("\n") + printArrows(w, byCol, i) + } +} + +func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) { + prevCol := 0 + var slice []varInfo + if printValueAt >= 0 { + slice = l[:printValueAt+1] + } else { + slice = l + } + isFirst := true + for i, info := range slice { + + isLast := i >= len(slice)-1 + col := info.col + + if !isLast && col == l[i+1].col { + // We're sharing the same column with another, subsequent var + continue + } + + spaces := col - 1 + if i > 0 && !isFirst { + spaces = (col - prevCol) - 1 + } + + for j := range spaces { + tab := false + if slices.Contains(info.exprLoc.Tabs, j+prevCol+1) { + w.WriteString("\t") + tab = true + } + if !tab { + w.WriteString(" ") + } + } + + if isLast && printValueAt >= 0 { + valueStr := iStrs.Truncate(info.Value(), maxPrettyExprVarWidth) + if (i > 0 && col == l[i-1].col) || (i < len(l)-1 && col == l[i+1].col) { + // There is another var on this column, so we need to include the name to differentiate them. 
+ fmt.Fprintf(w, "%s: %s", info.Title(), valueStr) + } else { + w.WriteString(valueStr) + } + } else { + w.WriteString("|") + } + prevCol = col + isFirst = false + } +} + +func init() { + RegisterBuiltinFunc(ast.Trace.Name, builtinTrace) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go new file mode 100644 index 0000000000..0e23d2721b --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go @@ -0,0 +1,82 @@ +// Copyright 2022 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "github.com/open-policy-agent/opa/v1/ast" +) + +func builtinIsNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Number: + return iter(ast.InternedTerm(true)) + default: + return iter(ast.InternedTerm(false)) + } +} + +func builtinIsString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.String: + return iter(ast.InternedTerm(true)) + default: + return iter(ast.InternedTerm(false)) + } +} + +func builtinIsBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Boolean: + return iter(ast.InternedTerm(true)) + default: + return iter(ast.InternedTerm(false)) + } +} + +func builtinIsArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case *ast.Array: + return iter(ast.InternedTerm(true)) + default: + return iter(ast.InternedTerm(false)) + } +} + +func builtinIsSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Set: + return iter(ast.InternedTerm(true)) + default: + return iter(ast.InternedTerm(false)) + 
} +} + +func builtinIsObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Object: + return iter(ast.InternedTerm(true)) + default: + return iter(ast.InternedTerm(false)) + } +} + +func builtinIsNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Null: + return iter(ast.InternedTerm(true)) + default: + return iter(ast.InternedTerm(false)) + } +} + +func init() { + RegisterBuiltinFunc(ast.IsNumber.Name, builtinIsNumber) + RegisterBuiltinFunc(ast.IsString.Name, builtinIsString) + RegisterBuiltinFunc(ast.IsBoolean.Name, builtinIsBoolean) + RegisterBuiltinFunc(ast.IsArray.Name, builtinIsArray) + RegisterBuiltinFunc(ast.IsSet.Name, builtinIsSet) + RegisterBuiltinFunc(ast.IsObject.Name, builtinIsObject) + RegisterBuiltinFunc(ast.IsNull.Name, builtinIsNull) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go new file mode 100644 index 0000000000..9c079500c2 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go @@ -0,0 +1,36 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "errors" + + "github.com/open-policy-agent/opa/v1/ast" +) + +func builtinTypeName(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Null: + return iter(ast.InternedTerm("null")) + case ast.Boolean: + return iter(ast.InternedTerm("boolean")) + case ast.Number: + return iter(ast.InternedTerm("number")) + case ast.String: + return iter(ast.InternedTerm("string")) + case *ast.Array: + return iter(ast.InternedTerm("array")) + case ast.Object: + return iter(ast.InternedTerm("object")) + case ast.Set: + return iter(ast.InternedTerm("set")) + } + + return errors.New("illegal value") +} + +func init() { + RegisterBuiltinFunc(ast.TypeNameBuiltin.Name, builtinTypeName) +} diff --git a/vendor/github.com/open-policy-agent/opa/topdown/uuid.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go similarity index 89% rename from vendor/github.com/open-policy-agent/opa/topdown/uuid.go rename to vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go index d3a7a5f900..141fb908bd 100644 --- a/vendor/github.com/open-policy-agent/opa/topdown/uuid.go +++ b/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go @@ -5,9 +5,9 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/uuid" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type uuidCachingKey string @@ -26,7 +26,7 @@ func builtinUUIDRFC4122(bctx BuiltinContext, operands []*ast.Term, iter func(*as return err } - result := ast.NewTerm(ast.String(s)) + result := ast.StringTerm(s) bctx.Cache.Put(key, result) return iter(result) diff --git a/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go b/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go new file mode 100644 index 0000000000..9f39f12567 --- /dev/null +++ 
b/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go @@ -0,0 +1,164 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "github.com/open-policy-agent/opa/v1/ast" +) + +func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + input := operands[0] + + if pathIsWildcard(operands) { + // When the path assignment is a wildcard: walk(input, [_, value]) + // we may skip the path construction entirely, and simply return + // same pointer in each iteration. This is a *much* more efficient + // path when only the values are needed. + return walkNoPath(ast.ArrayTerm(ast.InternedEmptyArray, input), iter) + } + + filter := getOutputPath(operands) + return walk(filter, nil, input, iter) +} + +func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error { + if filter == nil || filter.Len() == 0 { + if path == nil { + if err := iter(ast.ArrayTerm(ast.NewTerm(ast.InternedEmptyArrayValue), input)); err != nil { + return err + } + } else { + // Shallow copy, as while the array is modified, the elements are not + pathCopy := copyShallow(path) + + // TODO(ae): I'd *really* like these terms to be retrieved from a sync.Pool, and + // returned after iter is called. However, all my atttempts to do this have failed + // as there seems to be something holding on to these references after the call, + // leading to modifications that entirely alter the results. Perhaps this is not + // possible to do, but if it is,it would be a huge performance win. 
+ if err := iter(ast.ArrayTerm(ast.NewTerm(pathCopy), input)); err != nil { + return err + } + } + } + + if filter != nil && filter.Len() > 0 { + key := filter.Elem(0) + filter = filter.Slice(1, -1) + if key.IsGround() { + if term := input.Get(key); term != nil { + return walk(filter, pathAppend(path, key), term, iter) + } + return nil + } + } + + switch v := input.Value.(type) { + case *ast.Array: + for i := range v.Len() { + if err := walk(filter, pathAppend(path, ast.InternedTerm(i)), v.Elem(i), iter); err != nil { + return err + } + } + case ast.Object: + for _, k := range v.Keys() { + if err := walk(filter, pathAppend(path, k), v.Get(k), iter); err != nil { + return err + } + } + case ast.Set: + for _, elem := range v.Slice() { + if err := walk(filter, pathAppend(path, elem), elem, iter); err != nil { + return err + } + } + } + + return nil +} + +func walkNoPath(input *ast.Term, iter func(*ast.Term) error) error { + // Note: the path array is embedded in the input from the start here + // in order to avoid an extra allocation per iteration. This leads to + // a little convoluted code below in order to extract and set the value, + // but since walk is commonly used to traverse large data structures, + // the performance gain is worth it. 
+ if err := iter(input); err != nil { + return err + } + + inputArray := input.Value.(*ast.Array) + value := inputArray.Get(ast.InternedTerm(1)).Value + + switch v := value.(type) { + case ast.Object: + for _, k := range v.Keys() { + inputArray.Set(1, v.Get(k)) + if err := walkNoPath(input, iter); err != nil { + return err + } + } + case *ast.Array: + for i := range v.Len() { + inputArray.Set(1, v.Elem(i)) + if err := walkNoPath(input, iter); err != nil { + return err + } + } + case ast.Set: + for _, elem := range v.Slice() { + inputArray.Set(1, elem) + if err := walkNoPath(input, iter); err != nil { + return err + } + } + } + + return nil +} + +func pathAppend(path *ast.Array, key *ast.Term) *ast.Array { + if path == nil { + return ast.NewArray(key) + } + + return path.Append(key) +} + +func getOutputPath(operands []*ast.Term) *ast.Array { + if len(operands) == 2 { + if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 { + if path, ok := arr.Elem(0).Value.(*ast.Array); ok { + return path + } + } + } + return nil +} + +func pathIsWildcard(operands []*ast.Term) bool { + if len(operands) == 2 { + if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 { + if v, ok := arr.Elem(0).Value.(ast.Var); ok { + return v.IsWildcard() + } + } + } + return false +} + +func copyShallow(arr *ast.Array) *ast.Array { + cpy := make([]*ast.Term, 0, arr.Len()) + + arr.Foreach(func(elem *ast.Term) { + cpy = append(cpy, elem) + }) + + return ast.NewArray(cpy...) 
+} + +func init() { + RegisterBuiltinFunc(ast.WalkBuiltin.Name, evalWalk) +} diff --git a/vendor/github.com/open-policy-agent/opa/tracing/tracing.go b/vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go similarity index 96% rename from vendor/github.com/open-policy-agent/opa/tracing/tracing.go rename to vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go index 2708b78e29..df2fb434a6 100644 --- a/vendor/github.com/open-policy-agent/opa/tracing/tracing.go +++ b/vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go @@ -11,10 +11,10 @@ package tracing import "net/http" // Options are options for the HTTPTracingService, passed along as-is. -type Options []interface{} +type Options []any // NewOptions is a helper method for constructing `tracing.Options` -func NewOptions(opts ...interface{}) Options { +func NewOptions(opts ...any) Options { return opts } diff --git a/vendor/github.com/open-policy-agent/opa/v1/types/decode.go b/vendor/github.com/open-policy-agent/opa/v1/types/decode.go new file mode 100644 index 0000000000..367b64bffb --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/types/decode.go @@ -0,0 +1,191 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package types + +import ( + "encoding/json" + "fmt" + + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + typeNull = "null" + typeBoolean = "boolean" + typeNumber = "number" + typeString = "string" + typeArray = "array" + typeSet = "set" + typeObject = "object" + typeAny = "any" + typeFunction = "function" +) + +// Unmarshal deserializes bs and returns the resulting type. 
+func Unmarshal(bs []byte) (result Type, err error) { + + var hint rawtype + + if err = util.UnmarshalJSON(bs, &hint); err == nil { + switch hint.Type { + case typeNull: + result = Nl + case typeBoolean: + result = B + case typeNumber: + result = N + case typeString: + result = S + case typeArray: + var arr rawarray + if err = util.UnmarshalJSON(bs, &arr); err == nil { + var err error + var static []Type + var dynamic Type + if static, err = unmarshalSlice(arr.Static); err != nil { + return nil, err + } + if len(arr.Dynamic) != 0 { + if dynamic, err = Unmarshal(arr.Dynamic); err != nil { + return nil, err + } + } + result = NewArray(static, dynamic) + } + case typeObject: + var obj rawobject + if err = util.UnmarshalJSON(bs, &obj); err == nil { + var err error + var static []*StaticProperty + var dynamic *DynamicProperty + if static, err = unmarshalStaticPropertySlice(obj.Static); err != nil { + return nil, err + } + if dynamic, err = unmarshalDynamicProperty(obj.Dynamic); err != nil { + return nil, err + } + result = NewObject(static, dynamic) + } + case typeSet: + var set rawset + if err = util.UnmarshalJSON(bs, &set); err == nil { + var of Type + if of, err = Unmarshal(set.Of); err == nil { + result = NewSet(of) + } + } + case typeAny: + var union rawunion + if err = util.UnmarshalJSON(bs, &union); err == nil { + var of []Type + if of, err = unmarshalSlice(union.Of); err == nil { + result = NewAny(of...) 
+ } + } + case typeFunction: + var decl rawdecl + if err = util.UnmarshalJSON(bs, &decl); err == nil { + args, err := unmarshalSlice(decl.Args) + if err != nil { + return nil, err + } + var ret Type + if len(decl.Result) > 0 { + ret, err = Unmarshal(decl.Result) + if err != nil { + return nil, err + } + } + if len(decl.Variadic) > 0 { + varargs, err := Unmarshal(decl.Variadic) + if err != nil { + return nil, err + } + result = NewVariadicFunction(args, varargs, ret) + } else { + result = NewFunction(args, ret) + } + } + default: + err = fmt.Errorf("unsupported type '%v'", hint.Type) + } + } + + return result, err +} + +type rawtype struct { + Type string `json:"type"` +} + +type rawarray struct { + Static []json.RawMessage `json:"static"` + Dynamic json.RawMessage `json:"dynamic"` +} + +type rawobject struct { + Static []rawstaticproperty `json:"static"` + Dynamic rawdynamicproperty `json:"dynamic"` +} + +type rawstaticproperty struct { + Key any `json:"key"` + Value json.RawMessage `json:"value"` +} + +type rawdynamicproperty struct { + Key json.RawMessage `json:"key"` + Value json.RawMessage `json:"value"` +} + +type rawset struct { + Of json.RawMessage `json:"of"` +} + +type rawunion struct { + Of []json.RawMessage `json:"of"` +} + +type rawdecl struct { + Args []json.RawMessage `json:"args"` + Result json.RawMessage `json:"result"` + Variadic json.RawMessage `json:"variadic"` +} + +func unmarshalSlice(elems []json.RawMessage) (result []Type, err error) { + result = make([]Type, len(elems)) + for i := range elems { + if result[i], err = Unmarshal(elems[i]); err != nil { + return nil, err + } + } + return result, err +} + +func unmarshalStaticPropertySlice(elems []rawstaticproperty) (result []*StaticProperty, err error) { + result = make([]*StaticProperty, len(elems)) + for i := range elems { + value, err := Unmarshal(elems[i].Value) + if err != nil { + return nil, err + } + result[i] = NewStaticProperty(elems[i].Key, value) + } + return result, err +} + +func 
unmarshalDynamicProperty(x rawdynamicproperty) (result *DynamicProperty, err error) { + if len(x.Key) == 0 { + return nil, nil + } + var key Type + if key, err = Unmarshal(x.Key); err == nil { + var value Type + if value, err = Unmarshal(x.Value); err == nil { + return NewDynamicProperty(key, value), nil + } + } + return nil, err +} diff --git a/vendor/github.com/open-policy-agent/opa/types/types.go b/vendor/github.com/open-policy-agent/opa/v1/types/types.go similarity index 90% rename from vendor/github.com/open-policy-agent/opa/types/types.go rename to vendor/github.com/open-policy-agent/opa/v1/types/types.go index 2a050927dd..fc1db120a0 100644 --- a/vendor/github.com/open-policy-agent/opa/types/types.go +++ b/vendor/github.com/open-policy-agent/opa/v1/types/types.go @@ -8,11 +8,29 @@ package types import ( "encoding/json" + "errors" "fmt" + "slices" "sort" "strings" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/util" +) + +var ( + // Nl represents an instance of the null type. + Nl Type = NewNull() + // B represents an instance of the boolean type. + B Type = NewBoolean() + // S represents an instance of the string type. + S Type = NewString() + // N represents an instance of the number type. + N Type = NewNumber() + // A represents the superset of all types. + A Type = NewAny() + + // Boxed set types. + SetOfAny, SetOfStr, SetOfNum Type = NewSet(A), NewSet(S), NewSet(N) ) // Sprint returns the string representation of the type. 
@@ -58,12 +76,12 @@ type NamedType struct { func (n *NamedType) typeMarker() string { return n.Type.typeMarker() } func (n *NamedType) String() string { return n.Name + ": " + n.Type.String() } func (n *NamedType) MarshalJSON() ([]byte, error) { - var obj map[string]interface{} + var obj map[string]any switch x := n.Type.(type) { - case interface{ toMap() map[string]interface{} }: + case interface{ toMap() map[string]any }: obj = x.toMap() default: - obj = map[string]interface{}{ + obj = map[string]any{ "type": n.Type.typeMarker(), } } @@ -91,7 +109,7 @@ func Named(name string, t Type) *NamedType { // MarshalJSON returns the JSON encoding of t. func (t Null) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "type": t.typeMarker(), }) } @@ -105,16 +123,13 @@ func unwrap(t Type) Type { } } -func (t Null) String() string { +func (Null) String() string { return typeNull } // Boolean represents the boolean type. type Boolean struct{} -// B represents an instance of the boolean type. -var B = NewBoolean() - // NewBoolean returns a new Boolean type. func NewBoolean() Boolean { return Boolean{} @@ -122,7 +137,7 @@ func NewBoolean() Boolean { // MarshalJSON returns the JSON encoding of t. func (t Boolean) MarshalJSON() ([]byte, error) { - repr := map[string]interface{}{ + repr := map[string]any{ "type": t.typeMarker(), } return json.Marshal(repr) @@ -135,9 +150,6 @@ func (t Boolean) String() string { // String represents the string type. type String struct{} -// S represents an instance of the string type. -var S = NewString() - // NewString returns a new String type. func NewString() String { return String{} @@ -145,7 +157,7 @@ func NewString() String { // MarshalJSON returns the JSON encoding of t. 
func (t String) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "type": t.typeMarker(), }) } @@ -157,9 +169,6 @@ func (String) String() string { // Number represents the number type. type Number struct{} -// N represents an instance of the number type. -var N = NewNumber() - // NewNumber returns a new Number type. func NewNumber() Number { return Number{} @@ -167,7 +176,7 @@ func NewNumber() Number { // MarshalJSON returns the JSON encoding of t. func (t Number) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "type": t.typeMarker(), }) } @@ -195,8 +204,8 @@ func (t *Array) MarshalJSON() ([]byte, error) { return json.Marshal(t.toMap()) } -func (t *Array) toMap() map[string]interface{} { - repr := map[string]interface{}{ +func (t *Array) toMap() map[string]any { + repr := map[string]any{ "type": t.typeMarker(), } if len(t.static) != 0 { @@ -210,7 +219,7 @@ func (t *Array) toMap() map[string]interface{} { func (t *Array) String() string { prefix := "array" - buf := []string{} + buf := make([]string, 0, len(t.static)) for _, tpe := range t.static { buf = append(buf, Sprint(tpe)) } @@ -268,8 +277,8 @@ func (t *Set) MarshalJSON() ([]byte, error) { return json.Marshal(t.toMap()) } -func (t *Set) toMap() map[string]interface{} { - repr := map[string]interface{}{ +func (t *Set) toMap() map[string]any { + repr := map[string]any{ "type": t.typeMarker(), } if t.of != nil { @@ -285,12 +294,12 @@ func (t *Set) String() string { // StaticProperty represents a static object property. type StaticProperty struct { - Key interface{} + Key any Value Type } // NewStaticProperty returns a new StaticProperty object. 
-func NewStaticProperty(key interface{}, value Type) *StaticProperty { +func NewStaticProperty(key any, value Type) *StaticProperty { return &StaticProperty{ Key: key, Value: value, @@ -299,7 +308,7 @@ func NewStaticProperty(key interface{}, value Type) *StaticProperty { // MarshalJSON returns the JSON encoding of p. func (p *StaticProperty) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "key": p.Key, "value": p.Value, }) @@ -321,7 +330,7 @@ func NewDynamicProperty(key, value Type) *DynamicProperty { // MarshalJSON returns the JSON encoding of p. func (p *DynamicProperty) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "key": p.Key, "value": p.Value, }) @@ -339,9 +348,8 @@ type Object struct { // NewObject returns a new Object type. func NewObject(static []*StaticProperty, dynamic *DynamicProperty) *Object { - sort.Slice(static, func(i, j int) bool { - cmp := util.Compare(static[i].Key, static[j].Key) - return cmp == -1 + slices.SortFunc(static, func(a, b *StaticProperty) int { + return util.Compare(a.Key, b.Key) }) return &Object{ static: static, @@ -384,8 +392,8 @@ func (t *Object) StaticProperties() []*StaticProperty { } // Keys returns the keys of the object's static elements. -func (t *Object) Keys() []interface{} { - sl := make([]interface{}, 0, len(t.static)) +func (t *Object) Keys() []any { + sl := make([]any, 0, len(t.static)) for _, p := range t.static { sl = append(sl, p.Key) } @@ -397,8 +405,8 @@ func (t *Object) MarshalJSON() ([]byte, error) { return json.Marshal(t.toMap()) } -func (t *Object) toMap() map[string]interface{} { - repr := map[string]interface{}{ +func (t *Object) toMap() map[string]any { + repr := map[string]any{ "type": t.typeMarker(), } if len(t.static) != 0 { @@ -411,7 +419,7 @@ func (t *Object) toMap() map[string]interface{} { } // Select returns the type of the named property. 
-func (t *Object) Select(name interface{}) Type { +func (t *Object) Select(name any) Type { pos := sort.Search(len(t.static), func(x int) bool { return util.Compare(t.static[x].Key, name) >= 0 }) @@ -471,7 +479,7 @@ func mergeObjects(a, b *Object) *Object { dynamicProps = b.dynamic } - staticPropsMap := make(map[interface{}]Type) + staticPropsMap := make(map[any]Type) for _, sp := range a.static { staticPropsMap[sp.Key] = sp.Value @@ -503,9 +511,6 @@ func mergeObjects(a, b *Object) *Object { // Any represents a dynamic type. type Any []Type -// A represents the superset of all types. -var A = NewAny() - // NewAny returns a new Any type. func NewAny(of ...Type) Any { sl := make(Any, len(of)) @@ -536,8 +541,8 @@ func (t Any) MarshalJSON() ([]byte, error) { return json.Marshal(t.toMap()) } -func (t Any) toMap() map[string]interface{} { - repr := map[string]interface{}{ +func (t Any) toMap() map[string]any { + repr := map[string]any{ "type": t.typeMarker(), } if len(t) != 0 { @@ -578,10 +583,7 @@ func (t Any) Union(other Any) Any { return other } // Prealloc the output list. - maxLen := lenT - if lenT < lenOther { - maxLen = lenOther - } + maxLen := max(lenT, lenOther) merged := make(Any, 0, maxLen) // Note(philipc): Create a merged slice, doing the minimum number of // comparisons along the way. We treat this as a problem of merging two @@ -675,7 +677,7 @@ func Arity(x Type) int { if !ok { return 0 } - return len(f.FuncArgs().Args) + return f.Arity() } // NewFunction returns a new Function object of the given argument and result types. @@ -714,6 +716,7 @@ func (t *Function) NamedFuncArgs() FuncArgs { } // Args returns the function's arguments as a slice, ignoring variadic arguments. +// // Deprecated: Use FuncArgs instead. func (t *Function) Args() []Type { cpy := make([]Type, len(t.args)) @@ -723,6 +726,11 @@ func (t *Function) Args() []Type { return cpy } +// Arity returns the number of arguments in the function signature. 
+func (t *Function) Arity() int { + return len(t.args) +} + // Result returns the function's result type. func (t *Function) Result() Type { return unwrap(t.result) @@ -739,7 +747,7 @@ func (t *Function) String() string { // MarshalJSON returns the JSON encoding of t. func (t *Function) MarshalJSON() ([]byte, error) { - repr := map[string]interface{}{ + repr := map[string]any{ "type": t.typeMarker(), } if len(t.args) > 0 { @@ -763,7 +771,7 @@ func (t *Function) UnmarshalJSON(bs []byte) error { f, ok := tpe.(*Function) if !ok { - return fmt.Errorf("invalid type") + return errors.New("invalid type") } *t = *f @@ -780,14 +788,15 @@ func (t *Function) Union(other *Function) *Function { return other } - a := t.Args() - b := other.Args() - if len(a) != len(b) { + if t.Arity() != other.Arity() { return nil } - aIsVariadic := t.FuncArgs().Variadic != nil - bIsVariadic := other.FuncArgs().Variadic != nil + tfa := t.FuncArgs() + ofa := other.FuncArgs() + + aIsVariadic := tfa.Variadic != nil + bIsVariadic := ofa.Variadic != nil if aIsVariadic && !bIsVariadic { return nil @@ -795,13 +804,16 @@ func (t *Function) Union(other *Function) *Function { return nil } + a := t.Args() + b := other.Args() + args := make([]Type, len(a)) for i := range a { args[i] = Or(a[i], b[i]) } result := NewFunction(args, Or(t.Result(), other.Result())) - result.variadic = Or(t.FuncArgs().Variadic, other.FuncArgs().Variadic) + result.variadic = Or(tfa.Variadic, ofa.Variadic) return result } @@ -841,7 +853,7 @@ func Compare(a, b Type) int { } else if x < y { return -1 } - switch a.(type) { + switch a.(type) { //nolint:gocritic case nil, Null, Boolean, Number, String: return 0 case *Array: @@ -878,12 +890,9 @@ func Compare(a, b Type) int { lenStaticA := len(objA.static) lenStaticB := len(objB.static) - minLen := lenStaticA - if lenStaticB < minLen { - minLen = lenStaticB - } + minLen := min(lenStaticB, lenStaticA) - for i := 0; i < minLen; i++ { + for i := range minLen { if cmp := 
util.Compare(objA.static[i].Key, objB.static[i].Key); cmp != 0 { return cmp } @@ -922,7 +931,7 @@ func Compare(a, b Type) int { } else if len(fA.args) > len(fB.args) { return 1 } - for i := 0; i < len(fA.args); i++ { + for i := range len(fA.args) { if cmp := Compare(fA.args[i], fB.args[i]); cmp != 0 { return cmp } @@ -975,7 +984,7 @@ func Or(a, b Type) Type { } // Select returns a property or item of a. -func Select(a Type, x interface{}) Type { +func Select(a Type, x any) Type { switch a := unwrap(a).(type) { case *Array: n, ok := x.(json.Number) @@ -1086,17 +1095,13 @@ func Nil(a Type) bool { case nil: return true case *Function: - for i := range a.args { - if Nil(a.args[i]) { - return true - } + if slices.ContainsFunc(a.args, Nil) { + return true } return Nil(a.result) case *Array: - for i := range a.static { - if Nil(a.static[i]) { - return true - } + if slices.ContainsFunc(a.static, Nil) { + return true } if a.dynamic != nil { return Nil(a.dynamic) @@ -1117,32 +1122,32 @@ func Nil(a Type) bool { } // TypeOf returns the type of the Golang native value. -func TypeOf(x interface{}) Type { +func TypeOf(x any) Type { switch x := x.(type) { case nil: - return NewNull() + return Nl case bool: return B case string: return S case json.Number: return N - case map[string]interface{}: - // The ast.ValueToInterface() function returns ast.Object values as map[string]interface{} - // so map[string]interface{} must be handled here because the type checker uses the value + case map[string]any: + // The ast.ValueToInterface() function returns ast.Object values as map[string]any + // so map[string]any must be handled here because the type checker uses the value // to interface conversion when inferring object types. 
static := make([]*StaticProperty, 0, len(x)) for k, v := range x { static = append(static, NewStaticProperty(k, TypeOf(v))) } return NewObject(static, nil) - case map[interface{}]interface{}: + case map[any]any: static := make([]*StaticProperty, 0, len(x)) for k, v := range x { static = append(static, NewStaticProperty(k, TypeOf(v))) } return NewObject(static, nil) - case []interface{}: + case []any: static := make([]Type, len(x)) for i := range x { static[i] = TypeOf(x[i]) @@ -1155,15 +1160,12 @@ func TypeOf(x interface{}) Type { type typeSlice []Type func (s typeSlice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 } -func (s typeSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } +func (s typeSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s typeSlice) Len() int { return len(s) } func typeSliceCompare(a, b []Type) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { + minLen := min(len(b), len(a)) + for i := range minLen { if cmp := Compare(a[i], b[i]); cmp != 0 { return cmp } diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go b/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go new file mode 100644 index 0000000000..d58af616da --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go @@ -0,0 +1,44 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package util + +import ( + "math/rand" + "time" +) + +// DefaultBackoff returns a delay with an exponential backoff based on the +// number of retries. +func DefaultBackoff(base, maxNS float64, retries int) time.Duration { + return Backoff(base, maxNS, .2, 1.6, retries) +} + +// Backoff returns a delay with an exponential backoff based on the number of +// retries. Same algorithm used in gRPC. +// Note that if maxNS is smaller than base, the backoff will still be capped at +// maxNS. 
+func Backoff(base, maxNS, jitter, factor float64, retries int) time.Duration { + if retries == 0 { + return 0 + } + + backoff, maxNS := base, maxNS + for backoff < maxNS && retries > 0 { + backoff *= factor + retries-- + } + if backoff > maxNS { + backoff = maxNS + } + + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + + return time.Duration(backoff) +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/channel.go b/vendor/github.com/open-policy-agent/opa/v1/util/channel.go new file mode 100644 index 0000000000..e2653ac7fd --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/channel.go @@ -0,0 +1,32 @@ +package util + +import ( + "github.com/open-policy-agent/opa/v1/metrics" +) + +// This prevents getting blocked forever writing to a full buffer, in case another routine fills the last space. +// Retrying maxEventRetry times to drop the oldest event. Dropping the incoming event if there still isn't room. +const maxEventRetry = 1000 + +// PushFIFO pushes data into a buffered channel without blocking when full, making room by dropping the oldest data. +// An optional metric can be recorded when data is dropped. +func PushFIFO[T any](buffer chan T, data T, metrics metrics.Metrics, metricName string) { + + for range maxEventRetry { + // non-blocking send to the buffer, to prevent blocking if buffer is full so room can be made. 
+ select { + case buffer <- data: + return + default: + } + + // non-blocking drop from the buffer to make room for incoming event + select { + case <-buffer: + if metrics != nil && metricName != "" { + metrics.Counter(metricName).Incr() + } + default: + } + } +} diff --git a/vendor/github.com/open-policy-agent/opa/util/close.go b/vendor/github.com/open-policy-agent/opa/v1/util/close.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/close.go rename to vendor/github.com/open-policy-agent/opa/v1/util/close.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/compare.go b/vendor/github.com/open-policy-agent/opa/v1/util/compare.go new file mode 100644 index 0000000000..df78f64755 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/compare.go @@ -0,0 +1,169 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package util + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Compare returns 0 if a equals b, -1 if a is less than b, and 1 if b is than a. +// +// For comparison between values of different types, the following ordering is used: +// nil < bool < int, float64 < string < []any < map[string]any. Slices and maps +// are compared recursively. If one slice or map is a subset of the other slice or map +// it is considered "less than". Nil is always equal to nil. 
+func Compare(a, b any) int { + aSortOrder := sortOrder(a) + bSortOrder := sortOrder(b) + if aSortOrder < bSortOrder { + return -1 + } else if bSortOrder < aSortOrder { + return 1 + } + switch a := a.(type) { + case nil: + return 0 + case bool: + switch b := b.(type) { + case bool: + if a == b { + return 0 + } + if !a { + return -1 + } + return 1 + } + case json.Number: + switch b := b.(type) { + case json.Number: + return compareJSONNumber(a, b) + } + case int: + switch b := b.(type) { + case int: + if a == b { + return 0 + } else if a < b { + return -1 + } + return 1 + } + case float64: + switch b := b.(type) { + case float64: + if a == b { + return 0 + } else if a < b { + return -1 + } + return 1 + } + case string: + switch b := b.(type) { + case string: + if a == b { + return 0 + } else if a < b { + return -1 + } + return 1 + } + case []any: + switch b := b.(type) { + case []any: + bLen := len(b) + aLen := len(a) + minLen := min(bLen, aLen) + for i := range minLen { + cmp := Compare(a[i], b[i]) + if cmp != 0 { + return cmp + } + } + if aLen == bLen { + return 0 + } else if aLen < bLen { + return -1 + } + return 1 + } + case map[string]any: + switch b := b.(type) { + case map[string]any: + aKeys := KeysSorted(a) + bKeys := KeysSorted(b) + aLen := len(aKeys) + bLen := len(bKeys) + minLen := min(bLen, aLen) + for i := range minLen { + if aKeys[i] < bKeys[i] { + return -1 + } else if bKeys[i] < aKeys[i] { + return 1 + } + aVal := a[aKeys[i]] + bVal := b[bKeys[i]] + cmp := Compare(aVal, bVal) + if cmp != 0 { + return cmp + } + } + if aLen == bLen { + return 0 + } else if aLen < bLen { + return -1 + } + return 1 + } + } + + panic(fmt.Sprintf("illegal arguments of type %T and type %T", a, b)) +} + +const ( + nilSort = iota + boolSort = iota + numberSort = iota + stringSort = iota + arraySort = iota + objectSort = iota +) + +func compareJSONNumber(a, b json.Number) int { + bigA, ok := new(big.Float).SetString(string(a)) + if !ok { + panic("illegal value") + } + bigB, 
ok := new(big.Float).SetString(string(b)) + if !ok { + panic("illegal value") + } + return bigA.Cmp(bigB) +} + +func sortOrder(v any) int { + switch v.(type) { + case nil: + return nilSort + case bool: + return boolSort + case json.Number: + return numberSort + case int: + return numberSort + case float64: + return numberSort + case string: + return stringSort + case []any: + return arraySort + case map[string]any: + return objectSort + } + panic(fmt.Sprintf("illegal argument of type %T", v)) +} diff --git a/vendor/github.com/open-policy-agent/opa/util/decoding/context.go b/vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/decoding/context.go rename to vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go diff --git a/vendor/github.com/open-policy-agent/opa/util/doc.go b/vendor/github.com/open-policy-agent/opa/v1/util/doc.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/doc.go rename to vendor/github.com/open-policy-agent/opa/v1/util/doc.go diff --git a/vendor/github.com/open-policy-agent/opa/util/enumflag.go b/vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/enumflag.go rename to vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go diff --git a/vendor/github.com/open-policy-agent/opa/util/graph.go b/vendor/github.com/open-policy-agent/opa/v1/util/graph.go similarity index 95% rename from vendor/github.com/open-policy-agent/opa/util/graph.go rename to vendor/github.com/open-policy-agent/opa/v1/util/graph.go index f0e8242454..acb62590b6 100644 --- a/vendor/github.com/open-policy-agent/opa/util/graph.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/graph.go @@ -77,13 +77,10 @@ func dfsRecursive(t Traversal, eq Equals, u, z T, path []T) []T { } for _, v := range t.Edges(u) { if eq(v, z) { - path = append(path, z) - path = 
append(path, u) - return path + return append(path, z, u) } if p := dfsRecursive(t, eq, v, z, path); len(p) > 0 { - path = append(p, u) - return path + return append(p, u) } } return path diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go b/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go new file mode 100644 index 0000000000..69a90cbb53 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go @@ -0,0 +1,271 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package util + +import ( + "fmt" + "strings" +) + +// T is a concise way to refer to T. +type T any + +type Hasher interface { + Hash() int +} + +type hashEntry[K any, V any] struct { + k K + v V + next *hashEntry[K, V] +} + +// TypedHashMap represents a key/value map. +type TypedHashMap[K any, V any] struct { + keq func(K, K) bool + veq func(V, V) bool + khash func(K) int + vhash func(V) int + def V + table map[int]*hashEntry[K, V] + size int +} + +// NewTypedHashMap returns a new empty TypedHashMap. +func NewTypedHashMap[K any, V any](keq func(K, K) bool, veq func(V, V) bool, khash func(K) int, vhash func(V) int, def V) *TypedHashMap[K, V] { + return &TypedHashMap[K, V]{ + keq: keq, + veq: veq, + khash: khash, + vhash: vhash, + def: def, + table: make(map[int]*hashEntry[K, V]), + size: 0, + } +} + +// HashMap represents a key/value map. +type HashMap = TypedHashMap[T, T] + +// NewHashMap returns a new empty HashMap. +func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap { + return &HashMap{ + keq: eq, + veq: eq, + khash: hash, + vhash: hash, + def: nil, + table: make(map[int]*hashEntry[T, T]), + size: 0, + } +} + +// Copy returns a shallow copy of this HashMap. 
+func (h *TypedHashMap[K, V]) Copy() *TypedHashMap[K, V] { + cpy := NewTypedHashMap(h.keq, h.veq, h.khash, h.vhash, h.def) + h.Iter(func(k K, v V) bool { + cpy.Put(k, v) + return false + }) + return cpy +} + +// Equal returns true if this HashMap equals the other HashMap. +// Two hash maps are equal if they contain the same key/value pairs. +func (h *TypedHashMap[K, V]) Equal(other *TypedHashMap[K, V]) bool { + if h.Len() != other.Len() { + return false + } + return !h.Iter(func(k K, v V) bool { + ov, ok := other.Get(k) + if !ok { + return true + } + return !h.veq(v, ov) + }) +} + +// Get returns the value for k. +func (h *TypedHashMap[K, V]) Get(k K) (V, bool) { + hash := h.khash(k) + for entry := h.table[hash]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + return entry.v, true + } + } + return h.def, false +} + +// Delete removes the key k. +func (h *TypedHashMap[K, V]) Delete(k K) { + hash := h.khash(k) + var prev *hashEntry[K, V] + for entry := h.table[hash]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + if prev != nil { + prev.next = entry.next + } else { + h.table[hash] = entry.next + } + h.size-- + return + } + prev = entry + } +} + +// Hash returns the hash code for this hash map. +func (h *TypedHashMap[K, V]) Hash() int { + var hash int + h.Iter(func(k K, v V) bool { + hash += h.khash(k) + h.vhash(v) + return false + }) + return hash +} + +// Iter invokes the iter function for each element in the HashMap. +// If the iter function returns true, iteration stops and the return value is true. +// If the iter function never returns true, iteration proceeds through all elements +// and the return value is false. +func (h *TypedHashMap[K, V]) Iter(iter func(K, V) bool) bool { + for _, entry := range h.table { + for ; entry != nil; entry = entry.next { + if iter(entry.k, entry.v) { + return true + } + } + } + return false +} + +// Len returns the current size of this HashMap. 
+func (h *TypedHashMap[K, V]) Len() int { + return h.size +} + +// Put inserts a key/value pair into this HashMap. If the key is already present, the existing +// value is overwritten. +func (h *TypedHashMap[K, V]) Put(k K, v V) { + hash := h.khash(k) + head := h.table[hash] + for entry := head; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + entry.v = v + return + } + } + h.table[hash] = &hashEntry[K, V]{k: k, v: v, next: head} + h.size++ +} + +func (h *TypedHashMap[K, V]) String() string { + var buf []string + h.Iter(func(k K, v V) bool { + buf = append(buf, fmt.Sprintf("%v: %v", k, v)) + return false + }) + return "{" + strings.Join(buf, ", ") + "}" +} + +// Update returns a new HashMap with elements from the other HashMap put into this HashMap. +// If the other HashMap contains elements with the same key as this HashMap, the value +// from the other HashMap overwrites the value from this HashMap. +func (h *TypedHashMap[K, V]) Update(other *TypedHashMap[K, V]) *TypedHashMap[K, V] { + updated := h.Copy() + other.Iter(func(k K, v V) bool { + updated.Put(k, v) + return false + }) + return updated +} + +type hasherEntry[K Hasher, V any] struct { + k K + v V + next *hasherEntry[K, V] +} + +// HasherMap represents a simpler version of TypedHashMap that uses Hasher's +// for keys, and requires only an equality function for keys. Ideally we'd have +// and Equal method for all key types too, and we could get rid of that requirement. +type HasherMap[K Hasher, V any] struct { + keq func(K, K) bool + table map[int]*hasherEntry[K, V] + size int +} + +// NewHasherMap returns a new empty HasherMap. +func NewHasherMap[K Hasher, V any](keq func(K, K) bool) *HasherMap[K, V] { + return &HasherMap[K, V]{ + keq: keq, + table: make(map[int]*hasherEntry[K, V]), + size: 0, + } +} + +// Get returns the value for k. 
+func (h *HasherMap[K, V]) Get(k K) (V, bool) { + for entry := h.table[k.Hash()]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + return entry.v, true + } + } + var zero V + return zero, false +} + +// Put inserts a key/value pair into this HashMap. If the key is already present, the existing +// value is overwritten. +func (h *HasherMap[K, V]) Put(k K, v V) { + hash := k.Hash() + head := h.table[hash] + for entry := head; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + entry.v = v + return + } + } + h.table[hash] = &hasherEntry[K, V]{k: k, v: v, next: head} + h.size++ +} + +// Delete removes the key k. +func (h *HasherMap[K, V]) Delete(k K) { + hash := k.Hash() + var prev *hasherEntry[K, V] + for entry := h.table[hash]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + if prev != nil { + prev.next = entry.next + } else { + h.table[hash] = entry.next + } + h.size-- + return + } + prev = entry + } +} + +// Iter invokes the iter function for each element in the HasherMap. +// If the iter function returns true, iteration stops and the return value is true. +// If the iter function never returns true, iteration proceeds through all elements +// and the return value is false. +func (h *HasherMap[K, V]) Iter(iter func(K, V) bool) bool { + for _, entry := range h.table { + for ; entry != nil; entry = entry.next { + if iter(entry.k, entry.v) { + return true + } + } + } + return false +} + +// Len returns the current size of this HashMap. +func (h *HasherMap[K, V]) Len() int { + return h.size +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/json.go b/vendor/github.com/open-policy-agent/opa/v1/util/json.go new file mode 100644 index 0000000000..de95ed50bf --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/json.go @@ -0,0 +1,195 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package util + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "sigs.k8s.io/yaml" + + "github.com/open-policy-agent/opa/v1/loader/extension" +) + +// UnmarshalJSON parses the JSON encoded data and stores the result in the value +// pointed to by x. +// +// This function is intended to be used in place of the standard [json.Marshal] +// function when [json.Number] is required. +func UnmarshalJSON(bs []byte, x any) error { + return unmarshalJSON(bs, x, true) +} + +func unmarshalJSON(bs []byte, x any, ext bool) error { + decoder := NewJSONDecoder(bytes.NewBuffer(bs)) + if err := decoder.Decode(x); err != nil { + if handler := extension.FindExtension(".json"); handler != nil && ext { + return handler(bs, x) + } + return err + } + + // Since decoder.Decode validates only the first json structure in bytes, + // check if decoder has more bytes to consume to validate whole input bytes. + tok, err := decoder.Token() + if tok != nil { + return fmt.Errorf("error: invalid character '%s' after top-level value", tok) + } + if err != nil && err != io.EOF { + return err + } + return nil +} + +// NewJSONDecoder returns a new decoder that reads from r. +// +// This function is intended to be used in place of the standard [json.NewDecoder] +// when [json.Number] is required. +func NewJSONDecoder(r io.Reader) *json.Decoder { + decoder := json.NewDecoder(r) + decoder.UseNumber() + return decoder +} + +// MustUnmarshalJSON parse the JSON encoded data and returns the result. +// +// If the data cannot be decoded, this function will panic. This function is for +// test purposes. +func MustUnmarshalJSON(bs []byte) any { + var x any + if err := UnmarshalJSON(bs, &x); err != nil { + panic(err) + } + return x +} + +// MustMarshalJSON returns the JSON encoding of x +// +// If the data cannot be encoded, this function will panic. This function is for +// test purposes. 
+func MustMarshalJSON(x any) []byte { + bs, err := json.Marshal(x) + if err != nil { + panic(err) + } + return bs +} + +// RoundTrip encodes to JSON, and decodes the result again. +// +// Thereby, it is converting its argument to the representation expected by +// rego.Input and inmem's Write operations. Works with both references and +// values. +func RoundTrip(x *any) error { + // Avoid round-tripping types that won't change as a result of + // marshalling/unmarshalling, as even for those values, round-tripping + // comes with a significant cost. + if x == nil || !NeedsRoundTrip(*x) { + return nil + } + + // For number types, we can write the json.Number representation + // directly into x without marshalling to bytes and back. + a := *x + switch v := a.(type) { + case int: + *x = json.Number(strconv.Itoa(v)) + return nil + case int8: + *x = json.Number(strconv.FormatInt(int64(v), 10)) + return nil + case int16: + *x = json.Number(strconv.FormatInt(int64(v), 10)) + return nil + case int32: + *x = json.Number(strconv.FormatInt(int64(v), 10)) + return nil + case int64: + *x = json.Number(strconv.FormatInt(v, 10)) + return nil + case uint: + *x = json.Number(strconv.FormatUint(uint64(v), 10)) + return nil + case uint8: + *x = json.Number(strconv.FormatUint(uint64(v), 10)) + return nil + case uint16: + *x = json.Number(strconv.FormatUint(uint64(v), 10)) + return nil + case uint32: + *x = json.Number(strconv.FormatUint(uint64(v), 10)) + return nil + case uint64: + *x = json.Number(strconv.FormatUint(v, 10)) + return nil + case float32: + *x = json.Number(strconv.FormatFloat(float64(v), 'f', -1, 32)) + return nil + case float64: + *x = json.Number(strconv.FormatFloat(v, 'f', -1, 64)) + return nil + } + + bs, err := json.Marshal(x) + if err != nil { + return err + } + return UnmarshalJSON(bs, x) +} + +// NeedsRoundTrip returns true if the value won't change as a result of +// a marshalling/unmarshalling round-trip. 
Since [RoundTrip] itself calls +// this you normally don't need to call this function directly, unless you +// want to make decisions based on the round-tripability of a value without +// actually doing the round-trip. +func NeedsRoundTrip(x any) bool { + switch x.(type) { + case nil, bool, string, json.Number: + return false + } + return true +} + +// Reference returns a pointer to its argument unless the argument already is +// a pointer. If the argument is **t, or ***t, etc, it will return *t. +// +// Used for preparing Go types (including pointers to structs) into values to be +// put through [RoundTrip]. +func Reference(x any) *any { + var y any + rv := reflect.ValueOf(x) + if rv.Kind() == reflect.Pointer { + return Reference(rv.Elem().Interface()) + } + if rv.Kind() != reflect.Invalid { + y = rv.Interface() + return &y + } + return &x +} + +// Unmarshal decodes a YAML, JSON or JSON extension value into the specified type. +func Unmarshal(bs []byte, v any) error { + if len(bs) > 2 && bs[0] == 0xef && bs[1] == 0xbb && bs[2] == 0xbf { + bs = bs[3:] // Strip UTF-8 BOM, see https://www.rfc-editor.org/rfc/rfc8259#section-8.1 + } + + if json.Valid(bs) { + return unmarshalJSON(bs, v, false) + } + nbs, err := yaml.YAMLToJSON(bs) + if err == nil { + return unmarshalJSON(nbs, v, false) + } + // not json or yaml: try extensions + if handler := extension.FindExtension(".json"); handler != nil { + return handler(bs, v) + } + return err +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/maps.go b/vendor/github.com/open-policy-agent/opa/v1/util/maps.go new file mode 100644 index 0000000000..c56fbe98ac --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/maps.go @@ -0,0 +1,34 @@ +package util + +import ( + "cmp" + "slices" +) + +// Keys returns a slice of keys from any map. 
+func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// KeysSorted returns a slice of keys from any map, sorted in ascending order. +func KeysSorted[M ~map[K]V, K cmp.Ordered, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + slices.Sort(r) + return r +} + +// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/performance.go b/vendor/github.com/open-policy-agent/opa/v1/util/performance.go new file mode 100644 index 0000000000..3c852638e2 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/performance.go @@ -0,0 +1,178 @@ +package util + +import ( + "slices" + "strconv" + "strings" + "sync" + "unsafe" +) + +// SyncPool is a generic sync.Pool for type T, providing some convenience +// over sync.Pool directly: [SyncPool.Put] ensures that nil values are not +// put into the pool, and [SyncPool.Get] returns a pointer to T without having +// to do a type assertion at the call site. +type SyncPool[T any] struct { + pool sync.Pool +} + +func NewSyncPool[T any]() *SyncPool[T] { + return &SyncPool[T]{ + pool: sync.Pool{ + New: func() any { + return new(T) + }, + }, + } +} + +func (p *SyncPool[T]) Get() *T { + return p.pool.Get().(*T) +} + +func (p *SyncPool[T]) Put(x *T) { + if x != nil { + p.pool.Put(x) + } +} + +// NewPtrSlice returns a slice of pointers to T with length n, +// with only 2 allocations performed no matter the size of n. 
+// See: +// https://gist.github.com/CAFxX/e96e8a5c3841d152f16d266a1fe7f8bd#slices-of-pointers +func NewPtrSlice[T any](n int) []*T { + return GrowPtrSlice[T](nil, n) +} + +// GrowPtrSlice appends n elements to the slice, each pointing to +// a newly-allocated T. The resulting slice has length equal to len(s)+n. +// +// It performs at most 2 allocations, regardless of n. +func GrowPtrSlice[T any](s []*T, n int) []*T { + s = slices.Grow(s, n) + p := make([]T, n) + for i := range n { + s = append(s, &p[i]) + } + return s +} + +// Allocation free conversion from []byte to string (unsafe) +// Note that the byte slice must not be modified after conversion +func ByteSliceToString(bs []byte) string { + return unsafe.String(unsafe.SliceData(bs), len(bs)) +} + +// Allocation free conversion from ~string to []byte (unsafe) +// Note that the byte slice must not be modified after conversion +func StringToByteSlice[T ~string](s T) []byte { + return unsafe.Slice(unsafe.StringData(string(s)), len(s)) +} + +// NumDigitsInt returns the number of digits in n. +// This is useful for pre-allocating buffers for string conversion. +func NumDigitsInt(n int) int { + return NumDigitsInt64(int64(n)) +} + +// NumDigitsInt64 returns the number of digits in n. +// This is useful for pre-allocating buffers for string conversion. +func NumDigitsInt64(n int64) int { + if n == 0 { + return 1 + } + + if n < 0 { + n = -n + } + + count := 0 + for n > 0 { + n /= 10 + count++ + } + return count +} + +// NumDigitsUint returns the number of digits in n. +// This is useful for pre-allocating buffers for string conversion. +func NumDigitsUint(n uint64) int { + if n == 0 { + return 1 + } + + count := 0 + for n > 0 { + n /= 10 + count++ + } + return count +} + +// AppendInt is a less messy version of strconv.AppendInt for base 10 ints. 
+func AppendInt(buf []byte, n int) []byte { + return strconv.AppendInt(buf, int64(n), 10) +} + +// SplitMap calls fn for each delim-separated part of text and returns a slice of the results. +// Cheaper than calling fn on strings.Split(text, delim), as it avoids allocating an intermediate slice of strings. +func SplitMap[T any](text string, delim string, fn func(string) T) []T { + sl := make([]T, 0, strings.Count(text, delim)+1) + for s := range strings.SplitSeq(text, delim) { + sl = append(sl, fn(s)) + } + return sl +} + +// SlicePool is a pool for (pointers to) slices of type T. +// It uses sync.Pool to pool the slices, and grows them as needed. +type SlicePool[T any] struct { + pool sync.Pool +} + +// NewSlicePool creates a new SlicePool for slices of type T with the given initial length. +// This number is only a hint, as the slices will grow as needed. For best performance, store +// slices of similar lengths in the same pool. +func NewSlicePool[T any](length int) *SlicePool[T] { + return &SlicePool[T]{ + pool: sync.Pool{ + New: func() any { + s := make([]T, length) + return &s + }, + }, + } +} + +// Get returns a pointer to a slice of type T with the given length +// from the pool. The slice capacity will grow as needed to accommodate +// the requested length. The returned slice will have all its elements +// set to the zero value of T. Returns a pointer to avoid allocating. +func (sp *SlicePool[T]) Get(length int) *[]T { + s := sp.pool.Get().(*[]T) + d := *s + + if cap(d) < length { + d = slices.Grow(d, length) + } + + d = d[:length] // reslice to requested length, while keeping capacity + + clear(d) + + *s = d + return s +} + +// Put returns a pointer to a slice of type T to the pool. +func (sp *SlicePool[T]) Put(s *[]T) { + if s != nil { + sp.pool.Put(s) + } +} + +// SortedFunc is simply a shorthand for [slices.SortFunc] which also returns the sorted slice. 
+func SortedFunc[T any, S ~[]T](s S, cmp func(a, b T) int) S { + slices.SortFunc(s, cmp) + return s +} diff --git a/vendor/github.com/open-policy-agent/opa/util/queue.go b/vendor/github.com/open-policy-agent/opa/v1/util/queue.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/queue.go rename to vendor/github.com/open-policy-agent/opa/v1/util/queue.go diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go b/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go new file mode 100644 index 0000000000..97dacd0c96 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go @@ -0,0 +1,60 @@ +package util + +import ( + "bytes" + "compress/gzip" + "errors" + "io" + "net/http" + "strings" + + "github.com/open-policy-agent/opa/v1/util/decoding" +) + +var gzipReaderPool = NewSyncPool[gzip.Reader]() + +// Note(philipc): Originally taken from server/server.go +// The DecodingLimitHandler handles setting the max size limits in the context. +// This function enforces those limits. For gzip payloads, we use a LimitReader +// to ensure we don't decompress more than the allowed maximum, preventing +// memory exhaustion from forged gzip trailers. 
+func ReadMaybeCompressedBody(r *http.Request) ([]byte, error) { + length := r.ContentLength + if maxLenConf, ok := decoding.GetServerDecodingMaxLen(r.Context()); ok { + length = maxLenConf + } + + content, err := io.ReadAll(io.LimitReader(r.Body, length)) + if err != nil { + return nil, err + } + + if strings.Contains(r.Header.Get("Content-Encoding"), "gzip") { + gzipMaxLength, _ := decoding.GetServerDecodingGzipMaxLen(r.Context()) + + gzReader := gzipReaderPool.Get() + defer func() { + gzReader.Close() + gzipReaderPool.Put(gzReader) + }() + + if err := gzReader.Reset(bytes.NewReader(content)); err != nil { + return nil, err + } + + decompressed := bytes.NewBuffer(make([]byte, 0, len(content))) + limitReader := io.LimitReader(gzReader, gzipMaxLength+1) + if _, err := decompressed.ReadFrom(limitReader); err != nil { + return nil, err + } + + if int64(decompressed.Len()) > gzipMaxLength { + return nil, errors.New("gzip payload too large") + } + + return decompressed.Bytes(), nil + } + + // Request was not compressed; return the content bytes. + return content, nil +} diff --git a/vendor/github.com/open-policy-agent/opa/v1/util/strings.go b/vendor/github.com/open-policy-agent/opa/v1/util/strings.go new file mode 100644 index 0000000000..8ea0aedc35 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/util/strings.go @@ -0,0 +1,13 @@ +package util + +import "strings" + +// WithPrefix ensures that the string s starts with the given prefix. +// If s already starts with prefix, it is returned unchanged. 
+func WithPrefix(s, prefix string) string { + if strings.HasPrefix(s, prefix) { + return s + } + + return prefix + s +} diff --git a/vendor/github.com/open-policy-agent/opa/util/time.go b/vendor/github.com/open-policy-agent/opa/v1/util/time.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/util/time.go rename to vendor/github.com/open-policy-agent/opa/v1/util/time.go diff --git a/vendor/github.com/open-policy-agent/opa/util/wait.go b/vendor/github.com/open-policy-agent/opa/v1/util/wait.go similarity index 94% rename from vendor/github.com/open-policy-agent/opa/util/wait.go rename to vendor/github.com/open-policy-agent/opa/v1/util/wait.go index b70ab6fcf9..b1ea84fd53 100644 --- a/vendor/github.com/open-policy-agent/opa/util/wait.go +++ b/vendor/github.com/open-policy-agent/opa/v1/util/wait.go @@ -5,7 +5,7 @@ package util import ( - "fmt" + "errors" "time" ) @@ -24,7 +24,7 @@ func WaitFunc(fun func() bool, interval, timeout time.Duration) error { for { select { case <-timer.C: - return fmt.Errorf("timeout") + return errors.New("timeout") case <-ticker.C: if fun() { return nil diff --git a/vendor/github.com/open-policy-agent/opa/v1/version/version.go b/vendor/github.com/open-policy-agent/opa/v1/version/version.go new file mode 100644 index 0000000000..7e71b3b7a9 --- /dev/null +++ b/vendor/github.com/open-policy-agent/opa/v1/version/version.go @@ -0,0 +1,58 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package version contains version information that is set at build time. 
+package version + +import ( + "runtime" + "runtime/debug" +) + +var Version = "1.15.2" + +// GoVersion is the version of Go this was built with +var GoVersion = runtime.Version() + +// Platform is the runtime OS and architecture of this OPA binary +var Platform = runtime.GOOS + "/" + runtime.GOARCH + +// Additional version information that is displayed by the "version" command and used to +// identify the version of running instances of OPA. +var ( + Vcs = "" + Timestamp = "" + Hostname = "" +) + +func init() { + bi, ok := debug.ReadBuildInfo() + if !ok { + return + } + var dirty bool + var binTimestamp, binVcs string + + for _, s := range bi.Settings { + switch s.Key { + case "vcs.time": + binTimestamp = s.Value + case "vcs.revision": + binVcs = s.Value + case "vcs.modified": + dirty = s.Value == "true" + } + } + + if Timestamp == "" { + Timestamp = binTimestamp + } + + if Vcs == "" { + Vcs = binVcs + if dirty { + Vcs += "-dirty" + } + } +} diff --git a/vendor/github.com/open-policy-agent/opa/version/wasm.go b/vendor/github.com/open-policy-agent/opa/v1/version/wasm.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/version/wasm.go rename to vendor/github.com/open-policy-agent/opa/v1/version/wasm.go diff --git a/vendor/github.com/open-policy-agent/opa/version/version.go b/vendor/github.com/open-policy-agent/opa/version/version.go deleted file mode 100644 index 862556bce0..0000000000 --- a/vendor/github.com/open-policy-agent/opa/version/version.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package version contains version information that is set at build time. -package version - -import ( - "runtime" - "runtime/debug" -) - -// Version is the canonical version of OPA. 
-var Version = "0.70.0" - -// GoVersion is the version of Go this was built with -var GoVersion = runtime.Version() - -// Platform is the runtime OS and architecture of this OPA binary -var Platform = runtime.GOOS + "/" + runtime.GOARCH - -// Additional version information that is displayed by the "version" command and used to -// identify the version of running instances of OPA. -var ( - Vcs = "" - Timestamp = "" - Hostname = "" -) - -func init() { - bi, ok := debug.ReadBuildInfo() - if !ok { - return - } - dirty := false - for _, s := range bi.Settings { - switch s.Key { - case "vcs.time": - Timestamp = s.Value - case "vcs.revision": - Vcs = s.Value - case "vcs.modified": - dirty = s.Value == "true" - } - } - if dirty { - Vcs = Vcs + "-dirty" - } -} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/prometheus/client_golang/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index b9cc55abbb..0000000000 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,18 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346d..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index c67ff1b7fa..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go deleted file mode 100644 index 450189f35e..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "runtime/debug" - -// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewBuildInfoCollector instead. 
-func NewBuildInfoCollector() Collector { - path, version, sum := "unknown", "unknown", "unknown" - if bi, ok := debug.ReadBuildInfo(); ok { - path = bi.Main.Path - version = bi.Main.Version - sum = bi.Main.Sum - } - c := &selfCollector{MustNewConstMetric( - NewDesc( - "go_build_info", - "Build information about the main Go module.", - nil, Labels{"path": path, "version": version, "checksum": sum}, - ), - GaugeValue, 1)} - c.init(c.self) - return c -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index cf05079fb8..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. -// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. 
collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. - // - // It is valid if one and the same Collector sends duplicate - // descriptors. Those duplicates are simply ignored. However, two - // different Collectors must not send duplicate descriptors. - // - // Sending no descriptor at all marks the Collector as “unchecked”, - // i.e. no checks will be performed at registration time, and the - // Collector may yield any Metric it sees fit in its Collect method. - // - // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. It may be called concurrently and - // therefore must be implemented in a concurrency safe way. - // - // If a Collector encounters an error while executing this method, it - // must send an invalid descriptor (created with NewInvalidDesc) to - // signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by Describe - // (unless the Collector is unchecked, see above). Returned metrics that - // share the same descriptor must differ in their variable label - // values. - // - // This method may be called concurrently and must therefore be - // implemented in a concurrency safe way. Blocking occurs at the expense - // of total performance of rendering all registered metrics. 
Ideally, - // Collector implementations support concurrent readers. - Collect(chan<- Metric) -} - -// DescribeByCollect is a helper to implement the Describe method of a custom -// Collector. It collects the metrics from the provided Collector and sends -// their descriptors to the provided channel. -// -// If a Collector collects the same metrics throughout its lifetime, its -// Describe method can simply be implemented as: -// -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } -// -// However, this will not work if the metrics collected change dynamically over -// the lifetime of the Collector in a way that their combined set of descriptors -// changes as well. The shortcut implementation will then violate the contract -// of the Describe method. If a Collector sometimes collects no metrics at all -// (for example vectors like CounterVec, GaugeVec, etc., which only collect -// metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collector (cf. the Register -// method of the Registerer interface). Hence, only use this shortcut -// implementation of Describe if you are certain to fulfill the contract. -// -// The Collector example demonstrates a use of DescribeByCollect. -func DescribeByCollect(c Collector, descs chan<- *Desc) { - metrics := make(chan Metric) - go func() { - c.Collect(metrics) - close(metrics) - }() - for m := range metrics { - descs <- m.Desc() - } -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. 
-func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. -func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} - -// collectorMetric is a metric that is also a collector. -// Because of selfCollector, most (if not all) Metrics in -// this package are also collectors. -type collectorMetric interface { - Metric - Collector -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go deleted file mode 100644 index 9a71a15db1..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2025 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// CollectorFunc is a convenient way to implement a Prometheus Collector -// without interface boilerplate. -// This implementation is based on DescribeByCollect method. -// familiarize yourself to it before using. 
-type CollectorFunc func(chan<- Metric) - -// Collect calls the defined CollectorFunc function with the provided Metrics channel -func (f CollectorFunc) Collect(ch chan<- Metric) { - f(ch) -} - -// Describe sends the descriptor information using DescribeByCollect -func (f CollectorFunc) Describe(ch chan<- *Desc) { - DescribeByCollect(f, ch) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index 4ce84e7a80..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Inc increments the counter by 1. 
Use Add to increment it by arbitrary - // non-negative values. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// ExemplarAdder is implemented by Counters that offer the option of adding a -// value to the Counter together with an exemplar. Its AddWithExemplar method -// works like the Add method of the Counter interface but also replaces the -// currently saved exemplar (if any) with a new one, created from the provided -// value, the current time as timestamp, and the provided labels. Empty Labels -// will lead to a valid (label-less) exemplar. But if Labels is nil, the current -// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any -// of the provided labels are invalid, or if the provided labels contain more -// than 128 runes in total. -type ExemplarAdder interface { - AddWithExemplar(value float64, exemplar Labels) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// CounterVecOpts bundles the options to create a CounterVec metric. -// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels -// is optional and can safely be left to its default value. -type CounterVecOpts struct { - CounterOpts - - // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Constraint - // function, if provided. - VariableLabels ConstrainableLabels -} - -// NewCounter creates a new Counter based on the provided CounterOpts. -// -// The returned implementation also implements ExemplarAdder. It is safe to -// perform the corresponding type assertion. -// -// The returned implementation tracks the counter value in two separate -// variables, a float64 and a uint64. The latter is used to track calls of the -// Inc method and calls of the Add method with a value that can be represented -// as a uint64. 
This allows atomic increments of the counter with optimal -// performance. (It is common to have an Inc call in very hot execution paths.) -// Both internal tracking values are added up in the Write method. This has to -// be taken into account when it comes to precision and overflow behavior. -func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - if opts.now == nil { - opts.now = time.Now - } - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now} - result.init(result) // Init self-collection. - result.createdTs = timestamppb.New(opts.now()) - return result -} - -type counter struct { - // valBits contains the bits of the represented float64 value, while - // valInt stores values that are exact integers. Both have to go first - // in the struct to guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - valInt uint64 - - selfCollector - desc *Desc - - createdTs *timestamppb.Timestamp - labelPairs []*dto.LabelPair - exemplar atomic.Value // Containing nil or a *dto.Exemplar. - - // now is for testing purposes, by default it's time.Now. 
- now func() time.Time -} - -func (c *counter) Desc() *Desc { - return c.desc -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - - ival := uint64(v) - if float64(ival) == v { - atomic.AddUint64(&c.valInt, ival) - return - } - - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } -} - -func (c *counter) AddWithExemplar(v float64, e Labels) { - c.Add(v) - c.updateExemplar(v, e) -} - -func (c *counter) Inc() { - atomic.AddUint64(&c.valInt, 1) -} - -func (c *counter) get() float64 { - fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) - ival := atomic.LoadUint64(&c.valInt) - return fval + float64(ival) -} - -func (c *counter) Write(out *dto.Metric) error { - // Read the Exemplar first and the value second. This is to avoid a race condition - // where users see an exemplar for a not-yet-existing observation. - var exemplar *dto.Exemplar - if e := c.exemplar.Load(); e != nil { - exemplar = e.(*dto.Exemplar) - } - val := c.get() - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs) -} - -func (c *counter) updateExemplar(v float64, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, c.now(), l) - if err != nil { - panic(err) - } - c.exemplar.Store(e) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -type CounterVec struct { - *MetricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. 
-func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - return V2.NewCounterVec(CounterVecOpts{ - CounterOpts: opts, - VariableLabels: UnconstrainedLabels(labelNames), - }) -} - -// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts. -func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { - desc := V2.NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - opts.VariableLabels, - opts.ConstLabels, - ) - if opts.now == nil { - opts.now = time.Now - } - return &CounterVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels.names) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) - } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now} - result.init(result) // Init self-collection. - result.createdTs = timestamppb.New(opts.now()) - return result - }), - } -} - -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). 
-// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *CounterVec) WithLabelValues(lvs ...string) Counter { - c, err := v.GetMetricWithLabelValues(lvs...) 
- if err != nil { - panic(err) - } - return c -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *CounterVec) With(labels Labels) Counter { - c, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return c -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the CounterVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &CounterVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. 
-type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. -// -// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. -func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index 2331b8b4f3..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "fmt" - "sort" - "strings" - - "github.com/cespare/xxhash/v2" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" - "google.golang.org/protobuf/proto" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. -// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels and normalization function for - // which the metric maintains variable values. - variableLabels *compiledLabels - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. 
- dimHash uint64 - // err is an error that occurred during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Collector example for a usage pattern. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels) -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names and normalization functions. Their -// label values are variable and therefore not part of the Desc. (They are managed -// within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Collector example for a usage pattern. -func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels.compile(), - } - //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. - if !model.NameValidationScheme.IsValidMetricName(fqName) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). 
- labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. - if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. - for _, label := range d.variableLabels.names { - if !checkLabelName(label) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName) - return d - } - labelNames = append(labelNames, "$"+label) - labelNameSet[label] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) - return d - } - - xxh := xxhash.New() - for _, val := range labelValues { - xxh.WriteString(val) - xxh.Write(separatorByteSlice) - } - d.id = xxh.Sum64() - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. 
- xxh.Reset() - xxh.WriteString(help) - xxh.Write(separatorByteSlice) - for _, labelName := range labelNames { - xxh.WriteString(labelName) - xxh.Write(separatorByteSlice) - } - d.dimHash = xxh.Sum64() - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - vlStrings := []string{} - if d.variableLabels != nil { - vlStrings = make([]string, 0, len(d.variableLabels.names)) - for _, vl := range d.variableLabels.names { - if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { - vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) - } else { - vlStrings = append(vlStrings, vl) - } - } - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - strings.Join(vlStrings, ","), - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 962608f02c..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the 
Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus is the core instrumentation package. It provides metrics -// primitives to instrument code for monitoring. It also offers a registry for -// metrics. Sub-packages allow to expose the registered metrics via HTTP -// (package promhttp) or push them to a Pushgateway (package push). There is -// also a sub-package promauto, which provides metrics constructors with -// automatic registration. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. -// -// # A Basic Example -// -// As a starting point, a very basic usage example: -// -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// type metrics struct { -// cpuTemp prometheus.Gauge -// hdFailures *prometheus.CounterVec -// } -// -// func NewMetrics(reg prometheus.Registerer) *metrics { -// m := &metrics{ -// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }), -// hdFailures: prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ), -// } -// reg.MustRegister(m.cpuTemp) -// reg.MustRegister(m.hdFailures) -// return m -// } -// -// func main() { -// // Create a non-global registry. 
-// reg := prometheus.NewRegistry() -// -// // Create new metrics and register them using the custom registry. -// m := NewMetrics(reg) -// // Set values for the new created metrics. -// m.cpuTemp.Set(65.3) -// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // Expose metrics and custom registry via an HTTP server -// // using the HandleFor function. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. -// It register the metrics using a custom registry and exposes them via an HTTP server -// on the /metrics endpoint. -// -// # Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. Furthermore, if you are not concerned with -// fine-grained control of when and how to register metrics with the registry, -// have a look at the promauto package, which will effectively allow you to -// ignore registration altogether in simple cases. -// -// Above, you have already touched the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in -// metric vectors. 
The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and -// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, -// and HistogramVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. -// -// # Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. At a first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels). -// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. An own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). NewConstMetric is used -// for all metric types with just a float64 as their value: Counter, Gauge, and -// a special “type” called Untyped. Use the latter if you are not sure if the -// mirrored metric is a Counter or a Gauge. Creation of the Metric instance -// happens in the Collect method. 
The Describe method has to return separate -// Desc instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. Alternatively, -// you could return no Desc at all, which will mark the Collector “unchecked”. -// No checks are performed at registration time, but metric consistency will -// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape -// errors. Thus, with unchecked Collectors, the responsibility to not collect -// metrics that lead to inconsistencies in the total scrape result lies with the -// implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situation where the exact -// metrics to be returned by a Collector cannot be predicted at registration -// time, but the implementer has sufficient knowledge of the whole system to -// guarantee metric consistency. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts. -// -// # Advanced Uses of the Registry -// -// While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. -// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. 
-// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. -// -// Also note that the DefaultRegisterer comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// # HTTP Exposition -// -// The Registry implements the Gatherer interface. The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// -// # Pushing to the Pushgateway -// -// Function for pushing to the Pushgateway can be found in the push sub-package. 
-// -// # Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// # Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. -package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go deleted file mode 100644 index de5a856293..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "encoding/json" - "expvar" -) - -type expvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewExpvarCollector instead. -func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. 
-func (e *expvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels.names)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...) - switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) - } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index 3d383a735c..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index dd2eac9406..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. 
-type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. - Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// GaugeVecOpts bundles the options to create a GaugeVec metric. -// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels -// is optional and can safely be left to its default value. -type GaugeVecOpts struct { - GaugeOpts - - // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Constraint - // function, if provided. - VariableLabels ConstrainableLabels -} - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -// -// The returned implementation is optimized for a fast Set method. If you have a -// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick -// the former. For example, the Inc method of the returned Gauge is slower than -// the Inc method of a Counter returned by NewCounter. This matches the typical -// scenarios for Gauges and Counters, where the former tends to be Set-heavy and -// the latter Inc-heavy. 
-func NewGauge(opts GaugeOpts) Gauge { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. - return result -} - -type gauge struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - labelPairs []*dto.LabelPair -} - -func (g *gauge) Desc() *Desc { - return g.desc -} - -func (g *gauge) Set(val float64) { - atomic.StoreUint64(&g.valBits, math.Float64bits(val)) -} - -func (g *gauge) SetToCurrentTime() { - g.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (g *gauge) Inc() { - g.Add(1) -} - -func (g *gauge) Dec() { - g.Add(-1) -} - -func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } -} - -func (g *gauge) Sub(val float64) { - g.Add(val * -1) -} - -func (g *gauge) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. -type GaugeVec struct { - *MetricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. 
-func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - return V2.NewGaugeVec(GaugeVecOpts{ - GaugeOpts: opts, - VariableLabels: UnconstrainedLabels(labelNames), - }) -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts. -func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { - desc := V2.NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - opts.VariableLabels, - opts.ConstLabels, - ) - return &GaugeVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels.names) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) - } - result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. 
Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { - g, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return g -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. 
Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *GaugeVec) With(labels Labels) Gauge { - g, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return g -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the GaugeVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &GaugeVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. -type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. 
Take into account that metric collection may happen -// concurrently. Therefore, it must be safe to call the provided function -// concurrently. -// -// NewGaugeFunc is a good way to create an “info” style metric with a constant -// value of 1. Example: -// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 -func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go deleted file mode 100644 index 614fd61be9..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !js || wasm -// +build !js wasm - -package prometheus - -import "os" - -func getPIDFn() func() (int, error) { - pid := os.Getpid() - return func() (int, error) { - return pid, nil - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go deleted file mode 100644 index eaf8059ee1..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js && !wasm -// +build js,!wasm - -package prometheus - -func getPIDFn() func() (int, error) { - return func() (int, error) { - return 1, nil - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index 520cbd7d41..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "runtime" - "runtime/debug" - "time" -) - -// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. -// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so -// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. Those are the defaults we can't alter. -func goRuntimeMemStats() memStatsMetrics { - return memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. 
Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. - "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use. 
Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_objects"), - "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system. 
Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, - } -} - -type baseGoCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - gcLastTimeDesc *Desc - goInfoDesc *Desc -} - -func newBaseGoCollector() baseGoCollector { - return baseGoCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", - nil, nil), - gcLastTimeDesc: NewDesc( - "go_memstats_last_gc_time_seconds", - "Number of seconds since 1970 of last garbage collection.", - nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), - } -} - -// Describe returns all descriptions of the collector. -func (c *baseGoCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc - ch <- c.gcDesc - ch <- c.gcLastTimeDesc - ch <- c.goInfoDesc -} - -// Collect returns the current state of all metrics of the collector. 
-func (c *baseGoCollector) Collect(ch chan<- Metric) { - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - - n := getRuntimeNumThreads() - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n) - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) -} - -func memstatNamespace(s string) string { - return "go_memstats_" + s -} - -// memStatsMetrics provide description, evaluator, runtime/metrics name, and -// value type for memstat metrics. -type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go deleted file mode 100644 index 897a6e906b..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package prometheus - -import ( - "runtime" - "sync" - "time" -) - -type goCollector struct { - base baseGoCollector - - // ms... are memstats related. - msLast *runtime.MemStats // Previously collected memstats. - msLastTimestamp time.Time - msMtx sync.Mutex // Protects msLast and msLastTimestamp. - msMetrics memStatsMetrics - msRead func(*runtime.MemStats) // For mocking in tests. - msMaxWait time.Duration // Wait time for fresh memstats. - msMaxAge time.Duration // Maximum allowed age of old memstats. -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector() Collector { - msMetrics := goRuntimeMemStats() - msMetrics = append(msMetrics, struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType - }{ - // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, - }) - return &goCollector{ - base: newBaseGoCollector(), - msLast: &runtime.MemStats{}, - msRead: runtime.ReadMemStats, - msMaxWait: time.Second, - msMaxAge: 5 * time.Minute, - msMetrics: msMetrics, - } -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - c.base.Describe(ch) - for _, i := range c.msMetrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. 
-func (c *goCollector) Collect(ch chan<- Metric) { - var ( - ms = &runtime.MemStats{} - done = make(chan struct{}) - ) - // Start reading memstats first as it might take a while. - go func() { - c.msRead(ms) - c.msMtx.Lock() - c.msLast = ms - c.msLastTimestamp = time.Now() - c.msMtx.Unlock() - close(done) - }() - - // Collect base non-memory metrics. - c.base.Collect(ch) - - timer := time.NewTimer(c.msMaxWait) - select { - case <-done: // Our own ReadMemStats succeeded in time. Use it. - timer.Stop() // Important for high collection frequencies to not pile up timers. - c.msCollect(ch, ms) - return - case <-timer.C: // Time out, use last memstats if possible. Continue below. - } - c.msMtx.Lock() - if time.Since(c.msLastTimestamp) < c.msMaxAge { - // Last memstats are recent enough. Collect from them under the lock. - c.msCollect(ch, c.msLast) - c.msMtx.Unlock() - return - } - // If we are here, the last memstats are too old or don't exist. We have - // to wait until our own ReadMemStats finally completes. For that to - // happen, we have to release the lock. - c.msMtx.Unlock() - <-done - c.msCollect(ch, ms) -} - -func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go deleted file mode 100644 index 6b8684731c..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "runtime/metrics" - "strings" - "sync" - - "github.com/prometheus/client_golang/prometheus/internal" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -const ( - // constants for strings referenced more than once. - goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" - goGCHeapAllocsObjects = "/gc/heap/allocs:objects" - goGCHeapFreesObjects = "/gc/heap/frees:objects" - goGCHeapFreesBytes = "/gc/heap/frees:bytes" - goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" - goGCHeapObjects = "/gc/heap/objects:objects" - goGCHeapGoalBytes = "/gc/heap/goal:bytes" - goMemoryClassesTotalBytes = "/memory/classes/total:bytes" - goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" - goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" - goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" - goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" - goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" - goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" - goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" - goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" - goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" - goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" - 
goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" - goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" - goMemoryClassesOtherBytes = "/memory/classes/other:bytes" -) - -// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic. -var rmNamesForMemStatsMetrics = []string{ - goGCHeapTinyAllocsObjects, - goGCHeapAllocsObjects, - goGCHeapFreesObjects, - goGCHeapAllocsBytes, - goGCHeapObjects, - goGCHeapGoalBytes, - goMemoryClassesTotalBytes, - goMemoryClassesHeapObjectsBytes, - goMemoryClassesHeapUnusedBytes, - goMemoryClassesHeapReleasedBytes, - goMemoryClassesHeapFreeBytes, - goMemoryClassesHeapStacksBytes, - goMemoryClassesOSStacksBytes, - goMemoryClassesMetadataMSpanInuseBytes, - goMemoryClassesMetadataMSPanFreeBytes, - goMemoryClassesMetadataMCacheInuseBytes, - goMemoryClassesMetadataMCacheFreeBytes, - goMemoryClassesProfilingBucketsBytes, - goMemoryClassesMetadataOtherBytes, - goMemoryClassesOtherBytes, -} - -func bestEffortLookupRM(lookup []string) []metrics.Description { - ret := make([]metrics.Description, 0, len(lookup)) - for _, rm := range metrics.All() { - for _, m := range lookup { - if m == rm.Name { - ret = append(ret, rm) - } - } - } - return ret -} - -type goCollector struct { - base baseGoCollector - - // mu protects updates to all fields ensuring a consistent - // snapshot is always produced by Collect. - mu sync.Mutex - - // Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed). - sampleBuf []metrics.Sample - // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums. - sampleMap map[string]*metrics.Sample - - // rmExposedMetrics represents all runtime/metrics package metrics - // that were configured to be exposed. - rmExposedMetrics []collectorMetric - rmExactSumMapForHist map[string]string - - // With Go 1.17, the runtime/metrics package was introduced. 
- // From that point on, metric names produced by the runtime/metrics - // package could be generated from runtime/metrics names. However, - // these differ from the old names for the same values. - // - // This field exists to export the same values under the old names - // as well. - msMetrics memStatsMetrics - msMetricsEnabled bool -} - -type rmMetricDesc struct { - metrics.Description -} - -func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc { - var descs []rmMetricDesc - for _, d := range metrics.All() { - var ( - deny = true - desc rmMetricDesc - ) - - for _, r := range rules { - if !r.Matcher.MatchString(d.Name) { - continue - } - deny = r.Deny - } - if deny { - continue - } - - desc.Description = d - descs = append(descs, desc) - } - return descs -} - -func defaultGoCollectorOptions() internal.GoCollectorOptions { - return internal.GoCollectorOptions{ - RuntimeMetricSumForHist: map[string]string{ - "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes, - "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, - }, - RuntimeMetricRules: []internal.GoCollectorRule{ - // Recommended metrics we want by default from runtime/metrics. - {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, - }, - } -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { - opt := defaultGoCollectorOptions() - for _, o := range opts { - o(&opt) - } - - exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules) - - // Collect all histogram samples so that we can get their buckets. - // The API guarantees that the buckets are always fixed for the lifetime - // of the process. 
- var histograms []metrics.Sample - for _, d := range exposedDescriptions { - if d.Kind == metrics.KindFloat64Histogram { - histograms = append(histograms, metrics.Sample{Name: d.Name}) - } - } - - if len(histograms) > 0 { - metrics.Read(histograms) - } - - bucketsMap := make(map[string][]float64) - for i := range histograms { - bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets - } - - // Generate a collector for each exposed runtime/metrics metric. - metricSet := make([]collectorMetric, 0, len(exposedDescriptions)) - // SampleBuf is used for reading from runtime/metrics. - // We are assuming the largest case to have stable pointers for sampleMap purposes. - sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics)) - sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions)) - for _, d := range exposedDescriptions { - namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description) - if !ok { - // Just ignore this metric; we can't do anything with it here. - // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested in TestExpectedRuntimeMetrics. 
- continue - } - help := attachOriginalName(d.Description.Description, d.Name) - - sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) - sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] - - var m collectorMetric - if d.Kind == metrics.KindFloat64Histogram { - _, hasSum := opt.RuntimeMetricSumForHist[d.Name] - unit := d.Name[strings.IndexRune(d.Name, ':')+1:] - m = newBatchHistogram( - NewDesc( - BuildFQName(namespace, subsystem, name), - help, - nil, - nil, - ), - internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit), - hasSum, - ) - } else if d.Cumulative { - m = NewCounter(CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: help, - }, - ) - } else { - m = NewGauge(GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: help, - }) - } - metricSet = append(metricSet, m) - } - - // Add exact sum metrics to sampleBuf if not added before. - for _, h := range histograms { - sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name] - if !ok { - continue - } - - if _, ok := sampleMap[sumMetric]; ok { - continue - } - sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric}) - sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1] - } - - var ( - msMetrics memStatsMetrics - msDescriptions []metrics.Description - ) - - if !opt.DisableMemStatsLikeMetrics { - msMetrics = goRuntimeMemStats() - msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics) - - // Check if metric was not exposed before and if not, add to sampleBuf. 
- for _, mdDesc := range msDescriptions { - if _, ok := sampleMap[mdDesc.Name]; ok { - continue - } - sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name}) - sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1] - } - } - - return &goCollector{ - base: newBaseGoCollector(), - sampleBuf: sampleBuf, - sampleMap: sampleMap, - rmExposedMetrics: metricSet, - rmExactSumMapForHist: opt.RuntimeMetricSumForHist, - msMetrics: msMetrics, - msMetricsEnabled: !opt.DisableMemStatsLikeMetrics, - } -} - -func attachOriginalName(desc, origName string) string { - return fmt.Sprintf("%s Sourced from %s.", desc, origName) -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - c.base.Describe(ch) - for _, i := range c.msMetrics { - ch <- i.desc - } - for _, m := range c.rmExposedMetrics { - ch <- m.Desc() - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - // Collect base non-memory metrics. - c.base.Collect(ch) - - if len(c.sampleBuf) == 0 { - return - } - - // Collect must be thread-safe, so prevent concurrent use of - // sampleBuf elements. Just read into sampleBuf but write all the data - // we get into our Metrics or MemStats. - // - // This lock also ensures that the Metrics we send out are all from - // the same updates, ensuring their mutual consistency insofar as - // is guaranteed by the runtime/metrics package. - // - // N.B. This locking is heavy-handed, but Collect is expected to be called - // relatively infrequently. Also the core operation here, metrics.Read, - // is fast (O(tens of microseconds)) so contention should certainly be - // low, though channel operations and any allocations may add to that. - c.mu.Lock() - defer c.mu.Unlock() - - // Populate runtime/metrics sample buffer. - metrics.Read(c.sampleBuf) - - // Collect all our runtime/metrics user chose to expose from sampleBuf (if any). 
- for i, metric := range c.rmExposedMetrics { - // We created samples for exposed metrics first in order, so indexes match. - sample := c.sampleBuf[i] - - // N.B. switch on concrete type because it's significantly more efficient - // than checking for the Counter and Gauge interface implementations. In - // this case, we control all the types here. - switch m := metric.(type) { - case *counter: - // Guard against decreases. This should never happen, but a failure - // to do so will result in a panic, which is a harsh consequence for - // a metrics collection bug. - v0, v1 := m.get(), unwrapScalarRMValue(sample.Value) - if v1 > v0 { - m.Add(unwrapScalarRMValue(sample.Value) - m.get()) - } - m.Collect(ch) - case *gauge: - m.Set(unwrapScalarRMValue(sample.Value)) - m.Collect(ch) - case *batchHistogram: - m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name)) - m.Collect(ch) - default: - panic("unexpected metric type") - } - } - - if c.msMetricsEnabled { - // ms is a dummy MemStats that we populate ourselves so that we can - // populate the old metrics from it if goMemStatsCollection is enabled. - var ms runtime.MemStats - memStatsFromRM(&ms, c.sampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) - } - } -} - -// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed -// to be scalar and returns the equivalent float64 value. Panics if the -// value is not scalar. -func unwrapScalarRMValue(v metrics.Value) float64 { - switch v.Kind() { - case metrics.KindUint64: - return float64(v.Uint64()) - case metrics.KindFloat64: - return v.Float64() - case metrics.KindBad: - // Unsupported metric. - // - // This should never happen because we always populate our metric - // set from the runtime/metrics package. - panic("unexpected bad kind metric") - default: - // Unsupported metric kind. 
- // - // This should never happen because we check for this during initialization - // and flag and filter metrics whose kinds we don't understand. - panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) - } -} - -// exactSumFor takes a runtime/metrics metric name (that is assumed to -// be of kind KindFloat64Histogram) and returns its exact sum and whether -// its exact sum exists. -// -// The runtime/metrics API for histograms doesn't currently expose exact -// sums, but some of the other metrics are in fact exact sums of histograms. -func (c *goCollector) exactSumFor(rmName string) float64 { - sumName, ok := c.rmExactSumMapForHist[rmName] - if !ok { - return 0 - } - s, ok := c.sampleMap[sumName] - if !ok { - return 0 - } - return unwrapScalarRMValue(s.Value) -} - -func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { - lookupOrZero := func(name string) uint64 { - if s, ok := rm[name]; ok { - return s.Value.Uint64() - } - return 0 - } - - // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees. - // The reason for this is because MemStats couldn't be extended at the time - // but there was a desire to have Mallocs at least be a little more representative, - // while having Mallocs - Frees still represent a live object count. - // Unfortunately, MemStats doesn't actually export a large allocation count, - // so it's impossible to pull this number out directly. - tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) - ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs - ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs - - ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) - ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) - ms.Lookups = 0 // Already always zero. 
- ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) - ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) - ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) - ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) - ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero(goGCHeapObjects) - ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) - ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) - ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) - ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) - ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) - ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) - ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) - ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) - ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) - ms.NextGC = lookupOrZero(goGCHeapGoalBytes) - - // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, - // and often misleading due to the fact that it's an average over the lifetime - // of the process. - // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.GCCPUFraction = 0 -} - -// batchHistogram is a mutable histogram that is updated -// in batches. -type batchHistogram struct { - selfCollector - - // Static fields updated only once. - desc *Desc - hasSum bool - - // Because this histogram operates in batches, it just uses a - // single mutex for everything. updates are always serialized - // but Write calls may operate concurrently with updates. - // Contention between these two sources should be rare. - mu sync.Mutex - buckets []float64 // Inclusive lower bounds, like runtime/metrics. - counts []uint64 - sum float64 // Used if hasSum is true. 
-} - -// newBatchHistogram creates a new batch histogram value with the given -// Desc, buckets, and whether or not it has an exact sum available. -// -// buckets must always be from the runtime/metrics package, following -// the same conventions. -func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { - // We need to remove -Inf values. runtime/metrics keeps them around. - // But -Inf bucket should not be allowed for prometheus histograms. - if buckets[0] == math.Inf(-1) { - buckets = buckets[1:] - } - h := &batchHistogram{ - desc: desc, - buckets: buckets, - // Because buckets follows runtime/metrics conventions, there's - // 1 more value in the buckets list than there are buckets represented, - // because in runtime/metrics, the bucket values represent *boundaries*, - // and non-Inf boundaries are inclusive lower bounds for that bucket. - counts: make([]uint64, len(buckets)-1), - hasSum: hasSum, - } - h.init(h) - return h -} - -// update updates the batchHistogram from a runtime/metrics histogram. -// -// sum must be provided if the batchHistogram was created to have an exact sum. -// h.buckets must be a strict subset of his.Buckets. -func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) { - counts, buckets := his.Counts, his.Buckets - - h.mu.Lock() - defer h.mu.Unlock() - - // Clear buckets. - for i := range h.counts { - h.counts[i] = 0 - } - // Copy and reduce buckets. 
- var j int - for i, count := range counts { - h.counts[j] += count - if buckets[i+1] == h.buckets[j+1] { - j++ - } - } - if h.hasSum { - h.sum = sum - } -} - -func (h *batchHistogram) Desc() *Desc { - return h.desc -} - -func (h *batchHistogram) Write(out *dto.Metric) error { - h.mu.Lock() - defer h.mu.Unlock() - - sum := float64(0) - if h.hasSum { - sum = h.sum - } - dtoBuckets := make([]*dto.Bucket, 0, len(h.counts)) - totalCount := uint64(0) - for i, count := range h.counts { - totalCount += count - if !h.hasSum { - if count != 0 { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) - } - } - - // Skip the +Inf bucket, but only for the bucket list. - // It must still count for sum and totalCount. - if math.IsInf(h.buckets[i+1], 1) { - break - } - // Float64Histogram's upper bound is exclusive, so make it inclusive - // by obtaining the next float64 value down, in order. - upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i]) - dtoBuckets = append(dtoBuckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(totalCount), - UpperBound: proto.Float64(upperBound), - }) - } - out.Histogram = &dto.Histogram{ - Bucket: dtoBuckets, - SampleCount: proto.Uint64(totalCount), - SampleSum: proto.Float64(sum), - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index c453b754a7..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,2056 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - nativeHistogramSchemaMaximum = 8 - nativeHistogramSchemaMinimum = -4 -) - -// nativeHistogramBounds for the frac of observed values. Only relevant for -// schema > 0. The position in the slice is the schema. (0 is never used, just -// here for convenience of using the schema directly as the index.) -// -// TODO(beorn7): Currently, we do a binary search into these slices. There are -// ways to turn it into a small number of simple array lookups. It probably only -// matters for schema 5 and beyond, but should be investigated. 
See this comment -// as a starting point: -// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 -var nativeHistogramBounds = [][]float64{ - // Schema "0": - {0.5}, - // Schema 1: - {0.5, 0.7071067811865475}, - // Schema 2: - {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, - // Schema 3: - { - 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, - 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, - }, - // Schema 4: - { - 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, - 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, - 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, - 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, - }, - // Schema 5: - { - 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, - 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, - 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, - 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, - 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, - 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, - 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, - 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, - }, - // Schema 6: - { - 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, - 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, - 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, - 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, - 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, - 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 
0.6414350080393891, - 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, - 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, - 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, - 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, - 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, - 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, - 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, - 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, - 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, - 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, - }, - // Schema 7: - { - 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, - 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, - 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, - 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, - 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, - 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, - 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, - 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, - 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, - 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, - 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, - 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, - 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, - 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, - 
0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, - 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, - 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, - 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, - 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, - 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, - 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, - 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, - 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, - 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, - 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, - 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, - 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, - 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, - 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, - 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, - 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, - 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, - }, - // Schema 8: - { - 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, - 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, - 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, - 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, - 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, - 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, - 0.5335702003384117, 
0.5350168559101208, 0.5364674337629877, 0.5379219445313954, - 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, - 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, - 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, - 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, - 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, - 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, - 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, - 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, - 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, - 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, - 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, - 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, - 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, - 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, - 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, - 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, - 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, - 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, - 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, - 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, - 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, - 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, - 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, - 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 
0.6975984549830999, - 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, - 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, - 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, - 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, - 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, - 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, - 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, - 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, - 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, - 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, - 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, - 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, - 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, - 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, - 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, - 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, - 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, - 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, - 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, - 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, - 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, - 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, - 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, - 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, - 
0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, - 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, - 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, - 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, - 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, - 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, - 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, - 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, - 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, - }, -} - -// The nativeHistogramBounds above can be generated with the code below. -// -// TODO(beorn7): It's tempting to actually use `go generate` to generate the -// code above. However, this could lead to slightly different numbers on -// different architectures. We still need to come to terms if we are fine with -// that, or if we might prefer to specify precise numbers in the standard. -// -// var nativeHistogramBounds [][]float64 = make([][]float64, 9) -// -// func init() { -// // Populate nativeHistogramBounds. -// numBuckets := 1 -// for i := range nativeHistogramBounds { -// bounds := []float64{0.5} -// factor := math.Exp2(math.Exp2(float64(-i))) -// for j := 0; j < numBuckets-1; j++ { -// var bound float64 -// if (j+1)%2 == 0 { -// // Use previously calculated value for increased precision. -// bound = nativeHistogramBounds[i-1][j/2+1] -// } else { -// bound = bounds[j] * factor -// } -// bounds = append(bounds, bound) -// } -// numBuckets *= 2 -// nativeHistogramBounds[i] = bounds -// } -// } - -// A Histogram counts individual observations from an event or sample stream in -// configurable static buckets (or in dynamic sparse buckets as part of the -// experimental Native Histograms, see below for more details). 
Similar to a -// Summary, it also provides a sum of observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile PromQL function. -// -// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL -// (see the documentation for detailed procedures). However, Histograms require -// the user to pre-define suitable buckets, and they are in general less -// accurate. (Both problems are addressed by the experimental Native -// Histograms. To use them, configure a NativeHistogramBucketFactor in the -// HistogramOpts. They also require a Prometheus server v2.40+ with the -// corresponding feature flag enabled.) -// -// The Observe method of a Histogram has a very low performance overhead in -// comparison with the Observe method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. Observations are - // usually positive or zero. Negative observations are accepted but - // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. (The experimental Native - // Histograms handle negative observations properly.) See - // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations - // for details. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). -const bucketLabel = "le" - -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. 
-var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - -// DefNativeHistogramZeroThreshold is the default value for -// NativeHistogramZeroThreshold in the HistogramOpts. -// -// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), -// which is a bucket boundary at all possible resolutions. -const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 - -// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold -// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero -// bucket that only receives observations of precisely zero. -const NativeHistogramZeroThresholdZero = -1 - -var errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, -) - -// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the -// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not -// counted and not included in the returned slice. The returned slice is meant -// to be used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. -func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket -// has an upper bound of 'start' and each following bucket's upper bound is -// 'factor' times the previous bucket's upper bound. The final +Inf bucket is -// not counted and not included in the returned slice. The returned slice is -// meant to be used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal 1. 
-func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is -// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. -func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 { - if count < 1 { - panic("ExponentialBucketsRange count needs a positive count") - } - if minBucket <= 0 { - panic("ExponentialBucketsRange min needs to be greater than 0") - } - - // Formula for exponential buckets. - // max = min*growthFactor^(bucketCount-1) - - // We know max/min and highest bucket. Solve for growthFactor. - growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1)) - - // Now that we know growthFactor, solve for each bucket. - buckets := make([]float64, count) - for i := 1; i <= count; i++ { - buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1)) - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name to a non-empty string. All other fields are optional -// and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). 
Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. If Buckets is left as nil or set to a slice of length - // zero, it is replaced by default buckets. The default buckets are - // DefBuckets if no buckets for a native histogram (see below) are used, - // otherwise the default is no buckets. (In other words, if you want to - // use both regular buckets and buckets for a native histogram, you have - // to define the regular buckets here explicitly.) - Buckets []float64 - - // If NativeHistogramBucketFactor is greater than one, so-called sparse - // buckets are used (in addition to the regular buckets, if defined - // above). 
A Histogram with sparse buckets will be ingested as a Native - // Histogram by a Prometheus server with that feature enabled (requires - // Prometheus v2.40+). Sparse buckets are exponential buckets covering - // the whole float64 range (with the exception of the “zero” bucket, see - // NativeHistogramZeroThreshold below). From any one bucket to the next, - // the width of the bucket grows by a constant - // factor. NativeHistogramBucketFactor provides an upper bound for this - // factor (exception see below). The smaller - // NativeHistogramBucketFactor, the more buckets will be used and thus - // the more costly the histogram will become. A generally good trade-off - // between cost and accuracy is a value of 1.1 (each bucket is at most - // 10% wider than the previous one), which will result in each power of - // two divided into 8 buckets (e.g. there will be 8 buckets between 1 - // and 2, same as between 2 and 4, and 4 and 8, etc.). - // - // Details about the actually used factor: The factor is calculated as - // 2^(2^-n), where n is an integer number between (and including) -4 and - // 8. n is chosen so that the resulting factor is the largest that is - // still smaller or equal to NativeHistogramBucketFactor. Note that the - // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) - // ). If NativeHistogramBucketFactor is greater than 1 but smaller than - // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though - // it is larger than the provided NativeHistogramBucketFactor. - // - // NOTE: Native Histograms are still an experimental feature. Their - // behavior might still change without a major version - // bump. Subsequently, all NativeHistogram... options here might still - // change their behavior or name (or might completely disappear) without - // a major version bump. 
- NativeHistogramBucketFactor float64 - // All observations with an absolute value of less or equal - // NativeHistogramZeroThreshold are accumulated into a “zero” bucket. - // For best results, this should be close to a bucket boundary. This is - // usually the case if picking a power of two. If - // NativeHistogramZeroThreshold is left at zero, - // DefNativeHistogramZeroThreshold is used as the threshold. To - // configure a zero bucket with an actual threshold of zero (i.e. only - // observations of precisely zero will go into the zero bucket), set - // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero - // constant (or any negative float value). - NativeHistogramZeroThreshold float64 - - // The next three fields define a strategy to limit the number of - // populated sparse buckets. If NativeHistogramMaxBucketNumber is left - // at zero, the number of buckets is not limited. (Note that this might - // lead to unbounded memory consumption if the values observed by the - // Histogram are sufficiently wide-spread. In particular, this could be - // used as a DoS attack vector. Where the observed values depend on - // external inputs, it is highly recommended to set a - // NativeHistogramMaxBucketNumber.) Once the set - // NativeHistogramMaxBucketNumber is exceeded, the following strategy is - // enacted: - // - First, if the last reset (or the creation) of the histogram is at - // least NativeHistogramMinResetDuration ago, then the whole - // histogram is reset to its initial state (including regular - // buckets). - // - If less time has passed, or if NativeHistogramMinResetDuration is - // zero, no reset is performed. Instead, the zero threshold is - // increased sufficiently to reduce the number of buckets to or below - // NativeHistogramMaxBucketNumber, but not to more than - // NativeHistogramMaxZeroThreshold. Thus, if - // NativeHistogramMaxZeroThreshold is already at or below the current - // zero threshold, nothing happens at this step. 
- // - After that, if the number of buckets still exceeds - // NativeHistogramMaxBucketNumber, the resolution of the histogram is - // reduced by doubling the width of the sparse buckets (up to a - // growth factor between one bucket to the next of 2^(2^4) = 65536, - // see above). - // - Any increased zero threshold or reduced resolution is reset back - // to their original values once NativeHistogramMinResetDuration has - // passed (since the last reset or the creation of the histogram). - NativeHistogramMaxBucketNumber uint32 - NativeHistogramMinResetDuration time.Duration - NativeHistogramMaxZeroThreshold float64 - - // NativeHistogramMaxExemplars limits the number of exemplars - // that are kept in memory for each native histogram. If you leave it at - // zero, a default value of 10 is used. If no exemplars should be kept specifically - // for native histograms, set it to a negative value. (Scrapers can - // still use the exemplars exposed for classic buckets, which are managed - // independently.) - NativeHistogramMaxExemplars int - // NativeHistogramExemplarTTL is only checked once - // NativeHistogramMaxExemplars is exceeded. In that case, the - // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. - // Otherwise, the older exemplar in the pair of exemplars that are closest - // together (on an exponential scale) is removed. - // If NativeHistogramExemplarTTL is left at its zero value, a default value of - // 5m is used. To always delete the oldest exemplar, set it to a negative value. - NativeHistogramExemplarTTL time.Duration - - // now is for testing purposes, by default it's time.Now. - now func() time.Time - - // afterFunc is for testing purposes, by default it's time.AfterFunc. - afterFunc func(time.Duration, func()) *time.Timer -} - -// HistogramVecOpts bundles the options to create a HistogramVec metric. -// It is mandatory to set HistogramOpts, see there for mandatory fields. 
VariableLabels -// is optional and can safely be left to its default value. -type HistogramVecOpts struct { - HistogramOpts - - // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Constraint - // function, if provided. - VariableLabels ConstrainableLabels -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -// -// The returned implementation also implements ExemplarObserver. It is safe to -// perform the corresponding type assertion. Exemplars are tracked separately -// for each bucket. -func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels.names) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) - } - - for _, n := range desc.variableLabels.names { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if opts.now == nil { - opts.now = time.Now - } - if opts.afterFunc == nil { - opts.afterFunc = time.AfterFunc - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, - nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, - nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, - lastResetTime: opts.now(), - now: opts.now, - afterFunc: opts.afterFunc, - } - if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { - h.upperBounds = 
DefBuckets - } - if opts.NativeHistogramBucketFactor <= 1 { - h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. - } else { - switch { - case opts.NativeHistogramZeroThreshold > 0: - h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold - case opts.NativeHistogramZeroThreshold == 0: - h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold - } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. - h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) - h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make buckets - // for both counts as well as exemplars: - h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} - atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) - atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema) - h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} - atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) - atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema) - h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) - - h.init(h) // Init self-collection. 
- return h -} - -type histogramCounts struct { - // Order in this struct matters for the alignment required by atomic - // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG - - // sumBits contains the bits of the float64 representing the sum of all - // observations. - sumBits uint64 - count uint64 - - // nativeHistogramZeroBucket counts all (positive and negative) - // observations in the zero bucket (with an absolute value less or equal - // the current threshold, see next field. - nativeHistogramZeroBucket uint64 - // nativeHistogramZeroThresholdBits is the bit pattern of the current - // threshold for the zero bucket. It's initially equal to - // nativeHistogramZeroThreshold but may change according to the bucket - // count limitation strategy. - nativeHistogramZeroThresholdBits uint64 - // nativeHistogramSchema may change over time according to the bucket - // count limitation strategy and therefore has to be saved here. - nativeHistogramSchema int32 - // Number of (positive and negative) sparse buckets. - nativeHistogramBucketsNumber uint32 - - // Regular buckets. - buckets []uint64 - - // The sparse buckets for native histograms are implemented with a - // sync.Map for now. A dedicated data structure will likely be more - // efficient. There are separate maps for negative and positive - // observations. The map's value is an *int64, counting observations in - // that bucket. (Note that we don't use uint64 as an int64 won't - // overflow in practice, and working with signed numbers from the - // beginning simplifies the handling of deltas.) The map's key is the - // index of the bucket according to the used - // nativeHistogramSchema. Index 0 is for an upper bound of 1. - nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map -} - -// observe manages the parts of observe that only affects -// histogramCounts. doSparse is true if sparse buckets should be done, -// too. 
-func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { - if bucket < len(hc.buckets) { - atomic.AddUint64(&hc.buckets[bucket], 1) - } - atomicAddFloat(&hc.sumBits, v) - if doSparse && !math.IsNaN(v) { - var ( - key int - schema = atomic.LoadInt32(&hc.nativeHistogramSchema) - zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits)) - bucketCreated, isInf bool - ) - if math.IsInf(v, 0) { - // Pretend v is MaxFloat64 but later increment key by one. - if math.IsInf(v, +1) { - v = math.MaxFloat64 - } else { - v = -math.MaxFloat64 - } - isInf = true - } - frac, exp := math.Frexp(math.Abs(v)) - if schema > 0 { - bounds := nativeHistogramBounds[schema] - key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) - } else { - key = exp - if frac == 0.5 { - key-- - } - offset := (1 << -schema) - 1 - key = (key + offset) >> -schema - } - if isInf { - key++ - } - switch { - case v > zeroThreshold: - bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) - case v < -zeroThreshold: - bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) - default: - atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) - } - if bucketCreated { - atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hc.count, 1) -} - -type histogram struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // histogramCounts, as a marker for completion. 
- // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the histogram) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cold fields must - // be merged into the new hot before releasing writeMtx. - // - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - - // Only used in the Write method and for sparse bucket management. - mtx sync.Mutex - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*histogramCounts - - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. - nativeHistogramZeroThreshold float64 // The initial zero threshold. - nativeHistogramMaxZeroThreshold float64 - nativeHistogramMaxBuckets uint32 - nativeHistogramMinResetDuration time.Duration - // lastResetTime is protected by mtx. It is also used as created timestamp. - lastResetTime time.Time - // resetScheduled is protected by mtx. It is true if a reset is - // scheduled for a later time (when nativeHistogramMinResetDuration has - // passed). - resetScheduled bool - nativeExemplars nativeExemplars - - // now is for testing purposes, by default it's time.Now. - now func() time.Time - - // afterFunc is for testing purposes, by default it's time.AfterFunc. 
- afterFunc func(time.Duration, func()) *time.Timer -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - h.observe(v, h.findBucket(v)) -} - -// ObserveWithExemplar should not be called in a high-frequency setting -// for a native histogram with configured exemplars. For this case, -// the implementation isn't lock-free and might suffer from lock contention. -func (h *histogram) ObserveWithExemplar(v float64, e Labels) { - i := h.findBucket(v) - h.observe(v, i) - h.updateExemplar(v, i, e) -} - -func (h *histogram) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. - h.mtx.Lock() - defer h.mtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. 
- hotCounts := h.counts[n>>63] - coldCounts := h.counts[(^n)>>63] - - waitForCooldown(count, coldCounts) - - his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - CreatedTimestamp: timestamppb.New(h.lastResetTime), - } - out.Histogram = his - out.Label = h.labelPairs - - var cumCount uint64 - for i, upperBound := range h.upperBounds { - cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - his.Bucket[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(cumCount), - UpperBound: proto.Float64(upperBound), - } - if e := h.exemplars[i].Load(); e != nil { - his.Bucket[i].Exemplar = e.(*dto.Exemplar) - } - } - // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. - if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { - b := &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(math.Inf(1)), - Exemplar: e.(*dto.Exemplar), - } - his.Bucket = append(his.Bucket, b) - } - if h.nativeHistogramSchema > math.MinInt32 { - his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) - his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) - zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) - - defer func() { - coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) - coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) - }() - - his.ZeroCount = proto.Uint64(zeroBucket) - his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) - his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) - - // Add a no-op span to a 
histogram without observations and with - // a zero threshold of zero. Otherwise, a native histogram would - // look like a classic histogram to scrapers. - if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 { - his.PositiveSpan = []*dto.BucketSpan{{ - Offset: proto.Int32(0), - Length: proto.Uint32(0), - }} - } - - if h.nativeExemplars.isEnabled() { - h.nativeExemplars.Lock() - his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) - h.nativeExemplars.Unlock() - } - - } - addAndResetCounts(hotCounts, coldCounts) - return nil -} - -// findBucket returns the index of the bucket for the provided value, or -// len(h.upperBounds) for the +Inf bucket. -func (h *histogram) findBucket(v float64) int { - n := len(h.upperBounds) - if n == 0 { - return 0 - } - - // Early exit: if v is less than or equal to the first upper bound, return 0 - if v <= h.upperBounds[0] { - return 0 - } - - // Early exit: if v is greater than the last upper bound, return len(h.upperBounds) - if v > h.upperBounds[n-1] { - return n - } - - // For small arrays, use simple linear search - // "magic number" 35 is result of tests on couple different (AWS and baremetal) servers - // see more details here: https://github.com/prometheus/client_golang/pull/1662 - if n < 35 { - for i, bound := range h.upperBounds { - if v <= bound { - return i - } - } - // If v is greater than all upper bounds, return len(h.upperBounds) - return n - } - - // For larger arrays, use stdlib's binary search - return sort.SearchFloat64s(h.upperBounds, v) -} - -// observe is the implementation for Observe without the findBucket part. -func (h *histogram) observe(v float64, bucket int) { - // Do not add to sparse buckets for NaN observations. - doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. 
At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1) - hotCounts := h.counts[n>>63] - hotCounts.observe(v, bucket, doSparse) - if doSparse { - h.limitBuckets(hotCounts, v, bucket) - } -} - -// limitBuckets applies a strategy to limit the number of populated sparse -// buckets. It's generally best effort, and there are situations where the -// number can go higher (if even the lowest resolution isn't enough to reduce -// the number sufficiently, or if the provided counts aren't fully updated yet -// by a concurrently happening Write call). -func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) { - if h.nativeHistogramMaxBuckets == 0 { - return // No limit configured. - } - if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) { - return // Bucket limit not exceeded yet. - } - - h.mtx.Lock() - defer h.mtx.Unlock() - - // The hot counts might have been swapped just before we acquired the - // lock. Re-fetch the hot counts first... - n := atomic.LoadUint64(&h.countAndHotIdx) - hotIdx := n >> 63 - coldIdx := (^n) >> 63 - hotCounts := h.counts[hotIdx] - coldCounts := h.counts[coldIdx] - // ...and then check again if we really have to reduce the bucket count. - if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) { - return // Bucket limit not exceeded after all. - } - // Try the various strategies in order. - if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { - return - } - // One of the other strategies will happen. To undo what they will do as - // soon as enough time has passed to satisfy - // h.nativeHistogramMinResetDuration, schedule a reset at the right time - // if we haven't done so already. 
- if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled { - h.resetScheduled = true - h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset) - } - - if h.maybeWidenZeroBucket(hotCounts, coldCounts) { - return - } - h.doubleBucketWidth(hotCounts, coldCounts) -} - -// maybeReset resets the whole histogram if at least -// h.nativeHistogramMinResetDuration has been passed. It returns true if the -// histogram has been reset. The caller must have locked h.mtx. -func (h *histogram) maybeReset( - hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, -) bool { - // We are using the possibly mocked h.now() rather than - // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || // No reset configured. - h.resetScheduled || // Do not interefere if a reset is already scheduled. - h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { - return false - } - // Completely reset coldCounts. - h.resetCounts(cold) - // Repeat the latest observation to not lose it completely. - cold.observe(value, bucket, true) - // Make coldCounts the new hot counts while resetting countAndHotIdx. - n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) - count := n & ((1 << 63) - 1) - waitForCooldown(count, hot) - // Finally, reset the formerly hot counts, too. - h.resetCounts(hot) - h.lastResetTime = h.now() - return true -} - -// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be -// called without having locked h.mtx. -func (h *histogram) reset() { - h.mtx.Lock() - defer h.mtx.Unlock() - - n := atomic.LoadUint64(&h.countAndHotIdx) - hotIdx := n >> 63 - coldIdx := (^n) >> 63 - hot := h.counts[hotIdx] - cold := h.counts[coldIdx] - // Completely reset coldCounts. - h.resetCounts(cold) - // Make coldCounts the new hot counts while resetting countAndHotIdx. 
- n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63) - count := n & ((1 << 63) - 1) - waitForCooldown(count, hot) - // Finally, reset the formerly hot counts, too. - h.resetCounts(hot) - h.lastResetTime = h.now() - h.resetScheduled = false -} - -// maybeWidenZeroBucket widens the zero bucket until it includes the existing -// buckets closest to the zero bucket (which could be two, if an equidistant -// negative and a positive bucket exists, but usually it's only one bucket to be -// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold -// limits how far the zero bucket can be extended, and if that's not enough to -// include an existing bucket, the method returns false. The caller must have -// locked h.mtx. -func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { - currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits)) - if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold { - return false - } - // Find the key of the bucket closest to zero. - smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive) - smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative) - if smallestNegativeKey < smallestKey { - smallestKey = smallestNegativeKey - } - if smallestKey == math.MaxInt32 { - return false - } - newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema)) - if newZeroThreshold > h.nativeHistogramMaxZeroThreshold { - return false // New threshold would exceed the max threshold. - } - atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) - // Remove applicable buckets. 
- if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { - atomicDecUint32(&cold.nativeHistogramBucketsNumber) - } - if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { - atomicDecUint32(&cold.nativeHistogramBucketsNumber) - } - // Make cold counts the new hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - count := n & ((1 << 63) - 1) - // Swap the pointer names to represent the new roles and make - // the rest less confusing. - hot, cold = cold, hot - waitForCooldown(count, cold) - // Add all the now cold counts to the new hot counts... - addAndResetCounts(hot, cold) - // ...adjust the new zero threshold in the cold counts, too... - atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) - // ...and then merge the newly deleted buckets into the wider zero - // bucket. - mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - key := k.(int) - bucket := v.(*int64) - if key == smallestKey { - // Merge into hot zero bucket... - atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) - // ...and delete from cold counts. - coldBuckets.Delete(key) - atomicDecUint32(&cold.nativeHistogramBucketsNumber) - } else { - // Add to corresponding hot bucket... - if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) - } - // ...and reset cold bucket. 
- atomic.StoreInt64(bucket, 0) - } - return true - } - } - - cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) - cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) - return true -} - -// doubleBucketWidth doubles the bucket width (by decrementing the schema -// number). Note that very sparse buckets could lead to a low reduction of the -// bucket count (or even no reduction at all). The method does nothing if the -// schema is already -4. -func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { - coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) - if coldSchema == -4 { - return // Already at lowest resolution. - } - coldSchema-- - atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) - // Play it simple and just delete all cold buckets. - atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) - deleteSyncMap(&cold.nativeHistogramBucketsNegative) - deleteSyncMap(&cold.nativeHistogramBucketsPositive) - // Make coldCounts the new hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - count := n & ((1 << 63) - 1) - // Swap the pointer names to represent the new roles and make - // the rest less confusing. - hot, cold = cold, hot - waitForCooldown(count, cold) - // Add all the now cold counts to the new hot counts... - addAndResetCounts(hot, cold) - // ...adjust the schema in the cold counts, too... - atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) - // ...and then merge the cold buckets into the wider hot buckets. - merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - key := k.(int) - bucket := v.(*int64) - // Adjust key to match the bucket to merge into. - if key > 0 { - key++ - } - key /= 2 - // Add to corresponding hot bucket. 
- if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) - } - return true - } - } - - cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) - cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) - // Play it simple again and just delete all cold buckets. - atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) - deleteSyncMap(&cold.nativeHistogramBucketsNegative) - deleteSyncMap(&cold.nativeHistogramBucketsPositive) -} - -func (h *histogram) resetCounts(counts *histogramCounts) { - atomic.StoreUint64(&counts.sumBits, 0) - atomic.StoreUint64(&counts.count, 0) - atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) - atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) - atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) - atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) - for i := range h.upperBounds { - atomic.StoreUint64(&counts.buckets[i], 0) - } - deleteSyncMap(&counts.nativeHistogramBucketsNegative) - deleteSyncMap(&counts.nativeHistogramBucketsPositive) -} - -// updateExemplar replaces the exemplar for the provided classic bucket. -// With empty labels, it's a no-op. It panics if any of the labels is invalid. -// If histogram is native, the exemplar will be cached into nativeExemplars, -// which has a limit, and will remove one exemplar when limit is reached. -func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, h.now(), l) - if err != nil { - panic(err) - } - h.exemplars[bucket].Store(e) - doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) - if doSparse { - h.nativeExemplars.addExemplar(e) - } -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. 
This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *MetricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - return V2.NewHistogramVec(HistogramVecOpts{ - HistogramOpts: opts, - VariableLabels: UnconstrainedLabels(labelNames), - }) -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts. -func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec { - desc := V2.NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - opts.VariableLabels, - opts.ConstLabels, - ) - return &HistogramVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts.HistogramOpts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. 
-// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Histogram is created. Implications of -// creating a Histogram without using it and keeping the Histogram for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. 
Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { - h, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return h -} - -// With works as GetMetricWith but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *HistogramVec) With(labels Labels) Observer { - h, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return h -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the HistogramVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &HistogramVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. 
-func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair - createdTs *timestamppb.Timestamp -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{ - CreatedTimestamp: h.createdTs, - } - - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. 
-func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstHistogram would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. -func NewConstHistogramWithCreatedTimestamp( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - ct time.Time, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - createdTs: timestamppb.New(ct), - }, nil -} - -// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where -// NewConstHistogramWithCreatedTimestamp would have returned an error. -func MustNewConstHistogramWithCreatedTimestamp( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - ct time.Time, - labelValues ...string, -) Metric { - m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) 
- if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} - -// pickSchema returns the largest number n between -4 and 8 such that -// 2^(2^-n) is less or equal the provided bucketFactor. -// -// Special cases: -// - bucketFactor <= 1: panics. -// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. -func pickSchema(bucketFactor float64) int32 { - if bucketFactor <= 1 { - panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) - } - floor := math.Floor(math.Log2(math.Log2(bucketFactor))) - switch { - case floor <= -8: - return nativeHistogramSchemaMaximum - case floor >= 4: - return nativeHistogramSchemaMinimum - default: - return -int32(floor) - } -} - -func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { - var ii []int - buckets.Range(func(k, v interface{}) bool { - ii = append(ii, k.(int)) - return true - }) - sort.Ints(ii) - - if len(ii) == 0 { - return nil, nil - } - - var ( - spans []*dto.BucketSpan - deltas []int64 - prevCount int64 - nextI int - ) - - appendDelta := func(count int64) { - *spans[len(spans)-1].Length++ - deltas = append(deltas, count-prevCount) - prevCount = count - } - - for n, i := range ii { - v, _ := buckets.Load(i) - count := atomic.LoadInt64(v.(*int64)) - // Multiple spans with only small gaps in between are probably - // encoded more efficiently as one larger span with a few empty - // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one or two buckets should not create - // a new span. - iDelta := int32(i - nextI) - if n == 0 || iDelta > 2 { - // We have to create a new span, either because we are - // at the very beginning, or because we have found a gap - // of more than two buckets. 
- spans = append(spans, &dto.BucketSpan{ - Offset: proto.Int32(iDelta), - Length: proto.Uint32(0), - }) - } else { - // We have found a small gap (or no gap at all). - // Insert empty buckets as needed. - for j := int32(0); j < iDelta; j++ { - appendDelta(0) - } - } - appendDelta(count) - nextI = i + 1 - } - return spans, deltas -} - -// addToBucket increments the sparse bucket at key by the provided amount. It -// returns true if a new sparse bucket had to be created for that. -func addToBucket(buckets *sync.Map, key int, increment int64) bool { - if existingBucket, ok := buckets.Load(key); ok { - // Fast path without allocation. - atomic.AddInt64(existingBucket.(*int64), increment) - return false - } - // Bucket doesn't exist yet. Slow path allocating new counter. - newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. - if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { - // The bucket was created concurrently in another goroutine. - // Have to increment after all. - atomic.AddInt64(actualBucket.(*int64), increment) - return false - } - return true -} - -// addAndReset returns a function to be used with sync.Map.Range of spare -// buckets in coldCounts. It increments the buckets in the provided hotBuckets -// according to the buckets ranged through. It then resets all buckets ranged -// through to 0 (but leaves them in place so that they don't need to get -// recreated on the next scrape). 
-func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - bucket := v.(*int64) - if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { - atomic.AddUint32(bucketNumber, 1) - } - atomic.StoreInt64(bucket, 0) - return true - } -} - -func deleteSyncMap(m *sync.Map) { - m.Range(func(k, v interface{}) bool { - m.Delete(k) - return true - }) -} - -func findSmallestKey(m *sync.Map) int { - result := math.MaxInt32 - m.Range(func(k, v interface{}) bool { - key := k.(int) - if key < result { - result = key - } - return true - }) - return result -} - -func getLe(key int, schema int32) float64 { - // Here a bit of context about the behavior for the last bucket counting - // regular numbers (called simply "last bucket" below) and the bucket - // counting observations of ±Inf (called "inf bucket" below, with a key - // one higher than that of the "last bucket"): - // - // If we apply the usual formula to the last bucket, its upper bound - // would be calculated as +Inf. The reason is that the max possible - // regular float64 number (math.MaxFloat64) doesn't coincide with one of - // the calculated bucket boundaries. So the calculated boundary has to - // be larger than math.MaxFloat64, and the only float64 larger than - // math.MaxFloat64 is +Inf. However, we want to count actual - // observations of ±Inf in the inf bucket. Therefore, we have to treat - // the upper bound of the last bucket specially and set it to - // math.MaxFloat64. (The upper bound of the inf bucket, with its key - // being one higher than that of the last bucket, naturally comes out as - // +Inf by the usual formula. So that's fine.) - // - // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of - // 1024. If there were a float64 number following math.MaxFloat64, it - // would have a frac of 1.0 and an exp of 1024, or equivalently a frac - // of 0.5 and an exp of 1025. 
However, since frac must be smaller than - // 1, and exp must be smaller than 1025, either representation overflows - // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the - // largest possible float64. Q.E.D.) However, the formula for - // calculating the upper bound from the idx and schema of the last - // bucket results in precisely that. It is either frac=1.0 & exp=1024 - // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, - // by the way, a power of two where the exponent itself is a power of - // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all - // schemas.) So these are the special cases we have to catch below. - if schema < 0 { - exp := key << -schema - if exp == 1024 { - // This is the last bucket before the overflow bucket - // (for ±Inf observations). Return math.MaxFloat64 as - // explained above. - return math.MaxFloat64 - } - return math.Ldexp(1, exp) - } - - fracIdx := key & ((1 << schema) - 1) - frac := nativeHistogramBounds[schema][fracIdx] - exp := (key >> schema) + 1 - if frac == 0.5 && exp == 1025 { - // This is the last bucket before the overflow bucket (for ±Inf - // observations). Return math.MaxFloat64 as explained above. - return math.MaxFloat64 - } - return math.Ldexp(frac, exp) -} - -// waitForCooldown returns after the count field in the provided histogramCounts -// has reached the provided count value. -func waitForCooldown(count uint64, counts *histogramCounts) { - for count != atomic.LoadUint64(&counts.count) { - runtime.Gosched() // Let observations get work done. - } -} - -// atomicAddFloat adds the provided float atomically to another float -// represented by the bit pattern the bits pointer is pointing to. 
-func atomicAddFloat(bits *uint64, v float64) { - for { - loadedBits := atomic.LoadUint64(bits) - newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) - if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { - break - } - } -} - -// atomicDecUint32 atomically decrements the uint32 p points to. See -// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. -func atomicDecUint32(p *uint32) { - atomic.AddUint32(p, ^uint32(0)) -} - -// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero -// bucket) from the cold counts to the corresponding fields in the hot -// counts. Those fields are then reset to 0 in the cold counts. -func addAndResetCounts(hot, cold *histogramCounts) { - atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) - atomic.StoreUint64(&cold.count, 0) - coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) - atomicAddFloat(&hot.sumBits, coldSum) - atomic.StoreUint64(&cold.sumBits, 0) - for i := range hot.buckets { - atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) - atomic.StoreUint64(&cold.buckets[i], 0) - } - atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) - atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) -} - -type nativeExemplars struct { - sync.Mutex - - // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0. - // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. 
- ttl time.Duration - - exemplars []*dto.Exemplar -} - -func (n *nativeExemplars) isEnabled() bool { - return n.ttl != -1 -} - -func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { - if ttl == 0 { - ttl = 5 * time.Minute - } - - if maxCount == 0 { - maxCount = 10 - } - - if maxCount < 0 { - maxCount = 0 - ttl = -1 - } - - return nativeExemplars{ - ttl: ttl, - exemplars: make([]*dto.Exemplar, 0, maxCount), - } -} - -func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { - if !n.isEnabled() { - return - } - - n.Lock() - defer n.Unlock() - - // When the number of exemplars has not yet exceeded or - // is equal to cap(n.exemplars), then - // insert the new exemplar directly. - if len(n.exemplars) < cap(n.exemplars) { - var nIdx int - for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { - if *e.Value < *n.exemplars[nIdx].Value { - break - } - } - n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) - return - } - - if len(n.exemplars) == 1 { - // When the number of exemplars is 1, then - // replace the existing exemplar with the new exemplar. - n.exemplars[0] = e - return - } - // From this point on, the number of exemplars is greater than 1. - - // When the number of exemplars exceeds the limit, remove one exemplar. - var ( - ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. - otIdx = -1 // Index of the exemplar with the oldest timestamp. - - md = -1.0 // Logarithm of the delta of the closest pair of exemplars. - - // The insertion point of the new exemplar in the exemplars slice after insertion. - // This is calculated purely based on the order of the exemplars by value. - // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. - nIdx = -1 - - // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. 
- // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. - // It is calculated in 3 steps: - // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. - // That is the following will be true (on log scale): - // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have - // the closest values to each other from all pairs. - // For example, suppose the values are distributed like this: - // |-----------x-------------x----------------x----x-----| - // ^--rIdx as this is older. - // Or like this: - // |-----------x-------------x----------------x----x-----| - // ^--rIdx as this is older. - // 2. If there is an exemplar that expired, then we simple reset rIdx to that index. - // 3. We check if by inserting the new exemplar we would create a closer pair at - // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to - // keep the spread of exemplars by value; otherwise we keep rIdx as it is. - rIdx = -1 - cLog float64 // Logarithm of the current exemplar. - pLog float64 // Logarithm of the previous exemplar. - ) - - for i, exemplar := range n.exemplars { - // Find the exemplar with the oldest timestamp. - if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { - ot = exemplar.Timestamp.AsTime() - otIdx = i - } - - // Find the index at which to insert new the exemplar. - if nIdx == -1 && *e.Value <= *exemplar.Value { - nIdx = i - } - - // Find the two closest exemplars and pick the one the with older timestamp. - pLog = cLog - cLog = math.Log(exemplar.GetValue()) - if i == 0 { - continue - } - diff := math.Abs(cLog - pLog) - if md == -1 || diff < md { - // The closest exemplar pair is at index: i-1, i. - // Choose the exemplar with the older timestamp for replacement. 
- md = diff - if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { - rIdx = i - } else { - rIdx = i - 1 - } - } - - } - - // If all existing exemplar are smaller than new exemplar, - // then the exemplar should be inserted at the end. - if nIdx == -1 { - nIdx = len(n.exemplars) - } - // Here, we have the following relationships: - // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) - // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) - - if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { - // If the oldest exemplar has expired, then replace it with the new exemplar. - rIdx = otIdx - } else { - // In the previous for loop, when calculating the closest pair of exemplars, - // we did not take into account the newly inserted exemplar. - // So we need to calculate with the newly inserted exemplar again. - elog := math.Log(e.GetValue()) - if nIdx > 0 { - diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) - if diff < md { - // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. - // v--rIdx - // |-----------x-n-----------x----------------x----x-----| - // nIdx-1--^ ^--new exemplar value - // Do not make the spread worse, replace nIdx-1 and not rIdx. - md = diff - rIdx = nIdx - 1 - } - } - if nIdx < len(n.exemplars) { - diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) - if diff < md { - // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. - // v--rIdx - // |-----------x-----------n-x----------------x----x-----| - // new exemplar value--^ ^--nIdx - // Do not make the spread worse, replace nIdx-1 and not rIdx. - rIdx = nIdx - } - } - } - - // Adjust the slice according to rIdx and nIdx. 
- switch { - case rIdx == nIdx: - n.exemplars[nIdx] = e - case rIdx < nIdx: - n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) - case rIdx > nIdx: - n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) - } -} - -type constNativeHistogram struct { - desc *Desc - dto.Histogram - labelPairs []*dto.LabelPair -} - -func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error { - var bucketPopulationSum int64 - for _, v := range positiveBuckets { - bucketPopulationSum += v - } - for _, v := range negativeBuckets { - bucketPopulationSum += v - } - bucketPopulationSum += int64(zeroBucket) - - // If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts. - // Otherwise, the number of observations must be equal to the sum of all bucket counts . - - if math.IsNaN(sum) && bucketPopulationSum > int64(count) || - !math.IsNaN(sum) && bucketPopulationSum != int64(count) { - return errors.New("the sum of all bucket populations exceeds the count of observations") - } - return nil -} - -// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with -// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// zeroBucket counts all (positive and negative) -// observations in the zero bucket (with an absolute value less or equal -// the current threshold). 
-// positiveBuckets and negativeBuckets are separate maps for negative and positive -// observations. The map's value is an int64, counting observations in -// that bucket. The map's key is the -// index of the bucket according to the used -// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets. -// NewConstNativeHistogram returns an error if -// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid. -// - the schema passed is not between 8 and -4 -// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN) -// -// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus. -func NewConstNativeHistogram( - desc *Desc, - count uint64, - sum float64, - positiveBuckets, negativeBuckets map[int]int64, - zeroBucket uint64, - schema int32, - zeroThreshold float64, - createdTimestamp time.Time, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { - return nil, errors.New("invalid native histogram schema") - } - if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { - return nil, err - } - - NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) - PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) - ret := &constNativeHistogram{ - desc: desc, - Histogram: dto.Histogram{ - CreatedTimestamp: timestamppb.New(createdTimestamp), - Schema: &schema, - ZeroThreshold: &zeroThreshold, - SampleCount: &count, - SampleSum: &sum, - - NegativeSpan: NegativeSpan, - NegativeDelta: 
NegativeDelta, - - PositiveSpan: PositiveSpan, - PositiveDelta: PositiveDelta, - - ZeroCount: proto.Uint64(zeroBucket), - }, - labelPairs: MakeLabelPairs(desc, labelValues), - } - if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { - ret.PositiveSpan = []*dto.BucketSpan{{ - Offset: proto.Int32(0), - Length: proto.Uint32(0), - }} - } - return ret, nil -} - -// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where -// NewConstNativeHistogram would have returned an error. -func MustNewConstNativeHistogram( - desc *Desc, - count uint64, - sum float64, - positiveBuckets, negativeBuckets map[int]int64, - zeroBucket uint64, - nativeHistogramSchema int32, - nativeHistogramZeroThreshold float64, - createdTimestamp time.Time, - labelValues ...string, -) Metric { - nativehistogram, err := NewConstNativeHistogram(desc, - count, - sum, - positiveBuckets, - negativeBuckets, - zeroBucket, - nativeHistogramSchema, - nativeHistogramZeroThreshold, - createdTimestamp, - labelValues...) - if err != nil { - panic(err) - } - return nativehistogram -} - -func (h *constNativeHistogram) Desc() *Desc { - return h.desc -} - -func (h *constNativeHistogram) Write(out *dto.Metric) error { - out.Histogram = &h.Histogram - out.Label = h.labelPairs - return nil -} - -func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) { - if len(buckets) == 0 { - return nil, nil - } - var ii []int - for k := range buckets { - ii = append(ii, k) - } - sort.Ints(ii) - - var ( - spans []*dto.BucketSpan - deltas []int64 - prevCount int64 - nextI int - ) - - appendDelta := func(count int64) { - *spans[len(spans)-1].Length++ - deltas = append(deltas, count-prevCount) - prevCount = count - } - - for n, i := range ii { - count := buckets[i] - // Multiple spans with only small gaps in between are probably - // encoded more efficiently as one larger span with a few empty - // buckets. 
Needs some research to find the sweet spot. For now, - // we assume that gaps of one or two buckets should not create - // a new span. - iDelta := int32(i - nextI) - if n == 0 || iDelta > 2 { - // We have to create a new span, either because we are - // at the very beginning, or because we have found a gap - // of more than two buckets. - spans = append(spans, &dto.BucketSpan{ - Offset: proto.Int32(iDelta), - Length: proto.Uint32(0), - }) - } else { - // We have found a small gap (or no gap at all). - // Insert empty buckets as needed. - for j := int32(0); j < iDelta; j++ { - appendDelta(0) - } - } - appendDelta(count) - nextI = i + 1 - } - return spans, deltas -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go deleted file mode 100644 index 1ed5abe74c..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2015 Björn Rabenstein -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. -// -// The code in this package is copy/paste to avoid a dependency. Hence this file -// carries the copyright of the original repo. -// https://github.com/beorn7/floats -package internal - -import ( - "math" -) - -// minNormalFloat64 is the smallest positive normal value of type float64. -var minNormalFloat64 = math.Float64frombits(0x0010000000000000) - -// AlmostEqualFloat64 returns true if a and b are equal within a relative error -// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the -// details of the applied method. -func AlmostEqualFloat64(a, b, epsilon float64) bool { - if a == b { - return true - } - absA := math.Abs(a) - absB := math.Abs(b) - diff := math.Abs(a - b) - if a == 0 || b == 0 || absA+absB < minNormalFloat64 { - return diff < epsilon*minNormalFloat64 - } - return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon -} - -// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. -func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !AlmostEqualFloat64(a[i], b[i], epsilon) { - return false - } - } - return true -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go deleted file mode 100644 index 7bac0da33d..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ /dev/null @@ -1,655 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// Maintaining `GetUnifiedDiffString` here because original repository -// (https://github.com/pmezard/go-difflib) is no longer maintained. -package internal - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" -) - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. 
UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "

" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool, -) *SequenceMatcher { - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. 
-func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// -// and for all (i',j',k') meeting those conditions, -// -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. 
-// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. 
- for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize++ - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize++ - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. 
-// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). -func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. 
- if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{ - c.Tag, i1, minInt(i2, i1+n), - j1, minInt(j2, j1+n), - }) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. 
-func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s]++ - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches++ - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). -func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(minInt(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return strconv.Itoa(beginning) - } - if length == 0 { - beginning-- // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. 
-// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. -func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := fmt.Fprintf(buf, format, args...) 
- return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return w.String(), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. 
-func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go deleted file mode 100644 index a4fa6eabd7..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import "regexp" - -type GoCollectorRule struct { - Matcher *regexp.Regexp - Deny bool -} - -// GoCollectorOptions should not be used be directly by anything, except `collectors` package. -// Use it via collectors package instead. See issue -// https://github.com/prometheus/client_golang/issues/1030. 
-// -// This is internal, so external users only can use it via `collector.WithGoCollector*` methods -type GoCollectorOptions struct { - DisableMemStatsLikeMetrics bool - RuntimeMetricSumForHist map[string]string - RuntimeMetricRules []GoCollectorRule -} - -var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go deleted file mode 100644 index d273b6640e..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package internal - -import ( - "math" - "path" - "runtime/metrics" - "strings" - - "github.com/prometheus/common/model" -) - -// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics -// metric description and validates whether the metric is suitable for integration -// with Prometheus. -// -// Returns false if a name could not be produced, or if Prometheus does not understand -// the runtime/metrics Kind. 
-// -// Note that the main reason a name couldn't be produced is if the runtime/metrics -// package exports a name with characters outside the valid Prometheus metric name -// character set. This is theoretically possible, but should never happen in practice. -// Still, don't rely on it. -func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) { - namespace := "go" - - comp := strings.SplitN(d.Name, ":", 2) - key := comp[0] - unit := comp[1] - - // The last path element in the key is the name, - // the rest is the subsystem. - subsystem := path.Dir(key[1:] /* remove leading / */) - name := path.Base(key) - - // subsystem is translated by replacing all / and - with _. - subsystem = strings.ReplaceAll(subsystem, "/", "_") - subsystem = strings.ReplaceAll(subsystem, "-", "_") - - // unit is translated assuming that the unit contains no - // non-ASCII characters. - unit = strings.ReplaceAll(unit, "-", "_") - unit = strings.ReplaceAll(unit, "*", "_") - unit = strings.ReplaceAll(unit, "/", "_per_") - - // name has - replaced with _ and is concatenated with the unit and - // other data. - name = strings.ReplaceAll(name, "-", "_") - name += "_" + unit - if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { - name += "_total" - } - - // Our current conversion moves to legacy naming, so use legacy validation. - valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) - switch d.Kind { - case metrics.KindUint64: - case metrics.KindFloat64: - case metrics.KindFloat64Histogram: - default: - valid = false - } - return namespace, subsystem, name, valid -} - -// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram -// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces -// a reduced set of buckets. This function always removes any -Inf bucket as it's represented -// as the bottom-most upper-bound inclusive bucket in Prometheus. 
-func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { - switch unit { - case "bytes": - // Re-bucket as powers of 2. - return reBucketExp(buckets, 2) - case "seconds": - // Re-bucket as powers of 10 and then merge all buckets greater - // than 1 second into the +Inf bucket. - b := reBucketExp(buckets, 10) - for i := range b { - if b[i] <= 1 { - continue - } - b[i] = math.Inf(1) - b = b[:i+1] - break - } - return b - } - return buckets -} - -// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and -// downsamples the buckets to those a multiple of base apart. The end result -// is a roughly exponential (in many cases, perfectly exponential) bucketing -// scheme. -func reBucketExp(buckets []float64, base float64) []float64 { - bucket := buckets[0] - var newBuckets []float64 - // We may see a -Inf here, in which case, add it and skip it - // since we risk producing NaNs otherwise. - // - // We need to preserve -Inf values to maintain runtime/metrics - // conventions. We'll strip it out later. - if bucket == math.Inf(-1) { - newBuckets = append(newBuckets, bucket) - buckets = buckets[1:] - bucket = buckets[0] - } - // From now on, bucket should always have a non-Inf value because - // Infs are only ever at the ends of the bucket lists, so - // arithmetic operations on it are non-NaN. - for i := 1; i < len(buckets); i++ { - if bucket >= 0 && buckets[i] < bucket*base { - // The next bucket we want to include is at least bucket*base. - continue - } else if bucket < 0 && buckets[i] < bucket/base { - // In this case the bucket we're targeting is negative, and since - // we're ascending through buckets here, we need to divide to get - // closer to zero exponentially. 
- continue - } - // The +Inf bucket will always be the last one, and we'll always - // end up including it here because bucket - newBuckets = append(newBuckets, bucket) - bucket = buckets[i] - } - return append(newBuckets, bucket) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go deleted file mode 100644 index 6515c11480..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// LabelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type LabelPairSorter []*dto.LabelPair - -func (s LabelPairSorter) Len() int { - return len(s) -} - -func (s LabelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s LabelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -// MetricSorter is a sortable slice of *dto.Metric. -type MetricSorter []*dto.Metric - -func (s MetricSorter) Len() int { - return len(s) -} - -func (s MetricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s MetricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. 
However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// NormalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. 
-func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(MetricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index 5fe8d3b4d2..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. 
-type Labels map[string]string - -// LabelConstraint normalizes label values. -type LabelConstraint func(string) string - -// ConstrainedLabels represents a label name and its constrain function -// to normalize label values. This type is commonly used when constructing -// metric vector Collectors. -type ConstrainedLabel struct { - Name string - Constraint LabelConstraint -} - -// ConstrainableLabels is an interface that allows creating of labels that can -// be optionally constrained. -// -// prometheus.V2().NewCounterVec(CounterVecOpts{ -// CounterOpts: {...}, // Usual CounterOpts fields -// VariableLabels: []ConstrainedLabels{ -// {Name: "A"}, -// {Name: "B", Constraint: func(v string) string { ... }}, -// }, -// }) -type ConstrainableLabels interface { - compile() *compiledLabels - labelNames() []string -} - -// ConstrainedLabels represents a collection of label name -> constrain function -// to normalize label values. This type is commonly used when constructing -// metric vector Collectors. -type ConstrainedLabels []ConstrainedLabel - -func (cls ConstrainedLabels) compile() *compiledLabels { - compiled := &compiledLabels{ - names: make([]string, len(cls)), - labelConstraints: map[string]LabelConstraint{}, - } - - for i, label := range cls { - compiled.names[i] = label.Name - if label.Constraint != nil { - compiled.labelConstraints[label.Name] = label.Constraint - } - } - - return compiled -} - -func (cls ConstrainedLabels) labelNames() []string { - names := make([]string, len(cls)) - for i, label := range cls { - names[i] = label.Name - } - return names -} - -// UnconstrainedLabels represents collection of label without any constraint on -// their value. Thus, it is simply a collection of label names. 
-// -// UnconstrainedLabels([]string{ "A", "B" }) -// -// is equivalent to -// -// ConstrainedLabels { -// { Name: "A" }, -// { Name: "B" }, -// } -type UnconstrainedLabels []string - -func (uls UnconstrainedLabels) compile() *compiledLabels { - return &compiledLabels{ - names: uls, - } -} - -func (uls UnconstrainedLabels) labelNames() []string { - return uls -} - -type compiledLabels struct { - names []string - labelConstraints map[string]LabelConstraint -} - -func (cls *compiledLabels) compile() *compiledLabels { - return cls -} - -func (cls *compiledLabels) labelNames() []string { - return cls.names -} - -func (cls *compiledLabels) constrain(labelName, value string) string { - if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil { - return fn(value) - } - return value -} - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { - return fmt.Errorf( - "%w: %q has %d variable labels named %q but %d values %q were provided", - errInconsistentCardinality, fqName, - len(labels), labels, - len(labelValues), labelValues, - ) -} - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return fmt.Errorf( - "%w: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(labels), labels, - ) - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - // The call below makes vals escape, copy them to avoid that. - vals := append([]string(nil), vals...) 
- return fmt.Errorf( - "%w: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(vals), vals, - ) - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. - return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index 76e59f1288..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sort" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" - "google.golang.org/protobuf/proto" -) - -var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. 
-type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. - // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. Ideally, Metric implementations should support concurrent - // readers. - // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. Callers of Write should - // still make sure of sorting if they depend on it. - Write(*dto.Metric) error - // TODO(beorn7): The original rationale of passing in a pre-allocated - // dto.Metric protobuf to save allocations has disappeared. The - // signature of this method should be changed to "Write() (*dto.Metric, - // error)". -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just -// an alias of this type (which might change when the requirement arises.) -// -// It is mandatory to set Name to a non-empty string. All other fields are -// optional and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). 
Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // now is for testing purposes, by default it's time.Now. - now func() time.Time -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly. 
-func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - - sb := strings.Builder{} - sb.Grow(len(namespace) + len(subsystem) + len(name) + 2) - - if namespace != "" { - sb.WriteString(namespace) - sb.WriteString("_") - } - - if subsystem != "" { - sb.WriteString(subsystem) - sb.WriteString("_") - } - - sb.WriteString(name) - - return sb.String() -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } - -type timestampedMetric struct { - Metric - t time.Time -} - -func (m timestampedMetric) Write(pb *dto.Metric) error { - e := m.Metric.Write(pb) - pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) - return e -} - -// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a -// way that it has an explicit timestamp set to the provided Time. This is only -// useful in rare cases as the timestamp of a Prometheus metric should usually -// be set by the Prometheus server during scraping. Exceptions include mirroring -// metrics with given timestamps from other metric -// sources. -// -// NewMetricWithTimestamp works best with MustNewConstMetric, -// MustNewConstHistogram, and MustNewConstSummary, see example. -// -// Currently, the exposition formats used by Prometheus are limited to -// millisecond resolution. Thus, the provided time will be rounded down to the -// next full millisecond value. 
-func NewMetricWithTimestamp(t time.Time, m Metric) Metric { - return timestampedMetric{Metric: m, t: t} -} - -type withExemplarsMetric struct { - Metric - - exemplars []*dto.Exemplar -} - -func (m *withExemplarsMetric) Write(pb *dto.Metric) error { - if err := m.Metric.Write(pb); err != nil { - return err - } - - switch { - case pb.Counter != nil: - pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] - case pb.Histogram != nil: - h := pb.Histogram - for _, e := range m.exemplars { - if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || - len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) && - e.GetTimestamp() != nil { - h.Exemplars = append(h.Exemplars, e) - if len(h.Bucket) == 0 { - // Don't proceed to classic buckets if there are none. - continue - } - } - // h.Bucket are sorted by UpperBound. - i := sort.Search(len(h.Bucket), func(i int) bool { - return h.Bucket[i].GetUpperBound() >= e.GetValue() - }) - if i < len(h.Bucket) { - h.Bucket[i].Exemplar = e - } else { - // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. - b := &dto.Bucket{ - CumulativeCount: proto.Uint64(h.GetSampleCount()), - UpperBound: proto.Float64(math.Inf(1)), - Exemplar: e, - } - h.Bucket = append(h.Bucket, b) - } - } - default: - // TODO(bwplotka): Implement Gauge? - return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") - } - - return nil -} - -// Exemplar is easier to use, user-facing representation of *dto.Exemplar. -type Exemplar struct { - Value float64 - Labels Labels - // Optional. - // Default value (time.Time{}) indicates its empty, which should be - // understood as time.Now() time at the moment of creation of metric. - Timestamp time.Time -} - -// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given -// exemplars. Exemplars are validated. 
-// -// Only last applicable exemplar is injected from the list. -// For example for Counter it means last exemplar is injected. -// For Histogram, it means last applicable exemplar for each bucket is injected. -// For a Native Histogram, all valid exemplars are injected. -// -// NewMetricWithExemplars works best with MustNewConstMetric and -// MustNewConstHistogram, see example. -func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { - if len(exemplars) == 0 { - return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") - } - - var ( - now = time.Now() - exs = make([]*dto.Exemplar, len(exemplars)) - err error - ) - for i, e := range exemplars { - ts := e.Timestamp - if ts.IsZero() { - ts = now - } - exs[i], err = newExemplar(e.Value, ts, e.Labels) - if err != nil { - return nil, err - } - } - - return &withExemplarsMetric{Metric: m, exemplars: exs}, nil -} - -// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where -// NewMetricWithExemplars would have returned an error. -func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { - ret, err := NewMetricWithExemplars(m, exemplars...) - if err != nil { - panic(err) - } - return ret -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go deleted file mode 100644 index 7c12b21087..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !js || wasm -// +build !js wasm - -package prometheus - -import "runtime" - -// getRuntimeNumThreads returns the number of open OS threads. -func getRuntimeNumThreads() float64 { - n, _ := runtime.ThreadCreateProfile(nil) - return float64(n) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go deleted file mode 100644 index 7348df01df..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js && !wasm -// +build js,!wasm - -package prometheus - -// getRuntimeNumThreads returns the number of open OS threads. 
-func getRuntimeNumThreads() float64 { - return 1 -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index 03773b21f7..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. -type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. 
-type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - CurryWith(Labels) (ObserverVec, error) - MustCurryWith(Labels) ObserverVec - - Collector -} - -// ExemplarObserver is implemented by Observers that offer the option of -// observing a value together with an exemplar. Its ObserveWithExemplar method -// works like the Observe method of an Observer but also replaces the currently -// saved exemplar (if any) with a new one, created from the provided value, the -// current time as timestamp, and the provided Labels. Empty Labels will lead to -// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is -// left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 128 runes in total. -type ExemplarObserver interface { - ObserveWithExemplar(value float64, exemplar Labels) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index e7bce8b58e..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "errors" - "fmt" - "os" - "strconv" - "strings" -) - -type processCollector struct { - collectFn func(chan<- Metric) - describeFn func(chan<- *Desc) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc - inBytes, outBytes *Desc -} - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool -} - -// NewProcessCollector is the obsolete version of collectors.NewProcessCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewProcessCollector instead. 
-func NewProcessCollector(opts ProcessCollectorOpts) Collector { - ns := "" - if len(opts.Namespace) > 0 { - ns = opts.Namespace + "_" - } - - c := &processCollector{ - reportErrors: opts.ReportErrors, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - maxVsize: NewDesc( - ns+"process_virtual_memory_max_bytes", - "Maximum amount of virtual memory available in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - inBytes: NewDesc( - ns+"process_network_receive_bytes_total", - "Number of bytes received by the process over the network.", - nil, nil, - ), - outBytes: NewDesc( - ns+"process_network_transmit_bytes_total", - "Number of bytes sent by the process over the network.", - nil, nil, - ), - } - - if opts.PidFn == nil { - c.pidFn = getPIDFn() - } else { - c.pidFn = opts.PidFn - } - - // Set up process metric collection if supported by the runtime. 
- if canCollectProcess() { - c.collectFn = c.processCollect - c.describeFn = c.describe - } else { - c.collectFn = c.errorCollectFn - c.describeFn = c.errorDescribeFn - } - - return c -} - -func (c *processCollector) errorCollectFn(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) -} - -func (c *processCollector) errorDescribeFn(ch chan<- *Desc) { - if c.reportErrors { - ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform")) - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - c.describeFn(ch) -} - -func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { - if !c.reportErrors { - return - } - if desc == nil { - desc = NewInvalidDesc(err) - } - ch <- NewInvalidMetric(desc, err) -} - -// NewPidFileFn returns a function that retrieves a pid from the specified file. -// It is meant to be used for the PidFn field in ProcessCollectorOpts. 
-func NewPidFileFn(pidFilePath string) func() (int, error) { - return func() (int, error) { - content, err := os.ReadFile(pidFilePath) - if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) - } - pid, err := strconv.Atoi(strings.TrimSpace(string(content))) - if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) - } - - return pid, nil - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go deleted file mode 100644 index b32c95fa3f..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin && !ios - -package prometheus - -import ( - "errors" - "fmt" - "os" - "syscall" - "time" - - "golang.org/x/sys/unix" -) - -// errNotImplemented is returned by stub functions that replace cgo functions, when cgo -// isn't available. 
-var errNotImplemented = errors.New("not implemented") - -type memoryInfo struct { - vsize uint64 // Virtual memory size in bytes - rss uint64 // Resident memory size in bytes -} - -func canCollectProcess() bool { - return true -} - -func getSoftLimit(which int) (uint64, error) { - rlimit := syscall.Rlimit{} - - if err := syscall.Getrlimit(which, &rlimit); err != nil { - return 0, err - } - - return rlimit.Cur, nil -} - -func getOpenFileCount() (float64, error) { - // Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to - // return a list of open fds, but that requires a way to call C APIs. The - // benefits, however, include fewer system calls and not failing when at the - // open file soft limit. - - if dir, err := os.Open("/dev/fd"); err != nil { - return 0.0, err - } else { - defer dir.Close() - - // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is - // that info not used, but KQUEUE descriptors fail stat(2), which causes - // the whole method to fail. - if names, err := dir.Readdirnames(0); err != nil { - return 0.0, err - } else { - // Subtract 1 to ignore the open /dev/fd descriptor above. - return float64(len(names) - 1), nil - } - } -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil { - if len(procs) == 1 { - startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9) - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs)) - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, c.startTime, err) - } - - // The proc structure returned by kern.proc.pid above has an Rusage member, - // but it is not filled in, so it needs to be fetched by getrusage(2). For - // that call, the UTime, STime, and Maxrss members are filled out, but not - // Ixrss, Idrss, or Isrss for the memory usage. 
Memory stats will require - // access to the C API to call task_info(TASK_BASIC_INFO). - rusage := unix.Rusage{} - - if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil { - cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds() - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime) - } else { - c.reportError(ch, c.cpuTotal, err) - } - - if memInfo, err := getMemory(); err == nil { - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, errNotImplemented) { - // Don't report an error when support is not compiled in. - c.reportError(ch, c.rss, err) - c.reportError(ch, c.vsize, err) - } - - if fds, err := getOpenFileCount(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds) - } else { - c.reportError(ch, c.openFDs, err) - } - - if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles)) - } else { - c.reportError(ch, c.maxFDs, err) - } - - if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil { - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace)) - } else { - c.reportError(ch, c.maxVsize, err) - } - - // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might - // be able to get the per-process network send/receive counts. -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c deleted file mode 100644 index d00a24315d..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin && !ios && cgo - -#include -#include -#include - -// The compiler warns that mach/shared_memory_server.h is deprecated, and to use -// mach/shared_region.h instead. But that doesn't define -// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and -// avoid a warning message when running tests. -#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U -#define SHARED_DATA_REGION_SIZE 0x10000000 -#define SHARED_TEXT_REGION_SIZE 0x10000000 - - -int get_memory_info(unsigned long long *rss, unsigned long long *vsize) -{ - // This is lightly adapted from how ps(1) obtains its memory info. - // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109 - - kern_return_t error; - task_t task = MACH_PORT_NULL; - mach_task_basic_info_data_t info; - mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT; - - error = task_info( - mach_task_self(), - MACH_TASK_BASIC_INFO, - (task_info_t) &info, - &info_count ); - - if( error != KERN_SUCCESS ) - { - return error; - } - - *rss = info.resident_size; - *vsize = info.virtual_size; - - { - vm_region_basic_info_data_64_t b_info; - mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT; - mach_vm_size_t size; - mach_port_t object_name; - - /* - * try to determine if this task has the split libraries - * mapped in... 
if so, adjust its virtual size down by - * the 2 segments that are used for split libraries - */ - info_count = VM_REGION_BASIC_INFO_COUNT_64; - - error = mach_vm_region( - mach_task_self(), - &address, - &size, - VM_REGION_BASIC_INFO_64, - (vm_region_info_t) &b_info, - &info_count, - &object_name); - - if (error == KERN_SUCCESS) { - if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) && - *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) { - *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE); - } - } - } - - return 0; -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go deleted file mode 100644 index 9ac53f9992..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build darwin && !ios && cgo - -package prometheus - -/* -int get_memory_info(unsigned long long *rss, unsigned long long *vs); -*/ -import "C" -import "fmt" - -func getMemory() (*memoryInfo, error) { - var rss, vsize C.ulonglong - - if err := C.get_memory_info(&rss, &vsize); err != 0 { - return nil, fmt.Errorf("task_info() failed with 0x%x", int(err)) - } - - return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil -} - -// describe returns all descriptions of the collector for Darwin. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. -func (c *processCollector) describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.maxVsize - ch <- c.startTime - ch <- c.rss - ch <- c.vsize - - /* the process could be collected but not implemented yet - ch <- c.inBytes - ch <- c.outBytes - */ -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go deleted file mode 100644 index 378865129b..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin && !ios && !cgo - -package prometheus - -func getMemory() (*memoryInfo, error) { - return nil, errNotImplemented -} - -// describe returns all descriptions of the collector for Darwin. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. -func (c *processCollector) describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.maxVsize - ch <- c.startTime - - /* the process could be collected but not implemented yet - ch <- c.rss - ch <- c.vsize - ch <- c.inBytes - ch <- c.outBytes - */ -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go deleted file mode 100644 index 7732b7f376..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build wasip1 || js || ios -// +build wasip1 js ios - -package prometheus - -func canCollectProcess() bool { - return false -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - c.errorCollectFn(ch) -} - -// describe returns all descriptions of the collector for wasip1 and js. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. -func (c *processCollector) describe(ch chan<- *Desc) { - c.errorDescribeFn(ch) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go deleted file mode 100644 index 8074f70f5d..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !windows && !js && !wasip1 && !darwin -// +build !windows,!js,!wasip1,!darwin - -package prometheus - -import ( - "github.com/prometheus/procfs" -) - -func canCollectProcess() bool { - _, err := procfs.NewDefaultFS() - return err == nil -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.Stat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.Limits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } - - if netstat, err := p.Netstat(); err == nil { - var inOctets, outOctets float64 - if netstat.InOctets != nil { - inOctets = *netstat.InOctets - } - if netstat.OutOctets != nil { - outOctets = *netstat.OutOctets - } - ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) - ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) - } else { - c.reportError(ch, nil, err) - } -} - -// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin. 
-// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. -func (c *processCollector) describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime - ch <- c.inBytes - ch <- c.outBytes -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go deleted file mode 100644 index fa474289ef..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -func canCollectProcess() bool { - return true -} - -var ( - modpsapi = syscall.NewLazyDLL("psapi.dll") - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") - procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") -) - -type processMemoryCounters struct { - // System interface description - // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex - - // Refer to the Golang internal implementation - // https://golang.org/src/internal/syscall/windows/psapi_windows.go - _ uint32 - PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivateUsage uintptr -} - -func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { - mem := processMemoryCounters{} - r1, _, err := procGetProcessMemoryInfo.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&mem)), - uintptr(unsafe.Sizeof(mem)), - ) - if r1 != 1 { - return mem, err - } else { - return mem, nil - } -} - -func getProcessHandleCount(handle windows.Handle) (uint32, error) { - var count uint32 - r1, _, err := procGetProcessHandleCount.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&count)), - ) - if r1 != 1 { - return 0, err - } else { - return count, nil - } -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - h := windows.CurrentProcess() - - var startTime, exitTime, kernelTime, userTime windows.Filetime - err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) - ch <- 
MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) - - mem, err := getProcessMemoryInfo(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) - - handles, err := getProcessHandleCount(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. -} - -// describe returns all descriptions of the collector for windows. -// Ensure that this list of descriptors is kept in sync with the metrics collected -// in the processCollect method. Any changes to the metrics in processCollect -// (such as adding or removing metrics) should be reflected in this list of descriptors. -func (c *processCollector) describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.rss - ch <- c.startTime -} - -func fileTimeToSeconds(ft windows.Filetime) float64 { - return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index c6fd2f58b7..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,1076 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bytes" - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode/utf8" - - "github.com/prometheus/client_golang/prometheus/internal" - - "github.com/cespare/xxhash/v2" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - "google.golang.org/protobuf/proto" -) - -const ( - // Capacity for the channel to collect metrics and descriptors. - capMetricChan = 1000 - capDescChan = 10 -) - -// DefaultRegisterer and DefaultGatherer are the implementations of the -// Registerer and Gatherer interface a number of convenience functions in this -// package act on. Initially, both variables point to the same Registry, which -// has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector, in particular the note about -// stop-the-world implication with Go versions older than 1.9) already -// registered. This approach to keep default instances as global state mirrors -// the approach of other packages in the Go standard library. Note that there -// are caveats. Change the variables with caution and only if you understand the -// consequences. Users who want to avoid global state altogether should not use -// the convenience functions and act on custom instances instead. 
-var ( - defaultRegistry = NewRegistry() - DefaultRegisterer Registerer = defaultRegistry - DefaultGatherer Gatherer = defaultRegistry -) - -func init() { - MustRegister(NewProcessCollector(ProcessCollectorOpts{})) - MustRegister(NewGoCollector()) -} - -// NewRegistry creates a new vanilla Registry without any Collectors -// pre-registered. -func NewRegistry() *Registry { - return &Registry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - } -} - -// NewPedanticRegistry returns a registry that checks during collection if each -// collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. Unchecked Collectors (those whose -// Describe method does not yield any descriptors) are excluded from the check. -// -// Usually, a Registry will be happy as long as the union of all collected -// Metrics is consistent and valid even if some metrics are not consistent with -// their own Desc or a Desc provided by their registered Collector. Well-behaved -// Collectors and Metrics will only provide consistent Descs. This Registry is -// useful to test the implementation of Collectors and Metrics. -func NewPedanticRegistry() *Registry { - r := NewRegistry() - r.pedanticChecksEnabled = true - return r -} - -// Registerer is the interface for the part of a registry in charge of -// registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type -// directly). In that way, they are free to use custom Registerer implementation -// (e.g. for testing purposes). -type Registerer interface { - // Register registers a new Collector to be included in metrics - // collection. 
It returns an error if the descriptors provided by the - // Collector are invalid or if they — in combination with descriptors of - // already registered Collectors — do not fulfill the consistency and - // uniqueness criteria described in the documentation of metric.Desc. - // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // A Collector whose Describe method does not yield any Desc is treated - // as unchecked. Registration will always succeed. No check for - // re-registering (see previous paragraph) is performed. Thus, the - // caller is responsible for not double-registering the same unchecked - // Collector, and for providing a Collector that will not cause - // inconsistent metrics on collection. (This would lead to scrape - // errors.) - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. Note that an unchecked - // Collector cannot be unregistered (as its Describe method does not - // yield any descriptor). - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. 
- Unregister(Collector) bool -} - -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) -} - -// Register registers the provided Collector with the DefaultRegisterer. -// -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. 
-func Register(c Collector) error { - return DefaultRegisterer.Register(c) -} - -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) -} - -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. -func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. -func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() -} - -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" -} - -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. -type MultiError []error - -// Error formats the contained errors as a bullet point list, preceded by the -// total number of errors. 
Note that this results in a multi-line string. -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() -} - -// Append appends the provided error if it is not nil. -func (errs *MultiError) Append(err error) { - if err != nil { - *errs = append(*errs, err) - } -} - -// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only -// contained error as error if len(errs is 1). In all other cases, it returns -// the MultiError directly. This is helpful for returning a MultiError in a way -// that only uses the MultiError if needed. -func (errs MultiError) MaybeUnwrap() error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errs - } -} - -// Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements Registerer, Gatherer, -// and Collector. The zero value is not usable. Create instances with -// NewRegistry or NewPedanticRegistry. -// -// Registry implements Collector to allow it to be used for creating groups of -// metrics. See the Grouping example for how this can be done. -type Registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - uncheckedCollectors []Collector - pedanticChecksEnabled bool -} - -// Register implements Registerer. -func (r *Registry) Register(c Collector) error { - var ( - descChan = make(chan *Desc, capDescChan) - newDescIDs = map[uint64]struct{}{} - newDimHashesByName = map[string]uint64{} - collectorID uint64 // All desc IDs XOR'd together. 
- duplicateDescErr error - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - r.mtx.Lock() - defer func() { - // Drain channel in case of premature return to not leak a goroutine. - for range descChan { - } - r.mtx.Unlock() - }() - // Conduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? - if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) - if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, XOR it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID ^= desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? - // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - continue - } - - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - continue - } - newDimHashesByName[desc.fqName] = desc.dimHash - } - // A Collector yielding no Desc at all is considered unchecked. 
- if len(newDescIDs) == 0 { - r.uncheckedCollectors = append(r.uncheckedCollectors, c) - return nil - } - if existing, exists := r.collectorsByID[collectorID]; exists { - switch e := existing.(type) { - case *wrappingCollector: - return AlreadyRegisteredError{ - ExistingCollector: e.unwrapRecursively(), - NewCollector: c, - } - default: - return AlreadyRegisteredError{ - ExistingCollector: e, - NewCollector: c, - } - } - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return nil -} - -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // All desc IDs XOR'd together. - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID ^= desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -// MustRegister implements Registerer. -func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -// Gather implements Gatherer. 
-func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - r.mtx.RLock() - - if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { - // Fast path. - r.mtx.RUnlock() - return nil, nil - } - - var ( - checkedMetricChan = make(chan Metric, capMetricChan) - uncheckedMetricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) - - goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - checkedCollectors := make(chan Collector, len(r.collectorsByID)) - uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) - for _, collector := range r.collectorsByID { - checkedCollectors <- collector - } - for _, collector := range r.uncheckedCollectors { - uncheckedCollectors <- collector - } - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. - if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - r.mtx.RUnlock() - - wg.Add(goroutineBudget) - - collectWorker := func() { - for { - select { - case collector := <-checkedCollectors: - collector.Collect(checkedMetricChan) - case collector := <-uncheckedCollectors: - collector.Collect(uncheckedMetricChan) - default: - return - } - wg.Done() - } - } - - // Start the first worker now to make sure at least one is running. - go collectWorker() - goroutineBudget-- - - // Close checkedMetricChan and uncheckedMetricChan once all collectors - // are collected. - go func() { - wg.Wait() - close(checkedMetricChan) - close(uncheckedMetricChan) - }() - - // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. 
- defer func() { - if checkedMetricChan != nil { - for range checkedMetricChan { - } - } - if uncheckedMetricChan != nil { - for range uncheckedMetricChan { - } - } - }() - - // Copy the channel references so we can nil them out later to remove - // them from the select statements below. - cmc := checkedMetricChan - umc := uncheckedMetricChan - - for { - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - default: - if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { - // All collectors are already being worked on or - // we have already as many goroutines started as - // there are collectors. Do the same as above, - // just without the default. - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - } - break - } - // Start more workers. - go collectWorker() - goroutineBudget-- - runtime.Gosched() - } - // Once both checkedMetricChan and uncheckedMetricChan are closed - // and drained, the contraption above will nil out cmc and umc, - // and then we can leave the collect loop here. - if cmc == nil && umc == nil { - break - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// Describe implements Collector. -func (r *Registry) Describe(ch chan<- *Desc) { - r.mtx.RLock() - defer r.mtx.RUnlock() - - // Only report the checked Collectors; unchecked collectors don't report any - // Desc. 
- for _, c := range r.collectorsByID { - c.Describe(ch) - } -} - -// Collect implements Collector. -func (r *Registry) Collect(ch chan<- Metric) { - r.mtx.RLock() - defer r.mtx.RUnlock() - - for _, c := range r.collectorsByID { - c.Collect(ch) - } - for _, c := range r.uncheckedCollectors { - c.Collect(ch) - } -} - -// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the -// Prometheus text format, and writes it to a temporary file. Upon success, the -// temporary file is renamed to the provided filename. -// -// This is intended for use with the textfile collector of the node exporter. -// Note that the node exporter expects the filename to be suffixed with ".prom". -func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) - if err != nil { - return err - } - defer os.Remove(tmp.Name()) - - mfs, err := g.Gather() - if err != nil { - return err - } - for _, mf := range mfs { - if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { - return err - } - } - if err := tmp.Close(); err != nil { - return err - } - - if err := os.Chmod(tmp.Name(), 0o644); err != nil { - return err - } - return os.Rename(tmp.Name(), filename) -} - -// processMetric is an internal helper method only used by the Gather method. -func processMetric( - metric Metric, - metricFamiliesByName map[string]*dto.MetricFamily, - metricHashes map[uint64]struct{}, - registeredDescIDs map[uint64]struct{}, -) error { - desc := metric.Desc() - // Wrapped metrics collected by an unchecked Collector can have an - // invalid Desc. - if desc.err != nil { - return desc.err - } - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %w", desc, err) - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { // Existing name. 
- if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - ) - } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - return fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - return fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - return fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - return fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - ) - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { // New name. - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. 
- switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { - return err - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { - return err - } - if registeredDescIDs != nil { - // Is the desc registered at all? - if _, exist := registeredDescIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - return nil -} - -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calls are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. 
Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. -func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - errs MultiError // The collected errors to return in the end. - ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - multiErr := MultiError{} - if errors.As(err, &multiErr) { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) - } - } - for _, mf := range mfs { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue - } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { - errs = append(errs, err) - continue - } - metricFamiliesByName[mf.GetName()] = existingMF - } - for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { - errs = append(errs, err) - continue - 
} - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// checkSuffixCollisions checks for collisions with the “magic” suffixes the -// Prometheus text format and the internal metric representation of the -// Prometheus server add while flattening Summaries and Histograms. -func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { - var ( - newName = mf.GetName() - newType = mf.GetType() - newNameWithoutSuffix = "" - ) - switch { - case strings.HasSuffix(newName, "_count"): - newNameWithoutSuffix = newName[:len(newName)-6] - case strings.HasSuffix(newName, "_sum"): - newNameWithoutSuffix = newName[:len(newName)-4] - case strings.HasSuffix(newName, "_bucket"): - newNameWithoutSuffix = newName[:len(newName)-7] - } - if newNameWithoutSuffix != "" { - if existingMF, ok := mfs[newNameWithoutSuffix]; ok { - switch existingMF.GetType() { - case dto.MetricType_SUMMARY: - if !strings.HasSuffix(newName, "_bucket") { - return fmt.Errorf( - "collected metric named %q collides with previously collected summary named %q", - newName, newNameWithoutSuffix, - ) - } - case dto.MetricType_HISTOGRAM: - return fmt.Errorf( - "collected metric named %q collides with previously collected histogram named %q", - newName, newNameWithoutSuffix, - ) - } - } - } - if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_count"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_count", - ) - } - if _, ok := mfs[newName+"_sum"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_sum", - ) - } - } - if newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_bucket"]; ok { - return fmt.Errorf( - "collected histogram named %q 
collides with previously collected metric named %q", - newName, newName+"_bucket", - ) - } - } - return nil -} - -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashes the Metric labels and the MetricFamily -// name. If the resulting hash is already in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, -) error { - name := metricFamily.GetName() - - // Type consistency with metric family. - if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %q { %s} is not a %s", - name, dtoMetric, metricFamily.GetType(), - ) - } - - previousLabelName := "" - for _, labelPair := range dtoMetric.GetLabel() { - labelName := labelPair.GetName() - if labelName == previousLabelName { - return fmt.Errorf( - "collected metric %q { %s} has two or more labels with the same name: %s", - name, dtoMetric, labelName, - ) - } - if !checkLabelName(labelName) { - return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - name, dtoMetric, labelName, - ) - } - if dtoMetric.Summary != nil && labelName == quantileLabel { - return fmt.Errorf( - "collected metric %q { %s} must not have an explicit %q label", - name, dtoMetric, quantileLabel, - ) - } - if !utf8.ValidString(labelPair.GetValue()) { - return fmt.Errorf( - "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - name, dtoMetric, labelName, labelPair.GetValue()) - } - 
previousLabelName = labelName - } - - // Is the metric unique (i.e. no other metric with the same name and the same labels)? - h := xxhash.New() - h.WriteString(name) - h.Write(separatorByteSlice) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. - if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { - // We cannot sort dtoMetric.Label in place as it is immutable by contract. - copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) - copy(copiedLabels, dtoMetric.Label) - sort.Sort(internal.LabelPairSorter(copiedLabels)) - dtoMetric.Label = copiedLabels - } - for _, lp := range dtoMetric.Label { - h.WriteString(lp.GetName()) - h.Write(separatorByteSlice) - h.WriteString(lp.GetValue()) - h.Write(separatorByteSlice) - } - if dtoMetric.TimestampMs != nil { - h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10)) - h.Write(separatorByteSlice) - } - hSum := h.Sum64() - if _, exists := metricHashes[hSum]; exists { - return fmt.Errorf( - "collected metric %q { %s} was collected before with the same name and label values", - name, dtoMetric, - ) - } - metricHashes[hSum] = struct{}{} - return nil -} - -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? 
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) - copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels.names { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(internal.LabelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - return nil -} - -var _ TransactionalGatherer = &MultiTRegistry{} - -// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple -// transactional gatherers. -// -// It is caller responsibility to ensure two registries have mutually exclusive metric families, -// no deduplication will happen. -type MultiTRegistry struct { - tGatherers []TransactionalGatherer -} - -// NewMultiTRegistry creates MultiTRegistry. -func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { - return &MultiTRegistry{ - tGatherers: tGatherers, - } -} - -// Gather implements TransactionalGatherer interface. -func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { - errs := MultiError{} - - dFns := make([]func(), 0, len(r.tGatherers)) - // TODO(bwplotka): Implement concurrency for those? - for _, g := range r.tGatherers { - // TODO(bwplotka): Check for duplicates? - m, d, err := g.Gather() - errs.Append(err) - - mfs = append(mfs, m...) - dFns = append(dFns, d) - } - - // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. 
- sort.Slice(mfs, func(i, j int) bool { - return *mfs[i].Name < *mfs[j].Name - }) - return mfs, func() { - for _, d := range dFns { - d() - } - }, errs.MaybeUnwrap() -} - -// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory -// used by metric family is no longer used by a caller. This allows implementations with cache. -type TransactionalGatherer interface { - // Gather returns metrics in a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - // - // Important: done is expected to be triggered (even if the error occurs!) - // once caller does not need returned slice of dto.MetricFamily. - Gather() (_ []*dto.MetricFamily, done func(), err error) -} - -// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. 
-func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { - return &noTransactionGatherer{g: g} -} - -type noTransactionGatherer struct { - g Gatherer -} - -// Gather implements TransactionalGatherer interface. -func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { - mfs, err := g.g.Gather() - return mfs, func() {}, err -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index ac5203c6fa..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/beorn7/perks/quantile" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. -const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. 
By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v1.0.0 of the library. There will be no rank estimations at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. Observations are - // usually positive or zero. Negative observations are accepted but - // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See - // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations - // for details. - Observe(float64) -} - -var errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, -) - -// Default values for SummaryOpts. -const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. - DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name to a non-empty string. 
While all other fields are -// optional and can safely be left at their zero value, it is recommended to set -// a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v1.0.0 of the library. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // Due to the way a Summary is represented in the Prometheus text format - // and how it is handled by the Prometheus server internally, “quantile” - // is an illegal label name. Construction of a Summary or SummaryVec - // will panic if this label name is used in ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. 
The - // default value is an empty map, resulting in a summary without - // quantiles. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Only applies to pre-calculated quantiles, does not - // apply to _sum and _count. Must be positive. The default value is - // DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 - - // now is for testing purposes, by default it's time.Now. - now func() time.Time -} - -// SummaryVecOpts bundles the options to create a SummaryVec metric. -// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels -// is optional and can safely be left to its default value. -type SummaryVecOpts struct { - SummaryOpts - - // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Constraint - // function, if provided. - VariableLabels ConstrainableLabels -} - -// Problem with the sliding-window decay algorithm... 
The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels.names) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) - } - - for _, n := range desc.variableLabels.names { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if opts.Objectives == nil { - opts.Objectives = map[float64]float64{} - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = 
DefBufCap - } - - if opts.now == nil { - opts.now = time.Now - } - if len(opts.Objectives) == 0 { - // Use the lock-free implementation of a Summary without objectives. - s := &noObjectivesSummary{ - desc: desc, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*summaryCounts{{}, {}}, - } - s.init(s) // Init self-collection. - s.createdTs = timestamppb.New(opts.now()) - return s - } - - s := &summary{ - desc: desc, - now: opts.now, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: MakeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = opts.now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.init(s) // Init self-collection. - s.createdTs = timestamppb.New(opts.now()) - return s -} - -type summary struct { - selfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. - mtx sync.Mutex // Protects every other moving part. - // Lock bufMtx before mtx if both are needed. 
- - desc *Desc - - now func() time.Time - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time - - createdTs *timestamppb.Timestamp -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := s.now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{ - CreatedTimestamp: s.createdTs, - } - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(s.now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. -func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. 
- go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. -func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. - for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type summaryCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 -} - -type noObjectivesSummary struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // summaryCounts, as a marker for completion. - // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the summary) swap the hot–cold under the writeMtx - // lock. 
A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must - // be merged into the new hot before releasing writeMtx. - - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*summaryCounts - - labelPairs []*dto.LabelPair - - createdTs *timestamppb.Timestamp -} - -func (s *noObjectivesSummary) Desc() *Desc { - return s.desc -} - -func (s *noObjectivesSummary) Observe(v float64) { - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&s.countAndHotIdx, 1) - hotCounts := s.counts[n>>63] - - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) -} - -func (s *noObjectivesSummary) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. 
- s.writeMtx.Lock() - defer s.writeMtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := s.counts[n>>63] - coldCounts := s.counts[(^n)>>63] - - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - - sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - CreatedTimestamp: s.createdTs, - } - - out.Summary = sum - out.Label = s.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - return nil -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. 
-type SummaryVec struct { - *MetricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. -// -// Due to the way a Summary is represented in the Prometheus text format and how -// it is handled by the Prometheus server internally, “quantile” is an illegal -// label name. NewSummaryVec will panic if this label name is used. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - return V2.NewSummaryVec(SummaryVecOpts{ - SummaryOpts: opts, - VariableLabels: UnconstrainedLabels(labelNames), - }) -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts. -func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec { - for _, ln := range opts.VariableLabels.labelNames() { - if ln == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - desc := V2.NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - opts.VariableLabels, - opts.ConstLabels, - ) - return &SummaryVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts.SummaryOpts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Summary for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Summary is created. -// -// It is possible to call this method without using the returned Summary to only -// create the new Summary but leave it at its starting value, a Summary without -// any observations. -// -// Keeping the Summary for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Summary from the SummaryVec. In that case, -// the Summary will still exist, but it will not be exported anymore, even if a -// Summary with the same label values is created later. See also the CounterVec -// example. 
-// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Summary for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Summary is created. Implications of -// creating a Summary without using it and keeping the Summary for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. 
Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { - s, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return s -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *SummaryVec) With(labels Labels) Observer { - s, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return s -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the SummaryVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &SummaryVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. 
-func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constSummary struct { - desc *Desc - count uint64 - sum float64 - quantiles map[float64]float64 - labelPairs []*dto.LabelPair - createdTs *timestamppb.Timestamp -} - -func (s *constSummary) Desc() *Desc { - return s.desc -} - -func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{ - CreatedTimestamp: s.createdTs, - } - qs := make([]*dto.Quantile, 0, len(s.quantiles)) - - sum.SampleCount = proto.Uint64(s.count) - sum.SampleSum = proto.Float64(s.sum) - - for rank, q := range s.quantiles { - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - - return nil -} - -// NewConstSummary returns a metric representing a Prometheus summary with fixed -// values for the count, sum, and quantiles. As those parameters cannot be -// changed, the returned value does not implement the Summary interface (but -// only the Metric interface). Users of this package will not have much use for -// it in regular operations. However, when implementing custom Collectors, it is -// useful as a throw-away metric that is generated on the fly to send it to -// Prometheus in the Collect method. -// -// quantiles maps ranks to quantile values. For example, a median latency of -// 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// -// map[float64]float64{0.5: 0.23, 0.99: 0.56} -// -// NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. 
-func NewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: MakeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstSummary is a version of NewConstSummary that panics where -// NewConstMetric would have returned an error. -func MustNewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) Metric { - m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) - if err != nil { - panic(err) - } - return m -} - -// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. -func NewConstSummaryWithCreatedTimestamp( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - ct time.Time, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: MakeLabelPairs(desc, labelValues), - createdTs: timestamppb.New(ct), - }, nil -} - -// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where -// NewConstSummaryWithCreatedTimestamp would have returned an error. -func MustNewConstSummaryWithCreatedTimestamp( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - ct time.Time, - labelValues ...string, -) Metric { - m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) 
- if err != nil { - panic(err) - } - return m -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go deleted file mode 100644 index 52344fef53..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "time" - -// Timer is a helper type to time functions. Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar -// later on will be also supported. -// Timer is usually used to time a function call in the -// following way: -// -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -// -// or -// -// func TimeMeWithExemplar() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDurationWithExemplar(exemplar) -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. 
It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. The observed -// duration is also returned. ObserveDuration is usually called with a defer -// statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func (t *Timer) ObserveDuration() time.Duration { - d := time.Since(t.begin) - if t.observer != nil { - t.observer.Observe(d.Seconds()) - } - return d -} - -// ObserveDurationWithExemplar is like ObserveDuration, but it will also -// observe exemplar with the duration unless exemplar is nil or provided Observer can't -// be casted to ExemplarObserver. -func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration { - d := time.Since(t.begin) - eo, ok := t.observer.(ExemplarObserver) - if ok && exemplar != nil { - eo.ObserveWithExemplar(d.Seconds(), exemplar) - return d - } - if t.observer != nil { - t.observer.Observe(d.Seconds()) - } - return d -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go deleted file mode 100644 index 0f9ce63f40..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// UntypedOpts is an alias for Opts. 
See there for doc comments. -type UntypedOpts Opts - -// UntypedFunc works like GaugeFunc but the collected metric is of type -// "Untyped". UntypedFunc is useful to mirror an external metric of unknown -// type. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index cc23011fad..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "errors" - "fmt" - "sort" - "time" - "unicode/utf8" - - "github.com/prometheus/client_golang/prometheus/internal" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. Use UntypedValue to mark a metric -// with an unknown type. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -var ( - CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() - GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() - UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() -) - -func (v ValueType) ToDTO() *dto.MetricType { - switch v { - case CounterValue: - return CounterMetricTypePtr - case GaugeValue: - return GaugeMetricTypePtr - default: - return UntypedMetricTypePtr - } -} - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - selfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. 
-func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: MakeLabelPairs(desc, nil), - } - result.init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc or if Desc is -// invalid. -func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - - metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil { - return nil, err - } - - return &constMetric{ - desc: desc, - metric: metric, - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) - if err != nil { - panic(err) - } - return m -} - -// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters -// with created timestamp set and returns an error for other metric types. 
-func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { - return nil, err - } - switch valueType { - case CounterValue: - break - default: - return nil, errors.New("created timestamps are only supported for counters") - } - - metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil { - return nil, err - } - - return &constMetric{ - desc: desc, - metric: metric, - }, nil -} - -// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where -// NewConstMetricWithCreatedTimestamp would have returned an error. -func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric { - m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...) 
- if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - metric *dto.Metric -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - out.Label = m.metric.Label - out.Counter = m.metric.Counter - out.Gauge = m.metric.Gauge - out.Untyped = m.metric.Untyped - return nil -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - e *dto.Exemplar, - m *dto.Metric, - ct *timestamppb.Timestamp, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -// MakeLabelPairs is a helper function to create protobuf LabelPairs from the -// variable and constant labels in the provided Desc. The values for the -// variable labels are defined by the labelValues slice, which must be in the -// same order as the corresponding variable labels in the Desc. -// -// This function is only needed for custom Metric implementations. See MetricVec -// example. -func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels.names) == 0 { - // Moderately fast path. - return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, l := range desc.variableLabels.names { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(l), - Value: proto.String(labelValues[i]), - }) - } - labelPairs = append(labelPairs, desc.constLabelPairs...) 
- sort.Sort(internal.LabelPairSorter(labelPairs)) - return labelPairs -} - -// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 128 - -// newExemplar creates a new dto.Exemplar from the provided values. An error is -// returned if any of the label names or values are invalid or if the total -// number of runes in the label names and values exceeds ExemplarMaxRunes. -func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { - e := &dto.Exemplar{} - e.Value = proto.Float64(value) - tsProto := timestamppb.New(ts) - if err := tsProto.CheckValid(); err != nil { - return nil, err - } - e.Timestamp = tsProto - labelPairs := make([]*dto.LabelPair, 0, len(l)) - var runes int - for name, value := range l { - if !checkLabelName(name) { - return nil, fmt.Errorf("exemplar label name %q is invalid", name) - } - runes += utf8.RuneCountInString(name) - if !utf8.ValidString(value) { - return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) - } - runes += utf8.RuneCountInString(value) - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(name), - Value: proto.String(value), - }) - } - if runes > ExemplarMaxRunes { - return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) - } - e.Label = labelPairs - return e, nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index 487b466563..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/prometheus/common/model" -) - -// MetricVec is a Collector to bundle metrics of the same name that differ in -// their label values. MetricVec is not used directly but as a building block -// for implementations of vectors of a given metric type, like GaugeVec, -// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be -// used for custom Metric implementations. -// -// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in -// FooVec and initialize it with NewMetricVec. Implement wrappers for -// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather -// than (Metric, error). Similarly, create a wrapper for CurryWith that returns -// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also -// add the convenience methods WithLabelValues, With, and MustCurryWith, which -// panic instead of returning errors. See also the MetricVec example. -type MetricVec struct { - *metricMap - - curry []curriedLabelValue - - // hashAdd and hashAddByte can be replaced for testing collision handling. - hashAdd func(h uint64, s string) uint64 - hashAddByte func(h uint64, b byte) uint64 -} - -// NewMetricVec returns an initialized metricVec. 
-func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { - return &MetricVec{ - metricMap: &metricMap{ - metrics: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - }, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { - lvs = constrainLabelValues(m.desc, lvs, m.curry) - - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - - return m.deleteByHashWithLabelValues(h, lvs, m.curry) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. However, such inconsistent Labels -// can never match an actual metric, so the method will always return false in -// that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. 
-func (m *MetricVec) Delete(labels Labels) bool { - labels, closer := constrainLabels(m.desc, labels) - defer closer() - - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.deleteByHashWithLabels(h, labels, m.curry) -} - -// DeletePartialMatch deletes all metrics where the variable labels contain all of those -// passed in as labels. The order of the labels does not matter. -// It returns the number of metrics deleted. -// -// Note that curried labels will never be matched if deleting from the curried vector. -// To match curried labels with DeletePartialMatch, it must be called on the base vector. -func (m *MetricVec) DeletePartialMatch(labels Labels) int { - labels, closer := constrainLabels(m.desc, labels) - defer closer() - - return m.deleteByLabels(labels, m.curry) -} - -// Without explicit forwarding of Describe, Collect, Reset, those methods won't -// show up in GoDoc. - -// Describe implements Collector. -func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } - -// Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } - -// Reset deletes all metrics in this vector. -func (m *MetricVec) Reset() { m.metricMap.Reset() } - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the MetricVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. 
Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -// -// Note that CurryWith is usually not called directly but through a wrapper -// around MetricVec, implementing a vector for a specific Metric -// implementation, for example GaugeVec. -func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { - var ( - newCurry []curriedLabelValue - oldCurry = m.curry - iCurry int - ) - for i, labelName := range m.desc.variableLabels.names { - val, ok := labels[labelName] - if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { - if ok { - return nil, fmt.Errorf("label name %q is already curried", labelName) - } - newCurry = append(newCurry, oldCurry[iCurry]) - iCurry++ - } else { - if !ok { - continue // Label stays uncurried. - } - newCurry = append(newCurry, curriedLabelValue{ - i, - m.desc.variableLabels.constrain(labelName, val), - }) - } - } - if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { - return nil, fmt.Errorf("%d unknown label(s) found during currying", l) - } - - return &MetricVec{ - metricMap: m.metricMap, - curry: newCurry, - hashAdd: m.hashAdd, - hashAddByte: m.hashAddByte, - }, nil -} - -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created (by -// calling the newMetric function provided during construction of the -// MetricVec). -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it in its initial state. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. 
In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// -// Note that GetMetricWithLabelValues is usually not called directly but through -// a wrapper around MetricVec, implementing a vector for a specific Metric -// implementation, for example GaugeVec. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - lvs = constrainLabelValues(m.desc, lvs, m.curry) - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. 
-// -// Note that GetMetricWith is usually not called directly but through a wrapper -// around MetricVec, implementing a vector for a specific Metric implementation, -// for example GaugeVec. -func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - labels, closer := constrainLabels(m.desc, labels) - defer closer() - - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iVals, iCurry int - ) - for i := 0; i < len(m.desc.variableLabels.names); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - h = m.hashAdd(h, vals[iVals]) - iVals++ - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iCurry int - ) - for i, labelName := range m.desc.variableLabels.names { - val, ok := labels[labelName] - if iCurry < len(curry) && curry[iCurry].index == i { - if ok { - return 0, fmt.Errorf("label name %q is already curried", labelName) - } - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", labelName) - } - h = m.hashAdd(h, val) - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. 
-type metricWithLabelValues struct { - values []string - metric Metric -} - -// curriedLabelValue sets the curried value for a label at the given index. -type curriedLabelValue struct { - index int - value string -} - -// metricMap is a helper for metricVec and shared between differently curried -// metricVecs. -type metricMap struct { - mtx sync.RWMutex // Protects metrics. - metrics map[uint64][]metricWithLabelValues - desc *Desc - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. It will send exactly one Desc to the provided -// channel. -func (m *metricMap) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. -func (m *metricMap) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.metrics { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// Reset deletes all metrics in this vector. -func (m *metricMap) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.metrics { - delete(m.metrics, h) - } -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *metricMap) deleteByHashWithLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - - i := findMetricWithLabelValues(metrics, lvs, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - old := metrics - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - old[len(old)-1] = metricWithLabelValues{} - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. If there -// are multiple matches in the bucket, use lvs to select a metric and remove -// only that metric. 
-func (m *metricMap) deleteByHashWithLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - i := findMetricWithLabels(m.desc, metrics, labels, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - old := metrics - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - old[len(old)-1] = metricWithLabelValues{} - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByLabels deletes a metric if the given labels are present in the metric. -func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int { - m.mtx.Lock() - defer m.mtx.Unlock() - - var numDeleted int - - for h, metrics := range m.metrics { - i := findMetricWithPartialLabels(m.desc, metrics, labels, curry) - if i >= len(metrics) { - // Didn't find matching labels in this metric slice. - continue - } - delete(m.metrics, h) - numDeleted++ - } - - return numDeleted -} - -// findMetricWithPartialLabel returns the index of the matching metric or -// len(metrics) if not found. -func findMetricWithPartialLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchPartialLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -// indexOf searches the given slice of strings for the target string and returns -// the index or len(items) as well as a boolean whether the search succeeded. -func indexOf(target string, items []string) (int, bool) { - for i, l := range items { - if l == target { - return i, true - } - } - return len(items), false -} - -// valueMatchesVariableOrCurriedValue determines if a value was previously curried, -// and returns whether it matches either the "base" value or the curried value accordingly. -// It also indicates whether the match is against a curried or uncurried value. 
-func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) { - for _, curriedValue := range curry { - if curriedValue.index == index { - // This label was curried. See if the curried value matches our target. - return curriedValue.value == targetValue, true - } - } - // This label was not curried. See if the current value matches our target label. - return values[index] == targetValue, false -} - -// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present. -func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - for l, v := range labels { - // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names) - if validLabel { - // Check the value of that label against the target value. - // We don't consider curried values in partial matches. - matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry) - if matches && !curried { - continue - } - } - return false - } - return true -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabelValues( - hash uint64, lvs []string, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) - if !ok { - inlinedLVs := inlineLabelValues(lvs, curry) - metric = m.newMetric(inlinedLVs...) 
- m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabels retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabels( - hash uint64, labels Labels, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) - if !ok { - lvs := extractLabelValues(m.desc, labels, curry) - metric = m.newMetric(lvs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex. -func (m *metricMap) getMetricWithHashAndLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithHashAndLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *metricMap) getMetricWithHashAndLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. 
-func findMetricWithLabelValues( - metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabelValues(metric.values, lvs, curry) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. -func findMetricWithLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { - if len(values) != len(lvs)+len(curry) { - return false - } - var iLVs, iCurry int - for i, v := range values { - if iCurry < len(curry) && curry[iCurry].index == i { - if v != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if v != lvs[iLVs] { - return false - } - iLVs++ - } - return true -} - -func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - if len(values) != len(labels)+len(curry) { - return false - } - iCurry := 0 - for i, k := range desc.variableLabels.names { - if iCurry < len(curry) && curry[iCurry].index == i { - if values[i] != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if values[i] != labels[k] { - return false - } - } - return true -} - -func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { - labelValues := make([]string, len(labels)+len(curry)) - iCurry := 0 - for i, k := range desc.variableLabels.names { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = labels[k] - } - return labelValues -} - -func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { - labelValues := make([]string, len(lvs)+len(curry)) - var iCurry, iLVs int - for i := range 
labelValues { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = lvs[iLVs] - iLVs++ - } - return labelValues -} - -var labelsPool = &sync.Pool{ - New: func() interface{} { - return make(Labels) - }, -} - -func constrainLabels(desc *Desc, labels Labels) (Labels, func()) { - if len(desc.variableLabels.labelConstraints) == 0 { - // Fast path when there's no constraints - return labels, func() {} - } - - constrainedLabels := labelsPool.Get().(Labels) - for l, v := range labels { - constrainedLabels[l] = desc.variableLabels.constrain(l, v) - } - - return constrainedLabels, func() { - for k := range constrainedLabels { - delete(constrainedLabels, k) - } - labelsPool.Put(constrainedLabels) - } -} - -func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { - if len(desc.variableLabels.labelConstraints) == 0 { - // Fast path when there's no constraints - return lvs - } - - constrainedValues := make([]string, len(lvs)) - var iCurry, iLVs int - for i := 0; i < len(lvs)+len(curry); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - iCurry++ - continue - } - - if i < len(desc.variableLabels.names) { - constrainedValues[iLVs] = desc.variableLabels.constrain( - desc.variableLabels.names[i], - lvs[iLVs], - ) - } else { - constrainedValues[iLVs] = lvs[iLVs] - } - iLVs++ - } - return constrainedValues -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go deleted file mode 100644 index 42bc3a8f06..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -type v2 struct{} - -// V2 is a struct that can be referenced to access experimental API that might -// be present in v2 of client golang someday. It offers extended functionality -// of v1 with slightly changed API. It is acceptable to use some pieces from v1 -// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc` -// in the same codebase. -var V2 = v2{} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go deleted file mode 100644 index 2ed1285068..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "fmt" - "sort" - - "github.com/prometheus/client_golang/prometheus/internal" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -// WrapRegistererWith returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided Labels to all Metrics it collects (as -// ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. Wrapping a nil value is valid, resulting -// in a no-op Registerer. -// -// WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. It should not be used to add fixed labels to all metrics -// exposed. See also -// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels -// -// Conflicts between Collectors registered through the original Registerer with -// Collectors registered through the wrapping Registerer will still be -// detected. Any AlreadyRegisteredError returned by the Register method of -// either Registerer will contain the ExistingCollector in the form it was -// provided to the respective registry. -// -// The Collector example demonstrates a use of WrapRegistererWith. -func WrapRegistererWith(labels Labels, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - labels: labels, - } -} - -// WrapRegistererWithPrefix returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided prefix to the name of all Metrics it collects. -// Wrapping a nil value is valid, resulting in a no-op Registerer. -// -// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of -// a sub-system. 
To make this work, register metrics of the sub-system with the -// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful -// to use the same prefix for all metrics exposed. In particular, do not prefix -// metric names that are standardized across applications, as that would break -// horizontal monitoring, for example the metrics provided by the Go collector -// (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with "go_" or "process_", -// respectively.) -// -// Conflicts between Collectors registered through the original Registerer with -// Collectors registered through the wrapping Registerer will still be -// detected. Any AlreadyRegisteredError returned by the Register method of -// either Registerer will contain the ExistingCollector in the form it was -// provided to the respective registry. -func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - prefix: prefix, - } -} - -// WrapCollectorWith returns a Collector wrapping the provided Collector. The -// wrapped Collector will add the provided Labels to all Metrics it collects (as -// ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. -// -// WrapCollectorWith can be useful to work with multiple instances of a third -// party library that does not expose enough flexibility on the lifecycle of its -// registered metrics. -// For example, let's say you have a foo.New(reg Registerer) constructor that -// registers metrics but never unregisters them, and you want to create multiple -// instances of foo.Foo with different labels. -// The way to achieve that, is to create a new Registry, pass it to foo.New, -// then use WrapCollectorWith to wrap that Registry with the desired labels and -// register that as a collector in your main Registry. 
-// Then you can un-register the wrapped collector effectively un-registering the -// metrics registered by foo.New. -func WrapCollectorWith(labels Labels, c Collector) Collector { - return &wrappingCollector{ - wrappedCollector: c, - labels: labels, - } -} - -// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The -// wrapped Collector will add the provided prefix to the name of all Metrics it collects. -// -// See the documentation of WrapCollectorWith for more details on the use case. -func WrapCollectorWithPrefix(prefix string, c Collector) Collector { - return &wrappingCollector{ - wrappedCollector: c, - prefix: prefix, - } -} - -type wrappingRegisterer struct { - wrappedRegisterer Registerer - prefix string - labels Labels -} - -func (r *wrappingRegisterer) Register(c Collector) error { - if r.wrappedRegisterer == nil { - return nil - } - return r.wrappedRegisterer.Register(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -func (r *wrappingRegisterer) MustRegister(cs ...Collector) { - if r.wrappedRegisterer == nil { - return - } - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -func (r *wrappingRegisterer) Unregister(c Collector) bool { - if r.wrappedRegisterer == nil { - return false - } - return r.wrappedRegisterer.Unregister(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -type wrappingCollector struct { - wrappedCollector Collector - prefix string - labels Labels -} - -func (c *wrappingCollector) Collect(ch chan<- Metric) { - wrappedCh := make(chan Metric) - go func() { - c.wrappedCollector.Collect(wrappedCh) - close(wrappedCh) - }() - for m := range wrappedCh { - ch <- &wrappingMetric{ - wrappedMetric: m, - prefix: c.prefix, - labels: c.labels, - } - } -} - -func (c *wrappingCollector) Describe(ch chan<- *Desc) { - wrappedCh := make(chan *Desc) - go func() { - c.wrappedCollector.Describe(wrappedCh) 
- close(wrappedCh) - }() - for desc := range wrappedCh { - ch <- wrapDesc(desc, c.prefix, c.labels) - } -} - -func (c *wrappingCollector) unwrapRecursively() Collector { - switch wc := c.wrappedCollector.(type) { - case *wrappingCollector: - return wc.unwrapRecursively() - default: - return wc - } -} - -type wrappingMetric struct { - wrappedMetric Metric - prefix string - labels Labels -} - -func (m *wrappingMetric) Desc() *Desc { - return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) -} - -func (m *wrappingMetric) Write(out *dto.Metric) error { - if err := m.wrappedMetric.Write(out); err != nil { - return err - } - if len(m.labels) == 0 { - // No wrapping labels. - return nil - } - for ln, lv := range m.labels { - out.Label = append(out.Label, &dto.LabelPair{ - Name: proto.String(ln), - Value: proto.String(lv), - }) - } - sort.Sort(internal.LabelPairSorter(out.Label)) - return nil -} - -func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { - constLabels := Labels{} - for _, lp := range desc.constLabelPairs { - constLabels[*lp.Name] = *lp.Value - } - for ln, lv := range labels { - if _, alreadyUsed := constLabels[ln]; alreadyUsed { - return &Desc{ - fqName: desc.fqName, - help: desc.help, - variableLabels: desc.variableLabels, - constLabelPairs: desc.constLabelPairs, - err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), - } - } - constLabels[ln] = lv - } - // NewDesc will do remaining validations. - newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) - // Propagate errors if there was any. This will override any errer - // created by NewDesc above, i.e. earlier errors get precedence. 
- if desc.err != nil { - newDesc.err = desc.err - } - return newDesc -} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/prometheus/client_model/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e410e..0000000000 --- a/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). 
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index 2f15490758..0000000000 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,1399 +0,0 @@ -// Copyright 2013 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.20.3 -// source: io/prometheus/client/metrics.proto - -package io_prometheus_client - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type MetricType int32 - -const ( - // COUNTER must use the Metric field "counter". - MetricType_COUNTER MetricType = 0 - // GAUGE must use the Metric field "gauge". - MetricType_GAUGE MetricType = 1 - // SUMMARY must use the Metric field "summary". - MetricType_SUMMARY MetricType = 2 - // UNTYPED must use the Metric field "untyped". - MetricType_UNTYPED MetricType = 3 - // HISTOGRAM must use the Metric field "histogram". 
- MetricType_HISTOGRAM MetricType = 4 - // GAUGE_HISTOGRAM must use the Metric field "histogram". - MetricType_GAUGE_HISTOGRAM MetricType = 5 -) - -// Enum value maps for MetricType. -var ( - MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", - } - MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, - } -) - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} - -func (x MetricType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (MetricType) Descriptor() protoreflect.EnumDescriptor { - return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() -} - -func (MetricType) Type() protoreflect.EnumType { - return &file_io_prometheus_client_metrics_proto_enumTypes[0] -} - -func (x MetricType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *MetricType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = MetricType(num) - return nil -} - -// Deprecated: Use MetricType.Descriptor instead. 
-func (MetricType) EnumDescriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} -} - -type LabelPair struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (x *LabelPair) Reset() { - *x = LabelPair{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LabelPair) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LabelPair) ProtoMessage() {} - -func (x *LabelPair) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. 
-func (*LabelPair) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} -} - -func (x *LabelPair) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *LabelPair) GetValue() string { - if x != nil && x.Value != nil { - return *x.Value - } - return "" -} - -type Gauge struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` -} - -func (x *Gauge) Reset() { - *x = Gauge{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Gauge) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Gauge) ProtoMessage() {} - -func (x *Gauge) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
-func (*Gauge) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} -} - -func (x *Gauge) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -type Counter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` -} - -func (x *Counter) Reset() { - *x = Counter{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Counter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Counter) ProtoMessage() {} - -func (x *Counter) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
-func (*Counter) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} -} - -func (x *Counter) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -func (x *Counter) GetExemplar() *Exemplar { - if x != nil { - return x.Exemplar - } - return nil -} - -func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.CreatedTimestamp - } - return nil -} - -type Quantile struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` -} - -func (x *Quantile) Reset() { - *x = Quantile{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Quantile) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Quantile) ProtoMessage() {} - -func (x *Quantile) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
-func (*Quantile) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} -} - -func (x *Quantile) GetQuantile() float64 { - if x != nil && x.Quantile != nil { - return *x.Quantile - } - return 0 -} - -func (x *Quantile) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -type Summary struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` -} - -func (x *Summary) Reset() { - *x = Summary{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Summary) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Summary) ProtoMessage() {} - -func (x *Summary) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
-func (*Summary) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} -} - -func (x *Summary) GetSampleCount() uint64 { - if x != nil && x.SampleCount != nil { - return *x.SampleCount - } - return 0 -} - -func (x *Summary) GetSampleSum() float64 { - if x != nil && x.SampleSum != nil { - return *x.SampleSum - } - return 0 -} - -func (x *Summary) GetQuantile() []*Quantile { - if x != nil { - return x.Quantile - } - return nil -} - -func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.CreatedTimestamp - } - return nil -} - -type Untyped struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` -} - -func (x *Untyped) Reset() { - *x = Untyped{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Untyped) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Untyped) ProtoMessage() {} - -func (x *Untyped) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
-func (*Untyped) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} -} - -func (x *Untyped) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -type Histogram struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. - CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` - // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. - // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. - // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). - // In the future, more bucket schemas may be added using numbers < -4 or > 8. - Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. 
- ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. - // Negative buckets for the native histogram. - NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` - // Use either "negative_delta" or "negative_count", the former for - // regular histograms with integer counts, the latter for float - // histograms. - NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. - // Positive buckets for the native histogram. - // Use a no-op span (offset 0, length 0) for a native histogram without any - // observations yet and with a zero_threshold of 0. Otherwise, it would be - // indistinguishable from a classic histogram. - PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` - // Use either "positive_delta" or "positive_count", the former for - // regular histograms with integer counts, the latter for float - // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. - // Only used for native histograms. These exemplars MUST have a timestamp. 
- Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"` -} - -func (x *Histogram) Reset() { - *x = Histogram{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Histogram) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Histogram) ProtoMessage() {} - -func (x *Histogram) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. -func (*Histogram) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} -} - -func (x *Histogram) GetSampleCount() uint64 { - if x != nil && x.SampleCount != nil { - return *x.SampleCount - } - return 0 -} - -func (x *Histogram) GetSampleCountFloat() float64 { - if x != nil && x.SampleCountFloat != nil { - return *x.SampleCountFloat - } - return 0 -} - -func (x *Histogram) GetSampleSum() float64 { - if x != nil && x.SampleSum != nil { - return *x.SampleSum - } - return 0 -} - -func (x *Histogram) GetBucket() []*Bucket { - if x != nil { - return x.Bucket - } - return nil -} - -func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.CreatedTimestamp - } - return nil -} - -func (x *Histogram) GetSchema() int32 { - if x != nil && x.Schema != nil { - return *x.Schema - } - return 0 -} - -func (x *Histogram) GetZeroThreshold() float64 { - if x != nil && x.ZeroThreshold != nil { - return *x.ZeroThreshold - } - return 0 -} - -func (x *Histogram) GetZeroCount() uint64 { - if x != nil && x.ZeroCount != nil { - return *x.ZeroCount - } - return 0 -} - -func 
(x *Histogram) GetZeroCountFloat() float64 { - if x != nil && x.ZeroCountFloat != nil { - return *x.ZeroCountFloat - } - return 0 -} - -func (x *Histogram) GetNegativeSpan() []*BucketSpan { - if x != nil { - return x.NegativeSpan - } - return nil -} - -func (x *Histogram) GetNegativeDelta() []int64 { - if x != nil { - return x.NegativeDelta - } - return nil -} - -func (x *Histogram) GetNegativeCount() []float64 { - if x != nil { - return x.NegativeCount - } - return nil -} - -func (x *Histogram) GetPositiveSpan() []*BucketSpan { - if x != nil { - return x.PositiveSpan - } - return nil -} - -func (x *Histogram) GetPositiveDelta() []int64 { - if x != nil { - return x.PositiveDelta - } - return nil -} - -func (x *Histogram) GetPositiveCount() []float64 { - if x != nil { - return x.PositiveCount - } - return nil -} - -func (x *Histogram) GetExemplars() []*Exemplar { - if x != nil { - return x.Exemplars - } - return nil -} - -// A Bucket of a conventional histogram, each of which is treated as -// an individual counter-like time series by Prometheus. -type Bucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. 
- Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` -} - -func (x *Bucket) Reset() { - *x = Bucket{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket) ProtoMessage() {} - -func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. -func (*Bucket) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} -} - -func (x *Bucket) GetCumulativeCount() uint64 { - if x != nil && x.CumulativeCount != nil { - return *x.CumulativeCount - } - return 0 -} - -func (x *Bucket) GetCumulativeCountFloat() float64 { - if x != nil && x.CumulativeCountFloat != nil { - return *x.CumulativeCountFloat - } - return 0 -} - -func (x *Bucket) GetUpperBound() float64 { - if x != nil && x.UpperBound != nil { - return *x.UpperBound - } - return 0 -} - -func (x *Bucket) GetExemplar() *Exemplar { - if x != nil { - return x.Exemplar - } - return nil -} - -// A BucketSpan defines a number of consecutive buckets in a native -// histogram with their offset. Logically, it would be more -// straightforward to include the bucket counts in the Span. However, -// the protobuf representation is more compact in the way the data is -// structured here (with all the buckets in a single array separate -// from the Spans). 
-type BucketSpan struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. -} - -func (x *BucketSpan) Reset() { - *x = BucketSpan{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BucketSpan) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BucketSpan) ProtoMessage() {} - -func (x *BucketSpan) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. -func (*BucketSpan) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} -} - -func (x *BucketSpan) GetOffset() int32 { - if x != nil && x.Offset != nil { - return *x.Offset - } - return 0 -} - -func (x *BucketSpan) GetLength() uint32 { - if x != nil && x.Length != nil { - return *x.Length - } - return 0 -} - -type Exemplar struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. 
-} - -func (x *Exemplar) Reset() { - *x = Exemplar{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Exemplar) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Exemplar) ProtoMessage() {} - -func (x *Exemplar) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. -func (*Exemplar) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} -} - -func (x *Exemplar) GetLabel() []*LabelPair { - if x != nil { - return x.Label - } - return nil -} - -func (x *Exemplar) GetValue() float64 { - if x != nil && x.Value != nil { - return *x.Value - } - return 0 -} - -func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -type Metric struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` -} - -func (x *Metric) Reset() { - *x = Metric{} - if 
protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Metric) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metric) ProtoMessage() {} - -func (x *Metric) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metric.ProtoReflect.Descriptor instead. -func (*Metric) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} -} - -func (x *Metric) GetLabel() []*LabelPair { - if x != nil { - return x.Label - } - return nil -} - -func (x *Metric) GetGauge() *Gauge { - if x != nil { - return x.Gauge - } - return nil -} - -func (x *Metric) GetCounter() *Counter { - if x != nil { - return x.Counter - } - return nil -} - -func (x *Metric) GetSummary() *Summary { - if x != nil { - return x.Summary - } - return nil -} - -func (x *Metric) GetUntyped() *Untyped { - if x != nil { - return x.Untyped - } - return nil -} - -func (x *Metric) GetHistogram() *Histogram { - if x != nil { - return x.Histogram - } - return nil -} - -func (x *Metric) GetTimestampMs() int64 { - if x != nil && x.TimestampMs != nil { - return *x.TimestampMs - } - return 0 -} - -type MetricFamily struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" 
json:"metric,omitempty"` - Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` -} - -func (x *MetricFamily) Reset() { - *x = MetricFamily{} - if protoimpl.UnsafeEnabled { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MetricFamily) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MetricFamily) ProtoMessage() {} - -func (x *MetricFamily) ProtoReflect() protoreflect.Message { - mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. -func (*MetricFamily) Descriptor() ([]byte, []int) { - return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} -} - -func (x *MetricFamily) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *MetricFamily) GetHelp() string { - if x != nil && x.Help != nil { - return *x.Help - } - return "" -} - -func (x *MetricFamily) GetType() MetricType { - if x != nil && x.Type != nil { - return *x.Type - } - return MetricType_COUNTER -} - -func (x *MetricFamily) GetMetric() []*Metric { - if x != nil { - return x.Metric - } - return nil -} - -func (x *MetricFamily) GetUnit() string { - if x != nil && x.Unit != nil { - return *x.Unit - } - return "" -} - -var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor - -var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 
0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, - 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, - 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 
0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, - 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, - 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 
0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47, - 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, - 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, - 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 
0x52, - 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, - 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, - 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, - 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, - 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, - 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x72, 
0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, - 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, - 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, - 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, - 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, - 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, - 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, - 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, - 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 
0x12, 0x35, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, - 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, - 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, - 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 
0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, - 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, - 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, - 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, - 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, - 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, - 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, - 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, - 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, - 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, - 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, - 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, - 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, - 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, - 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, -} - -var ( - file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once - file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc -) - -func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { - file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { - file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) - }) - return file_io_prometheus_client_metrics_proto_rawDescData -} - -var 
file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ - (MetricType)(0), // 0: io.prometheus.client.MetricType - (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair - (*Gauge)(nil), // 2: io.prometheus.client.Gauge - (*Counter)(nil), // 3: io.prometheus.client.Counter - (*Quantile)(nil), // 4: io.prometheus.client.Quantile - (*Summary)(nil), // 5: io.prometheus.client.Summary - (*Untyped)(nil), // 6: io.prometheus.client.Untyped - (*Histogram)(nil), // 7: io.prometheus.client.Histogram - (*Bucket)(nil), // 8: io.prometheus.client.Bucket - (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan - (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar - (*Metric)(nil), // 11: io.prometheus.client.Metric - (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily - (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp -} -var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ - 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar - 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp - 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile - 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp - 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket - 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp - 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan - 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar - 10, // 9: 
io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 20, // [20:20] is the sub-list for method output_type - 20, // [20:20] is the sub-list for method input_type - 20, // [20:20] is the sub-list for extension type_name - 20, // [20:20] is the sub-list for extension extendee - 0, // [0:20] is the sub-list for field type_name -} - -func init() { file_io_prometheus_client_metrics_proto_init() } -func file_io_prometheus_client_metrics_proto_init() { - if File_io_prometheus_client_metrics_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LabelPair); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Gauge); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - 
default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Counter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Quantile); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Summary); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Untyped); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Histogram); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BucketSpan); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*Exemplar); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metric); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricFamily); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, - NumEnums: 1, - NumMessages: 12, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_io_prometheus_client_metrics_proto_goTypes, - DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, - EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, - MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, - }.Build() - File_io_prometheus_client_metrics_proto = out.File - file_io_prometheus_client_metrics_proto_rawDesc = nil - file_io_prometheus_client_metrics_proto_goTypes = nil - file_io_prometheus_client_metrics_proto_depIdxs = nil -} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/prometheus/common/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1a5e..0000000000 --- a/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). 
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index 7b762370e2..0000000000 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/encoding/protodelim" - - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. -type Decoder interface { - Decode(*dto.MetricFamily) error -} - -// DecodeOptions contains options used by the Decoder and in sample extraction. -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. 
-func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. Metric -// names are validated based on the provided Format -- if the format requires -// escaping, raditional Prometheues validity checking is used. Otherwise, names -// are checked for UTF-8 validity. Supported formats include delimited protobuf -// and Prometheus text format. For historical reasons, this decoder fallbacks -// to classic text decoding for any other format. This decoder does not fully -// support OpenMetrics although it may often succeed due to the similarities -// between the formats. This decoder may not support the latest features of -// Prometheus text format and is not intended for high-performance applications. -// See: https://github.com/prometheus/common/issues/812 -func NewDecoder(r io.Reader, format Format) Decoder { - scheme := model.LegacyValidation - if format.ToEscapingScheme() == model.NoEscaping { - scheme = model.UTF8Validation - } - switch format.FormatType() { - case TypeProtoDelim: - return &protoDecoder{r: bufio.NewReader(r), s: scheme} - case TypeProtoText, TypeProtoCompact: - return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)} - } - return &textDecoder{r: r, s: scheme} -} - -// protoDecoder implements the Decoder interface for protocol buffers. 
-type protoDecoder struct { - r protodelim.Reader - s model.ValidationScheme -} - -// Decode implements the Decoder interface. -func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - opts := protodelim.UnmarshalOptions{ - MaxSize: -1, - } - if err := opts.UnmarshalFrom(d.r, v); err != nil { - return err - } - if !d.s.IsValidMetricName(v.GetName()) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !d.s.IsValidLabelName(l.GetName()) { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// errDecoder is an error-state decoder that always returns the same error. -type errDecoder struct { - err error -} - -func (d *errDecoder) Decode(*dto.MetricFamily) error { - return d.err -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - fams map[string]*dto.MetricFamily - s model.ValidationScheme - err error -} - -// Decode implements the Decoder interface. -func (d *textDecoder) Decode(v *dto.MetricFamily) error { - if d.err == nil { - // Read all metrics in one shot. - p := NewTextParser(d.s) - d.fams, d.err = p.TextToMetricFamilies(d.r) - // If we don't get an error, store io.EOF for the end. - if d.err == nil { - d.err = io.EOF - } - } - // Pick off one MetricFamily per Decode until there's nothing left. - for key, fam := range d.fams { - v.Name = fam.Name - v.Help = fam.Help - v.Type = fam.Type - v.Metric = fam.Metric - delete(d.fams, key) - return nil - } - return d.err -} - -// SampleDecoder wraps a Decoder to extract samples from the metric families -// decoded by the wrapped Decoder. 
-type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -// Decode calls the Decode method of the wrapped Decoder and then extracts the -// samples from the decoded MetricFamily into the provided model.Vector. -func (sd *SampleDecoder) Decode(s *model.Vector) error { - err := sd.Dec.Decode(&sd.f) - if err != nil { - return err - } - *s, err = extractSamples(&sd.f, sd.Opts) - return err -} - -// ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurs during sample extraction, it continues to -// extract from the remaining metric families. The returned error is the last -// error that has occurred. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { - var ( - all model.Vector - lastErr error - ) - for _, f := range fams { - some, err := extractSamples(f, o) - if err != nil { - lastErr = err - continue - } - all = append(all, some...) - } - return all, lastErr -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f), nil - case dto.MetricType_GAUGE: - return extractGauge(o, f), nil - case dto.MetricType_SUMMARY: - return extractSummary(o, f), nil - case dto.MetricType_UNTYPED: - return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f), nil - } - return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: 
model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - 
timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". - lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = 
model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - if !infSeen { - // Append an infinity bucket sample. 
- lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index 73c24dfbc9..0000000000 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/munnerz/goautoneg" - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/encoding/protodelim" - "google.golang.org/protobuf/encoding/prototext" - - "github.com/prometheus/common/model" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -// Closer is implemented by Encoders that need to be closed to finalize -// encoding. (For example, OpenMetrics needs a final `# EOF` line.) 
-// -// Note that all Encoder implementations returned from this package implement -// Closer, too, even if the Close call is a no-op. This happens in preparation -// for adding a Close method to the Encoder interface directly in a (mildly -// breaking) release in the future. -type Closer interface { - Close() error -} - -type encoderCloser struct { - encode func(*dto.MetricFamily) error - close func() error -} - -func (ec encoderCloser) Encode(v *dto.MetricFamily) error { - return ec.encode(v) -} - -func (ec encoderCloser) Close() error { - return ec.close() -} - -// Negotiate returns the Content-Type based on the given Accept header. If no -// appropriate accepted type is found, FmtText is returned (which is the -// Prometheus text format). This function will never negotiate FmtOpenMetrics, -// as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. -func Negotiate(h http.Header) Format { - escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { - switch Format(escapeParam) { - case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format("; escaping=" + escapeParam) - default: - // If the escaping parameter is unknown, ignore it. 
- } - } - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim + escapingScheme - case "text": - return FmtProtoText + escapingScheme - case "compact-text": - return FmtProtoCompact + escapingScheme - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + escapingScheme - } - } - return FmtText + escapingScheme -} - -// NegotiateIncludingOpenMetrics works like Negotiate but includes -// FmtOpenMetrics as an option for the result. Note that this function is -// temporary and will disappear once FmtOpenMetrics is fully supported and as -// such may be negotiated by the normal Negotiate function. -func NegotiateIncludingOpenMetrics(h http.Header) Format { - escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { - switch Format(escapeParam) { - case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format("; escaping=" + escapeParam) - default: - // If the escaping parameter is unknown, ignore it. 
- } - } - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim + escapingScheme - case "text": - return FmtProtoText + escapingScheme - case "compact-text": - return FmtProtoCompact + escapingScheme - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + escapingScheme - } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { - switch ver { - case OpenMetricsVersion_1_0_0: - return FmtOpenMetrics_1_0_0 + escapingScheme - default: - return FmtOpenMetrics_0_0_1 + escapingScheme - } - } - } - return FmtText + escapingScheme -} - -// NewEncoder returns a new encoder based on content type negotiation. All -// Encoder implementations returned by NewEncoder also implement Closer, and -// callers should always call the Close method. It is currently only required -// for FmtOpenMetrics, but a future (breaking) release will add the Close method -// to the Encoder interface directly. The current version of the Encoder -// interface is kept for backwards compatibility. -// In cases where the Format does not allow for UTF-8 names, the global -// NameEscapingScheme will be applied. -// -// NewEncoder can be called with additional options to customize the OpenMetrics text output. -// For example: -// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines()) -// -// Extra options are ignored for all other formats. 
-func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { - escapingScheme := format.ToEscapingScheme() - - switch format.FormatType() { - case TypeProtoDelim: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) - return err - }, - close: func() error { return nil }, - } - case TypeProtoCompact: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) - return err - }, - close: func() error { return nil }, - } - case TypeProtoText: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) - return err - }, - close: func() error { return nil }, - } - case TypeTextPlain: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) - return err - }, - close: func() error { return nil }, - } - case TypeOpenMetrics: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...) - return err - }, - close: func() error { - _, err := FinalizeOpenMetrics(w) - return err - }, - } - } - panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) -} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index c34c7de432..0000000000 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package expfmt contains tools for reading and writing Prometheus metrics. -package expfmt - -import ( - "errors" - "strings" - - "github.com/prometheus/common/model" -) - -// Format specifies the HTTP content type of the different wire protocols. -type Format string - -// Constants to assemble the Content-Type values for the different wire -// protocols. The Content-Type strings here are all for the legacy exposition -// formats, where valid characters for metric names and label names are limited. -// Support for arbitrary UTF-8 characters in those names is already partially -// implemented in this module (see model.ValidationScheme), but to actually use -// it on the wire, new content-type strings will have to be agreed upon and -// added here. -const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - //nolint:revive // Allow for underscores. - OpenMetricsVersion_0_0_1 = "0.0.1" - //nolint:revive // Allow for underscores. - OpenMetricsVersion_1_0_0 = "1.0.0" - - // The Content-Type values for the different wire protocols. Do not do direct - // comparisons to these constants, instead use the comparison functions. - // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. - FmtUnknown Format = `` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. 
- FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. - FmtProtoText Format = ProtoFmt + ` encoding=text` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. - //nolint:revive // Allow for underscores. - FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. - //nolint:revive // Allow for underscores. - FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) - -// FormatType is a Go enum representing the overall category for the given -// Format. As the number of Format permutations increases, doing basic string -// comparisons are not feasible, so this enum captures the most useful -// high-level attribute of the Format string. -type FormatType int - -const ( - TypeUnknown FormatType = iota - TypeProtoCompact - TypeProtoDelim - TypeProtoText - TypeTextPlain - TypeOpenMetrics -) - -// NewFormat generates a new Format from the type provided. Mostly used for -// tests, most Formats should be generated as part of content negotiation in -// encode.go. If a type has more than one version, the latest version will be -// returned. 
-func NewFormat(t FormatType) Format { - switch t { - case TypeProtoCompact: - return FmtProtoCompact - case TypeProtoDelim: - return FmtProtoDelim - case TypeProtoText: - return FmtProtoText - case TypeTextPlain: - return FmtText - case TypeOpenMetrics: - return FmtOpenMetrics_1_0_0 - default: - return FmtUnknown - } -} - -// NewOpenMetricsFormat generates a new OpenMetrics format matching the -// specified version number. -func NewOpenMetricsFormat(version string) (Format, error) { - if version == OpenMetricsVersion_0_0_1 { - return FmtOpenMetrics_0_0_1, nil - } - if version == OpenMetricsVersion_1_0_0 { - return FmtOpenMetrics_1_0_0, nil - } - return FmtUnknown, errors.New("unknown open metrics version string") -} - -// WithEscapingScheme returns a copy of Format with the specified escaping -// scheme appended to the end. If an escaping scheme already exists it is -// removed. -func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { - var terms []string - for _, p := range strings.Split(string(f), ";") { - toks := strings.Split(p, "=") - if len(toks) != 2 { - trimmed := strings.TrimSpace(p) - if len(trimmed) > 0 { - terms = append(terms, trimmed) - } - continue - } - key := strings.TrimSpace(toks[0]) - if key != model.EscapingKey { - terms = append(terms, strings.TrimSpace(p)) - } - } - terms = append(terms, model.EscapingKey+"="+s.String()) - return Format(strings.Join(terms, "; ")) -} - -// FormatType deduces an overall FormatType for the given format. 
-func (f Format) FormatType() FormatType { - toks := strings.Split(string(f), ";") - params := make(map[string]string) - for i, t := range toks { - if i == 0 { - continue - } - args := strings.Split(t, "=") - if len(args) != 2 { - continue - } - params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1]) - } - - switch strings.TrimSpace(toks[0]) { - case ProtoType: - if params["proto"] != ProtoProtocol { - return TypeUnknown - } - switch params["encoding"] { - case "delimited": - return TypeProtoDelim - case "text": - return TypeProtoText - case "compact-text": - return TypeProtoCompact - default: - return TypeUnknown - } - case OpenMetricsType: - if params["charset"] != "utf-8" { - return TypeUnknown - } - return TypeOpenMetrics - case "text/plain": - v, ok := params["version"] - if !ok { - return TypeTextPlain - } - if v == TextVersion { - return TypeTextPlain - } - return TypeUnknown - default: - return TypeUnknown - } -} - -// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the -// Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid -// "escaping" term exists, that will be used. Otherwise, the global default will -// be returned. 
-func (f Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(f), ";") { - toks := strings.Split(p, "=") - if len(toks) != 2 { - continue - } - key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) - if key == model.EscapingKey { - scheme, err := model.ToEscapingScheme(value) - if err != nil { - return model.NameEscapingScheme - } - return scheme - } - } - return model.NameEscapingScheme -} diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index 0290f6abc4..0000000000 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Build only when actually fuzzing -//go:build gofuzz -// +build gofuzz - -package expfmt - -import ( - "bytes" - - "github.com/prometheus/common/model" -) - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. 
-func Fuzz(in []byte) int { - parser := NewTextParser(model.UTF8Validation) - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go deleted file mode 100644 index 8dbf6d04ed..0000000000 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ /dev/null @@ -1,695 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/types/known/timestamppb" - - "github.com/prometheus/common/model" -) - -type encoderOption struct { - withCreatedLines bool - withUnit bool -} - -type EncoderOption func(*encoderOption) - -// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder -// to include _created lines (See -// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). -// Created timestamps can improve the accuracy of series reset detection, but -// come with a bandwidth cost. 
-// -// At the time of writing, created timestamp ingestion is still experimental in -// Prometheus and need to be enabled with the feature-flag -// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are -// still possible. Therefore, it is recommended to use this feature with caution. -func WithCreatedLines() EncoderOption { - return func(t *encoderOption) { - t.withCreatedLines = true - } -} - -// WithUnit is an EncoderOption enabling a set unit to be written to the output -// and to be added to the metric name, if it's not there already, as a suffix. -// Without opting in this way, the unit will not be added to the metric name and, -// on top of that, the unit will not be passed onto the output, even if it -// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil. -func WithUnit() EncoderOption { - return func(t *encoderOption) { - t.withUnit = true - } -} - -// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the -// OpenMetrics text format and writes the resulting lines to 'out'. It returns -// the number of bytes written and any error encountered. The output will have -// the same order as the input, no further sorting is performed. Furthermore, -// this function assumes the input is already sanitized and does not perform any -// sanity checks. If the input contains duplicate metrics or invalid metric or -// label names, the conversion will result in invalid text format output. -// -// If metric names conform to the legacy validation pattern, they will be placed -// outside the brackets in the traditional way, like `foo{}`. If the metric name -// fails the legacy validation check, it will be placed quoted inside the -// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and -// no error will be thrown in this case. -// -// Similar to metric names, if label names conform to the legacy validation -// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. 
If the label -// name fails the legacy validation check, it will be quoted: -// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and -// no error will be thrown in this case. -// -// This function fulfills the type 'expfmt.encoder'. -// -// Note that OpenMetrics requires a final `# EOF` line. Since this function acts -// on individual metric families, it is the responsibility of the caller to -// append this line to 'out' once all metric families have been written. -// Conveniently, this can be done by calling FinalizeOpenMetrics. -// -// The output should be fully OpenMetrics compliant. However, there are a few -// missing features and peculiarities to avoid complications when switching from -// Prometheus to OpenMetrics or vice versa: -// -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT` -// lines. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. -// -// - According to the OM specs, the `# UNIT` line is optional, but if populated, -// the unit has to be present in the metric name as its suffix: -// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). -// However, in order to accommodate any potential scenario where such a change in the -// metric name is not desirable, the users are here given the choice of either explicitly -// opt in, in case they wish for the unit to be included in the output AND in the metric name -// as a suffix (see the description of the WithUnit function above), -// or not to opt in, in case they don't want for any of that to happen. -// -// - No support for the following (optional) features: info type, -// stateset type, gaugehistogram type. -// -// - The size of exemplar labels is not checked (i.e. 
it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). -// -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) { - toOM := encoderOption{} - for _, option := range options { - option(&toOM) - } - - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var ( - n int - metricType = in.GetType() - compliantName = name - ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { - compliantName = name[:len(name)-6] - } - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { - compliantName = compliantName + "_" + *in.Unit - } - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = writeName(w, compliantName) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, true) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = writeName(w, compliantName) - written += n - if err != nil { - return - } - switch metricType { - case dto.MetricType_COUNTER: - if strings.HasSuffix(name, "_total") { - n, err = w.WriteString(" counter\n") - } else { - n, err = w.WriteString(" unknown\n") - } - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" unknown\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - if toOM.withUnit && in.Unit != nil { - n, err = w.WriteString("# UNIT ") - written += n - if err != nil { - return - } - n, err = writeName(w, compliantName) - written += n - if err != nil { - return - } - - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Unit, true) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - - var createdTsBytesWritten int - - // Finally the samples, one line for each. 
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName += "_total" - } - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", compliantName, metric, - ) - } - n, err = writeOpenMetricsSample( - w, compliantName, "", metric, "", 0, - metric.Counter.GetValue(), 0, false, - metric.Counter.Exemplar, - ) - if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil { - createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp()) - n += createdTsBytesWritten - } - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", compliantName, metric, - ) - } - n, err = writeOpenMetricsSample( - w, compliantName, "", metric, "", 0, - metric.Gauge.GetValue(), 0, false, - nil, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", compliantName, metric, - ) - } - n, err = writeOpenMetricsSample( - w, compliantName, "", metric, "", 0, - metric.Untyped.GetValue(), 0, false, - nil, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", compliantName, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeOpenMetricsSample( - w, compliantName, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, compliantName, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, compliantName, "_count", metric, "", 0, - 0, metric.Summary.GetSampleCount(), true, - nil, - ) - if 
toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil { - createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) - n += createdTsBytesWritten - } - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", compliantName, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeOpenMetricsSample( - w, compliantName, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - 0, b.GetCumulativeCount(), true, - b.Exemplar, - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeOpenMetricsSample( - w, compliantName, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, compliantName, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, compliantName, "_count", metric, "", 0, - 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil { - createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp()) - n += createdTsBytesWritten - } - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", compliantName, metric, - ) - } - written += n - if err != nil { - return - } - } - return -} - -// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. 
-func FinalizeOpenMetrics(w io.Writer) (written int, err error) { - return w.Write([]byte("# EOF\n")) -} - -// writeOpenMetricsSample writes a single sample in OpenMetrics text format to -// w, given the metric name, the metric proto message itself, optionally an -// additional label name with a float64 value (use empty string as label name if -// not required), the value (optionally as float64 or uint64, determined by -// useIntValue), and optionally an exemplar (use nil if not required). The -// function returns the number of bytes written and any error encountered. -func writeOpenMetricsSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - floatValue float64, intValue uint64, useIntValue bool, - exemplar *dto.Exemplar, -) (int, error) { - written := 0 - n, err := writeOpenMetricsNameAndLabelPairs( - w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - if useIntValue { - n, err = writeUint(w, intValue) - } else { - n, err = writeOpenMetricsFloat(w, floatValue) - } - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - // TODO(beorn7): Format this directly without converting to a float first. - n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) - written += n - if err != nil { - return written, err - } - } - if exemplar != nil && len(exemplar.Label) > 0 { - n, err = writeExemplar(w, exemplar) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but -// formats the float in OpenMetrics style. 
-func writeOpenMetricsNameAndLabelPairs( - w enhancedWriter, - name string, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - var ( - written int - separator byte = '{' - metricInsideBraces = false - ) - - if name != "" { - // If the name does not pass the legacy validity check, we must put the - // metric name inside the braces, quoted. - if !model.LegacyValidation.IsValidMetricName(name) { - metricInsideBraces = true - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - separator = ',' - } - - n, err := writeName(w, name) - written += n - if err != nil { - return written, err - } - } - - if len(in) == 0 && additionalLabelName == "" { - if metricInsideBraces { - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - } - return written, nil - } - - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := writeName(w, lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - 
return written, nil -} - -// writeOpenMetricsCreated writes the created timestamp for a single time series -// following OpenMetrics text format to w, given the metric name, the metric proto -// message itself, optionally a suffix to be removed, e.g. '_total' for counters, -// an additional label name with a float64 value (use empty string as label name if -// not required) and the timestamp that represents the created timestamp. -// The function returns the number of bytes written and any error encountered. -func writeOpenMetricsCreated(w enhancedWriter, - name, suffixToTrim string, metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - createdTimestamp *timestamppb.Timestamp, -) (int, error) { - written := 0 - n, err := writeOpenMetricsNameAndLabelPairs( - w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - - // TODO(beorn7): Format this directly from components of ts to - // avoid overflow/underflow and precision issues of the float - // conversion. - n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9) - written += n - if err != nil { - return written, err - } - - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeExemplar writes the provided exemplar in OpenMetrics format to w. The -// function returns the number of bytes written and any error encountered. 
-func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { - written := 0 - n, err := w.WriteString(" # ") - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, e.GetValue()) - written += n - if err != nil { - return written, err - } - if e.Timestamp != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - err = e.Timestamp.CheckValid() - if err != nil { - return written, err - } - ts := e.Timestamp.AsTime() - // TODO(beorn7): Format this directly from components of ts to - // avoid overflow/underflow and precision issues of the float - // conversion. - n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting -// number would otherwise contain neither a "." nor an "e". -func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return w.WriteString("1.0") - case f == 0: - return w.WriteString("0.0") - case f == -1: - return w.WriteString("-1.0") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - if !bytes.ContainsAny(*bp, "e.") { - *bp = append(*bp, '.', '0') - } - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeUint is like writeInt just for uint64. 
-func writeUint(w enhancedWriter, u uint64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendUint((*bp)[:0], u, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index c4e9c1bbc3..0000000000 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "fmt" - "io" - "math" - "strconv" - "strings" - "sync" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/common/model" -) - -// enhancedWriter has all the enhanced write functions needed here. bufio.Writer -// implements it. -type enhancedWriter interface { - io.Writer - WriteRune(r rune) (n int, err error) - WriteString(s string) (n int, err error) - WriteByte(c byte) error -} - -const ( - initialNumBufSize = 24 -) - -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriter(io.Discard) - }, - } - numBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, initialNumBufSize) - return &b - }, - } -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. 
It returns the number of bytes written -// and any error encountered. The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// If metric names conform to the legacy validation pattern, they will be placed -// outside the brackets in the traditional way, like `foo{}`. If the metric name -// fails the legacy validation check, it will be placed quoted inside the -// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and -// no error will be thrown in this case. -// -// Similar to metric names, if label names conform to the legacy validation -// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label -// name fails the legacy validation check, it will be quoted: -// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and -// no error will be thrown in this case. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { - // Fail-fast checks. - if len(in.Metric) == 0 { - return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var n int - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = writeName(w, name) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, false) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = writeName(w, name) - written += n - if err != nil { - return - } - metricType := in.GetType() - switch metricType { - case dto.MetricType_COUNTER: - n, err = w.WriteString(" counter\n") - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. 
- for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Summary.GetSampleCount()), - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), - ) - written += n 
- if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return - } - } - return -} - -// writeSample writes a single sample in text format to w, given the metric -// name, the metric proto message itself, optionally an additional label name -// with a float64 value (use empty string as label name if not required), and -// the value. The function returns the number of bytes written and any error -// encountered. -func writeSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - value float64, -) (int, error) { - written := 0 - n, err := writeNameAndLabelPairs( - w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeFloat(w, value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeInt(w, *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given metric name and additional label pair into text formatted as -// required by the text format and writes it to 'w'. An empty slice in -// combination with an empty string 'additionalLabelName' results in nothing -// being written. 
Otherwise, the label pairs are written, escaped as required by -// the text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. If the metric name is not -// legacy-valid, it will be put inside the brackets as well. Legacy-invalid -// label names will also be quoted. -func writeNameAndLabelPairs( - w enhancedWriter, - name string, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - var ( - written int - separator byte = '{' - metricInsideBraces = false - ) - - if name != "" { - // If the name does not pass the legacy validity check, we must put the - // metric name inside the braces. - if !model.LegacyValidation.IsValidMetricName(name) { - metricInsideBraces = true - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - separator = ',' - } - n, err := writeName(w, name) - written += n - if err != nil { - return written, err - } - } - - if len(in) == 0 && additionalLabelName == "" { - if metricInsideBraces { - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - } - return written, nil - } - - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := writeName(w, lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return 
written, err - } - n, err = writeFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -var ( - escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) - quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { - if includeDoubleQuote { - return quotedEscaper.WriteString(w, v) - } - return escaper.WriteString(w, v) -} - -// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes -// a few common cases for increased efficiency. For non-hardcoded cases, it uses -// strconv.AppendFloat to avoid allocations, similar to writeInt. -func writeFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return 1, w.WriteByte('1') - case f == 0: - return 1, w.WriteByte('0') - case f == -1: - return w.WriteString("-1") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeInt is equivalent to fmt.Fprint with an int64 argument but uses -// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid -// allocations. 
-func writeInt(w enhancedWriter, i int64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendInt((*bp)[:0], i, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} - -// writeName writes a string as-is if it complies with the legacy naming -// scheme, or escapes it in double quotes if not. -func writeName(w enhancedWriter, name string) (int, error) { - if model.LegacyValidation.IsValidMetricName(name) { - return w.WriteString(name) - } - var written int - var err error - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - var n int - n, err = writeEscapedString(w, name, true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index 8f2edde324..0000000000 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,933 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "math" - "strconv" - "strings" - "unicode/utf8" - - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" - - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. 
By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. - lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. 
- currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool - // These indicate if the metric name from the current line being parsed is inside - // braces and if that metric name was found respectively. - currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool - // scheme sets the desired ValidationScheme for names. Defaults to the invalid - // UnsetValidation. - scheme model.ValidationScheme -} - -// NewTextParser returns a new TextParser with the provided nameValidationScheme. -func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { - return TextParser{scheme: nameValidationScheme} -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. 
-// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) - if p.err != nil && errors.Is(p.err, io.EOF) { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - p.currentLabelPairs = nil - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - p.currentMF = nil -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - p.currentMetricIsInsideBraces = false - p.currentMetricInsideBracesIsPresent = false - if p.skipBlankTab(); p.err != nil { - // This is the only place that we expect to see io.EOF, - // which is not an error but the signal that we are done. - // Any other error that happens to align with the start of - // a line is still an error. 
- if errors.Is(p.err, io.EOF) { - p.err = nil - } - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - case '{': - p.currentMetricIsInsideBraces = true - return p.readingLabels - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.err != nil { - return nil - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. 
- return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - if p.err != nil { - return nil - } - // Now is the time to fix the type if it hasn't happened yet. - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. 
- if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) - p.currentLabelPairs = nil - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentByte != '=' { - if p.currentMetricIsInsideBraces { - if p.currentMetricInsideBracesIsPresent { - p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) - return nil - } - switch p.currentByte { - case ',': - p.setOrCreateCurrentMF() - if p.err != nil { - return nil - } - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - p.currentMetricInsideBracesIsPresent = true - return p.startLabelName - case '}': - p.setOrCreateCurrentMF() - if p.err != nil { - p.currentLabelPairs = nil - return nil - } - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) - p.currentLabelPairs = nil - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) - return nil - } - } - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - p.currentLabelPairs = nil - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - p.currentLabelPairs = nil - return nil - } - if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { - p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) - p.currentLabelPairs = nil - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. 
- if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && - (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { - p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) - } - // Check for duplicate label names. - labels := make(map[string]struct{}) - for _, l := range p.currentLabelPairs { - lName := l.GetName() - if _, exists := labels[lName]; exists { - p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) - p.currentLabelPairs = nil - return nil - } - labels[lName] = struct{}{} - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - if !model.LabelValue(p.currentToken.String()).IsValid() { - p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. 
- p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - p.currentLabelPairs = nil - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.currentMF == nil { - p.parseError("invalid metric name") - return nil - } - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) - p.currentLabelPairs = nil - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) - p.currentLabelPairs = nil - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. 
- switch p.currentMF.GetType() { - case dto.MetricType_SUMMARY: - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - case dto.MetricType_HISTOGRAM: - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - default: - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := parseFloat(p.currentToken.String()) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil 
{ - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) - return nil - } - p.currentMetric.TimestampMs = proto.Int64(timestamp) - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() > 0 { - p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) - return nil - } - return p.startOfLine -} - -// readingHelp represents the state where the last byte read (now in -// p.currentByte) is the first byte of the docstring after 'HELP'. -func (p *TextParser) readingHelp() stateFn { - if p.currentMF.Help != nil { - p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) - return nil - } - // Rest of line is the docstring. 
- if p.readTokenUntilNewline(true); p.err != nil { - return nil // Unexpected end of input. - } - p.currentMF.Help = proto.String(p.currentToken.String()) - return p.startOfLine -} - -// readingType represents the state where the last byte read (now in -// p.currentByte) is the first byte of the type hint after 'HELP'. -func (p *TextParser) readingType() stateFn { - if p.currentMF.Type != nil { - p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) - return nil - } - // Rest of line is the type. - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] - if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil - } - p.currentMF.Type = dto.MetricType(metricType).Enum() - return p.startOfLine -} - -// parseError sets p.err to a ParseError at the current line with the given -// message. -func (p *TextParser) parseError(msg string) { - p.err = ParseError{ - Line: p.lineCount, - Msg: msg, - } -} - -// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte -// that is neither ' ' nor '\t'. That byte is left in p.currentByte. -func (p *TextParser) skipBlankTab() { - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { - return - } - } -} - -// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do -// anything if p.currentByte is neither ' ' nor '\t'. -func (p *TextParser) skipBlankTabIfCurrentBlankTab() { - if isBlankOrTab(p.currentByte) { - p.skipBlankTab() - } -} - -// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The -// first byte considered is the byte already read (now in p.currentByte). The -// first whitespace byte encountered is still copied into p.currentByte, but not -// into p.currentToken. 
-func (p *TextParser) readTokenUntilWhitespace() { - p.currentToken.Reset() - for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first -// byte considered is the byte already read (now in p.currentByte). The first -// newline byte encountered is still copied into p.currentByte, but not into -// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' translates into '\', and '\n' into a line-feed character. -// All other escape sequences are invalid and cause an error. -func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - case '"': - p.currentToken.WriteByte('"') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - // A UTF-8 metric name must be quoted and may have escaped characters. 
- quoted := false - escaped := false - if !isValidMetricNameStart(p.currentByte) { - return - } - for p.err == nil { - if escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - case '"': - p.currentToken.WriteByte('"') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '"': - quoted = !quoted - if !quoted { - p.currentByte, p.err = p.buf.ReadByte() - return - } - case '\n': - p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - // A UTF-8 label name must be quoted and may have escaped characters. 
- quoted := false - escaped := false - if !isValidLabelNameStart(p.currentByte) { - return - } - for p.err == nil { - if escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - case '"': - p.currentToken.WriteByte('"') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '"': - quoted = !quoted - if !quoted { - p.currentByte, p.err = p.buf.ReadByte() - return - } - case '\n': - p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. 
-func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - p.currentLabelPairs = nil - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if !p.scheme.IsValidMetricName(name) { - p.parseError(fmt.Sprintf("invalid metric name %q", name)) - return - } - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' -} - -func isValidLabelNameContinuation(b byte, quoted bool) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte, quoted bool) bool { - return isValidLabelNameContinuation(b, quoted) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return 
name[:len(name)-7] - default: - return name - } -} - -func parseFloat(s string) (float64, error) { - if strings.ContainsAny(s, "pP_") { - return 0, errors.New("unsupported character in float") - } - return strconv.ParseFloat(s, 64) -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 460f554f29..0000000000 --- a/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "errors" - "fmt" - "time" -) - -type AlertStatus string - -const ( - AlertFiring AlertStatus = "firing" - AlertResolved AlertStatus = "resolved" -) - -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels LabelSet `json:"labels"` - - // Extra key/value information which does not define alert identity. - Annotations LabelSet `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. 
-func (a *Alert) Name() string { - return string(a.Labels[AlertNameLabel]) -} - -// Fingerprint returns a unique hash for the alert. It is equivalent to -// the fingerprint of the alert's label set. -func (a *Alert) Fingerprint() Fingerprint { - return a.Labels.Fingerprint() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true iff the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - -// Status returns the status of the alert. -func (a *Alert) Status() AlertStatus { - return a.StatusAt(time.Now()) -} - -// StatusAt returns the status of the alert at the given timestamp. -func (a *Alert) StatusAt(ts time.Time) AlertStatus { - if a.ResolvedAt(ts) { - return AlertResolved - } - return AlertFiring -} - -// Validate checks whether the alert data is inconsistent. -func (a *Alert) Validate() error { - if a.StartsAt.IsZero() { - return errors.New("start time missing") - } - if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return errors.New("start time must be before end time") - } - if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %w", err) - } - if len(a.Labels) == 0 { - return errors.New("at least one label pair required") - } - if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %w", err) - } - return nil -} - -// Alert is a list of alerts that can be sorted in chronological order. 
-type Alerts []*Alert - -func (as Alerts) Len() int { return len(as) } -func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } - -func (as Alerts) Less(i, j int) bool { - if as[i].StartsAt.Before(as[j].StartsAt) { - return true - } - if as[i].EndsAt.Before(as[j].EndsAt) { - return true - } - return as[i].Fingerprint() < as[j].Fingerprint() -} - -// HasFiring returns true iff one of the alerts is not resolved. -func (as Alerts) HasFiring() bool { - for _, a := range as { - if !a.Resolved() { - return true - } - } - return false -} - -// HasFiringAt returns true iff one of the alerts is not resolved -// at the time ts. -func (as Alerts) HasFiringAt(ts time.Time) bool { - for _, a := range as { - if !a.ResolvedAt(ts) { - return true - } - } - return false -} - -// Status returns StatusFiring iff at least one of the alerts is firing. -func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} - -// StatusAt returns StatusFiring iff at least one of the alerts is firing -// at the time ts. -func (as Alerts) StatusAt(ts time.Time) AlertStatus { - if as.HasFiringAt(ts) { - return AlertFiring - } - return AlertResolved -} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de4106e..0000000000 --- a/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. 
-func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go deleted file mode 100644 index 367afecd30..0000000000 --- a/vendor/github.com/prometheus/common/model/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializes a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 
-func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go deleted file mode 100644 index dfeb34be5f..0000000000 --- a/vendor/github.com/prometheus/common/model/labels.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "unicode/utf8" -) - -const ( - // AlertNameLabel is the name of the label containing the alert's name. - AlertNameLabel = "alertname" - - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" - // MetricTypeLabel is the label name indicating the metric type of - // timeseries as per the PROM-39 proposal. - MetricTypeLabel = "__type__" - // MetricUnitLabel is the label name indicating the metric unit of - // timeseries as per the PROM-39 proposal. - MetricUnitLabel = "__unit__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. 
- AddressLabel = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel = "__metrics_path__" - - // ScrapeIntervalLabel is the name of the label that holds the scrape interval - // used to scrape a target. - ScrapeIntervalLabel = "__scrape_interval__" - - // ScrapeTimeoutLabel is the name of the label that holds the scrape - // timeout used to scrape a target. - ScrapeTimeoutLabel = "__scrape_timeout__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. - ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. - MetaLabelPrefix = "__meta_" - - // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. This is reserved for use in - // Prometheus configuration files by users. - TmpLabelPrefix = "__tmp_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. - QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. Note that the -// IsValid method of LabelName performs the same check but faster than a match -// with this regular expression. 
-var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") - -// A LabelName is a key for a LabelSet or Metric. It has a value associated -// therewith. -type LabelName string - -// IsValid returns true iff the name matches the pattern of LabelNameRE when -// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if -// NameValidationScheme is set to UTF8Validation. -// -// Deprecated: This method should not be used and may be removed in the future. -// Use [ValidationScheme.IsValidLabelName] instead. -func (ln LabelName) IsValid() bool { - return NameValidationScheme.IsValidLabelName(string(ln)) -} - -// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for -// legacy names. It does not use LabelNameRE for the check but a much faster -// hardcoded implementation. -// -// Deprecated: This method should not be used and may be removed in the future. -// Use [LegacyValidation.IsValidLabelName] instead. -func (ln LabelName) IsValidLegacy() bool { - return LegacyValidation.IsValidLabelName(string(ln)) -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// LabelNames is a sortable LabelName slice. In implements sort.Interface. 
-type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// IsValid returns true iff the string is a valid UTF-8. -func (lv LabelValue) IsValid() bool { - return utf8.ValidString(string(lv)) -} - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. -type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// LabelPair pairs a name with a value. -type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface. 
-type LabelPairs []*LabelPair - -func (l LabelPairs) Len() int { - return len(l) -} - -func (l LabelPairs) Less(i, j int) bool { - switch { - case l[i].Name > l[j].Name: - return false - case l[i].Name < l[j].Name: - return true - case l[i].Value > l[j].Value: - return false - case l[i].Value < l[j].Value: - return true - default: - return false - } -} - -func (l LabelPairs) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 9de47b2568..0000000000 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -// Validate checks whether all names and values in the label set -// are valid. 
-func (ls LabelSet) Validate() error { - for ln, lv := range ls { - if !ln.IsValid() { - return fmt.Errorf("invalid name %q", ln) - } - if !lv.IsValid() { - return fmt.Errorf("invalid value %q", lv) - } - } - return nil -} - -// Equal returns true iff both label sets have exactly the same key/value pairs. -func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. - sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// Clone returns a copy of the label set. -func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. 
-func (ls LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(ls)) - - for k, v := range ls { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ls *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *ls = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go deleted file mode 100644 index abb2c90018..0000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "bytes" - "slices" - "strconv" -) - -// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically. -func (l LabelSet) String() string { - var lna [32]string // On stack to avoid memory allocation for sorting names. - labelNames := lna[:0] - for name := range l { - labelNames = append(labelNames, string(name)) - } - slices.Sort(labelNames) - var bytea [1024]byte // On stack to avoid memory allocation while building the output. - b := bytes.NewBuffer(bytea[:0]) - b.WriteByte('{') - for i, name := range labelNames { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(name) - b.WriteByte('=') - b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)]))) - } - b.WriteByte('}') - return b.String() -} diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go deleted file mode 100644 index 447ab8ad63..0000000000 --- a/vendor/github.com/prometheus/common/model/metadata.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// MetricType represents metric type values. 
-type MetricType string - -const ( - MetricTypeCounter = MetricType("counter") - MetricTypeGauge = MetricType("gauge") - MetricTypeHistogram = MetricType("histogram") - MetricTypeGaugeHistogram = MetricType("gaugehistogram") - MetricTypeSummary = MetricType("summary") - MetricTypeInfo = MetricType("info") - MetricTypeStateset = MetricType("stateset") - MetricTypeUnknown = MetricType("unknown") -) diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index 3feebf328a..0000000000 --- a/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,593 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "unicode/utf8" - - dto "github.com/prometheus/client_model/go" - "go.yaml.in/yaml/v2" - "google.golang.org/protobuf/proto" -) - -var ( - // NameValidationScheme determines the global default method of the name - // validation to be used by all calls to IsValidMetricName() and LabelName - // IsValid(). - // - // Deprecated: This variable should not be used and might be removed in the - // far future. If you wish to stick to the legacy name validation use - // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods - // instead. 
This variable is here as an escape hatch for emergency cases, - // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., - // to delay UTF-8 migrations in time or aid in debugging unforeseen results of - // the change. In such a case, a temporary assignment to `LegacyValidation` - // value in the `init()` function in your main.go or so, could be considered. - // - // Historically we opted for a global variable for feature gating different - // validation schemes in operations that were not otherwise easily adjustable - // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate - // Labels structure or package might have been a better choice. Given the - // change was made and many upgraded the common already, we live this as-is - // with this warning and learning for the future. - NameValidationScheme = UTF8Validation - - // NameEscapingScheme defines the default way that names will be escaped when - // presented to systems that do not support UTF-8 names. If the Content-Type - // "escaping" term is specified, that will override this value. - // NameEscapingScheme should not be set to the NoEscaping value. That string - // is used in content negotiation to indicate that a system supports UTF-8 and - // has that feature enabled. - NameEscapingScheme = UnderscoreEscaping -) - -// ValidationScheme is a Go enum for determining how metric and label names will -// be validated by this library. -type ValidationScheme int - -const ( - // UnsetValidation represents an undefined ValidationScheme. - // Should not be used in practice. - UnsetValidation ValidationScheme = iota - - // LegacyValidation is a setting that requires that all metric and label names - // conform to the original Prometheus character requirements described by - // MetricNameRE and LabelNameRE. - LegacyValidation - - // UTF8Validation only requires that metric and label names be valid UTF-8 - // strings. 
- UTF8Validation -) - -var _ interface { - yaml.Marshaler - yaml.Unmarshaler - json.Marshaler - json.Unmarshaler - fmt.Stringer -} = new(ValidationScheme) - -// String returns the string representation of s. -func (s ValidationScheme) String() string { - switch s { - case UnsetValidation: - return "unset" - case LegacyValidation: - return "legacy" - case UTF8Validation: - return "utf8" - default: - panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) - } -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (s ValidationScheme) MarshalYAML() (any, error) { - switch s { - case UnsetValidation: - return "", nil - case LegacyValidation, UTF8Validation: - return s.String(), nil - default: - panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) - } -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { - var scheme string - if err := unmarshal(&scheme); err != nil { - return err - } - return s.Set(scheme) -} - -// MarshalJSON implements the json.Marshaler interface. -func (s ValidationScheme) MarshalJSON() ([]byte, error) { - switch s { - case UnsetValidation: - return json.Marshal("") - case UTF8Validation, LegacyValidation: - return json.Marshal(s.String()) - default: - return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) - } -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { - var repr string - if err := json.Unmarshal(bytes, &repr); err != nil { - return err - } - return s.Set(repr) -} - -// Set implements the pflag.Value interface. -func (s *ValidationScheme) Set(text string) error { - switch text { - case "": - // Don't change the value. 
- case LegacyValidation.String(): - *s = LegacyValidation - case UTF8Validation.String(): - *s = UTF8Validation - default: - return fmt.Errorf("unrecognized ValidationScheme: %q", text) - } - return nil -} - -// IsValidMetricName returns whether metricName is valid according to s. -func (s ValidationScheme) IsValidMetricName(metricName string) bool { - switch s { - case LegacyValidation: - if len(metricName) == 0 { - return false - } - for i, b := range metricName { - if !isValidLegacyRune(b, i) { - return false - } - } - return true - case UTF8Validation: - if len(metricName) == 0 { - return false - } - return utf8.ValidString(metricName) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) - } -} - -// IsValidLabelName returns whether labelName is valid according to s. -func (s ValidationScheme) IsValidLabelName(labelName string) bool { - switch s { - case LegacyValidation: - if len(labelName) == 0 { - return false - } - for i, b := range labelName { - // TODO: Apply De Morgan's law. Make sure there are tests for this. - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck - return false - } - } - return true - case UTF8Validation: - if len(labelName) == 0 { - return false - } - return utf8.ValidString(labelName) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) - } -} - -// Type implements the pflag.Value interface. -func (ValidationScheme) Type() string { - return "validationScheme" -} - -type EscapingScheme int - -const ( - // NoEscaping indicates that a name will not be escaped. Unescaped names that - // do not conform to the legacy validity check will use a new exposition - // format syntax that will be officially standardized in future versions. - NoEscaping EscapingScheme = iota - - // UnderscoreEscaping replaces all legacy-invalid characters with underscores. 
- UnderscoreEscaping - - // DotsEscaping is similar to UnderscoreEscaping, except that dots are - // converted to `_dot_` and pre-existing underscores are converted to `__`. - DotsEscaping - - // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid - // characters with the unicode value, surrounded by underscores. Single - // underscores are replaced with double underscores. - ValueEncodingEscaping -) - -const ( - // EscapingKey is the key in an Accept or Content-Type header that defines how - // metric and label names that do not conform to the legacy character - // requirements should be escaped when being scraped by a legacy prometheus - // system. If a system does not explicitly pass an escaping parameter in the - // Accept header, the default NameEscapingScheme will be used. - EscapingKey = "escaping" - - // Possible values for Escaping Key. - AllowUTF8 = "allow-utf-8" // No escaping required. - EscapeUnderscores = "underscores" - EscapeDots = "dots" - EscapeValues = "values" -) - -// MetricNameRE is a regular expression matching valid metric -// names. Note that the IsValidMetricName function performs the same -// check but faster than a match with this regular expression. -var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. -func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. 
-func (m Metric) Clone() Metric { - clone := make(Metric, len(m)) - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE -// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is -// selected. -// -// Deprecated: This function should not be used and might be removed in the future. -// Use [ValidationScheme.IsValidMetricName] instead. -func IsValidMetricName(n LabelValue) bool { - return NameValidationScheme.IsValidMetricName(string(n)) -} - -// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the -// legacy validation scheme regardless of the value of NameValidationScheme. -// This function, however, does not use MetricNameRE for the check but a much -// faster hardcoded implementation. -// -// Deprecated: This function should not be used and might be removed in the future. -// Use [LegacyValidation.IsValidMetricName] instead. 
-func IsValidLegacyMetricName(n string) bool { - return LegacyValidation.IsValidMetricName(n) -} - -// EscapeMetricFamily escapes the given metric names and labels with the given -// escaping scheme. Returns a new object that uses the same pointers to fields -// when possible and creates new escaped versions so as not to mutate the -// input. -func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily { - if v == nil { - return nil - } - - if scheme == NoEscaping { - return v - } - - out := &dto.MetricFamily{ - Help: v.Help, - Type: v.Type, - Unit: v.Unit, - } - - // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { - out.Name = v.Name - } else { - out.Name = proto.String(EscapeName(v.GetName(), scheme)) - } - for _, m := range v.Metric { - if !metricNeedsEscaping(m) { - out.Metric = append(out.Metric, m) - continue - } - - escaped := &dto.Metric{ - Gauge: m.Gauge, - Counter: m.Counter, - Summary: m.Summary, - Untyped: m.Untyped, - Histogram: m.Histogram, - TimestampMs: m.TimestampMs, - } - - for _, l := range m.Label { - if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { - escaped.Label = append(escaped.Label, l) - continue - } - escaped.Label = append(escaped.Label, &dto.LabelPair{ - Name: proto.String(MetricNameLabel), - Value: proto.String(EscapeName(l.GetValue(), scheme)), - }) - continue - } - if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { - escaped.Label = append(escaped.Label, l) - continue - } - escaped.Label = append(escaped.Label, &dto.LabelPair{ - Name: proto.String(EscapeName(l.GetName(), scheme)), - Value: l.Value, - }) - } - out.Metric = append(out.Metric, escaped) - } - return out -} - -func metricNeedsEscaping(m *dto.Metric) bool { - for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { - return true - } - if !IsValidLegacyMetricName(l.GetName()) 
{ - return true - } - } - return false -} - -// EscapeName escapes the incoming name according to the provided escaping -// scheme. Depending on the rules of escaping, this may cause no change in the -// string that is returned. (Especially NoEscaping, which by definition is a -// noop). This function does not do any validation of the name. -func EscapeName(name string, scheme EscapingScheme) string { - if len(name) == 0 { - return name - } - var escaped strings.Builder - switch scheme { - case NoEscaping: - return name - case UnderscoreEscaping: - if IsValidLegacyMetricName(name) { - return name - } - for i, b := range name { - if isValidLegacyRune(b, i) { - escaped.WriteRune(b) - } else { - escaped.WriteRune('_') - } - } - return escaped.String() - case DotsEscaping: - // Do not early return for legacy valid names, we still escape underscores. - for i, b := range name { - switch { - case b == '_': - escaped.WriteString("__") - case b == '.': - escaped.WriteString("_dot_") - case isValidLegacyRune(b, i): - escaped.WriteRune(b) - default: - escaped.WriteString("__") - } - } - return escaped.String() - case ValueEncodingEscaping: - if IsValidLegacyMetricName(name) { - return name - } - escaped.WriteString("U__") - for i, b := range name { - switch { - case b == '_': - escaped.WriteString("__") - case isValidLegacyRune(b, i): - escaped.WriteRune(b) - case !utf8.ValidRune(b): - escaped.WriteString("_FFFD_") - default: - escaped.WriteRune('_') - escaped.WriteString(strconv.FormatInt(int64(b), 16)) - escaped.WriteRune('_') - } - } - return escaped.String() - default: - panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) - } -} - -// lower function taken from strconv.atoi. -func lower(c byte) byte { - return c | ('x' - 'X') -} - -// UnescapeName unescapes the incoming name according to the provided escaping -// scheme if possible. Some schemes are partially or totally non-roundtripable. -// If any error is enountered, returns the original input. 
-func UnescapeName(name string, scheme EscapingScheme) string { - if len(name) == 0 { - return name - } - switch scheme { - case NoEscaping: - return name - case UnderscoreEscaping: - // It is not possible to unescape from underscore replacement. - return name - case DotsEscaping: - name = strings.ReplaceAll(name, "_dot_", ".") - name = strings.ReplaceAll(name, "__", "_") - return name - case ValueEncodingEscaping: - escapedName, found := strings.CutPrefix(name, "U__") - if !found { - return name - } - - var unescaped strings.Builder - TOP: - for i := 0; i < len(escapedName); i++ { - // All non-underscores are treated normally. - if escapedName[i] != '_' { - unescaped.WriteByte(escapedName[i]) - continue - } - i++ - if i >= len(escapedName) { - return name - } - // A double underscore is a single underscore. - if escapedName[i] == '_' { - unescaped.WriteByte('_') - continue - } - // We think we are in a UTF-8 code, process it. - var utf8Val uint - for j := 0; i < len(escapedName); j++ { - // This is too many characters for a utf8 value based on the MaxRune - // value of '\U0010FFFF'. - if j >= 6 { - return name - } - // Found a closing underscore, convert to a rune, check validity, and append. - if escapedName[i] == '_' { - utf8Rune := rune(utf8Val) - if !utf8.ValidRune(utf8Rune) { - return name - } - unescaped.WriteRune(utf8Rune) - continue TOP - } - r := lower(escapedName[i]) - utf8Val *= 16 - switch { - case r >= '0' && r <= '9': - utf8Val += uint(r) - '0' - case r >= 'a' && r <= 'f': - utf8Val += uint(r) - 'a' + 10 - default: - return name - } - i++ - } - // Didn't find closing underscore, invalid. 
- return name - } - return unescaped.String() - default: - panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) - } -} - -func isValidLegacyRune(b rune, i int) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) -} - -func (e EscapingScheme) String() string { - switch e { - case NoEscaping: - return AllowUTF8 - case UnderscoreEscaping: - return EscapeUnderscores - case DotsEscaping: - return EscapeDots - case ValueEncodingEscaping: - return EscapeValues - default: - panic(fmt.Sprintf("unknown format scheme %d", e)) - } -} - -func ToEscapingScheme(s string) (EscapingScheme, error) { - if s == "" { - return NoEscaping, errors.New("got empty string instead of escaping scheme") - } - switch s { - case AllowUTF8: - return NoEscaping, nil - case EscapeUnderscores: - return UnderscoreEscaping, nil - case EscapeDots: - return DotsEscaping, nil - case EscapeValues: - return ValueEncodingEscaping, nil - default: - return NoEscaping, fmt.Errorf("unknown format scheme %s", s) - } -} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b9691707..0000000000 --- a/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index dc8a0026c4..0000000000 --- a/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sort" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). -const SeparatorByte byte = 255 - -// cache the signature of an empty label set. -var emptyLabelSignature = hashNew() - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. (Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) 
-func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, labelName) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, labels[labelName]) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as -// parameter (rather than a label map) and returns a Fingerprint. -func labelSetToFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(ls)) - for labelName := range ls { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(ls[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return Fingerprint(sum) -} - -// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more susceptible to -// create hash collisions. Therefore, collision detection should be applied. 
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - var result uint64 - for labelName, labelValue := range ls { - sum := hashNew() - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(labelValue)) - result ^= sum - } - return Fingerprint(result) -} - -// SignatureForLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and only includes the labels with the -// specified LabelNames into the signature calculation. The labels passed in -// will be sorted by this function. -func SignatureForLabels(m Metric, labels ...LabelName) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - sort.Sort(LabelNames(labels)) - - sum := hashNew() - for _, label := range labels { - sum = hashAdd(sum, string(label)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[label])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and excludes the labels with any of the -// specified LabelNames from the signature calculation. 
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { - if len(m) == 0 { - return emptyLabelSignature - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - if _, exclude := labels[labelName]; !exclude { - labelNames = append(labelNames, labelName) - } - } - if len(labelNames) == 0 { - return emptyLabelSignature - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go deleted file mode 100644 index 8f91a9702e..0000000000 --- a/vendor/github.com/prometheus/common/model/silence.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "time" -) - -// Matcher describes a matches the value of a given label. 
-type Matcher struct { - Name LabelName `json:"name"` - Value string `json:"value"` - IsRegex bool `json:"isRegex"` -} - -func (m *Matcher) UnmarshalJSON(b []byte) error { - type plain Matcher - if err := json.Unmarshal(b, (*plain)(m)); err != nil { - return err - } - - if len(m.Name) == 0 { - return errors.New("label name in matcher must not be empty") - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return err - } - } - return nil -} - -// Validate returns true iff all fields of the matcher have valid values. -func (m *Matcher) Validate() error { - if !m.Name.IsValid() { - return fmt.Errorf("invalid name %q", m.Name) - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return fmt.Errorf("invalid regular expression %q", m.Value) - } - } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { - return fmt.Errorf("invalid value %q", m.Value) - } - return nil -} - -// Silence defines the representation of a silence definition in the Prometheus -// eco-system. -type Silence struct { - ID uint64 `json:"id,omitempty"` - - Matchers []*Matcher `json:"matchers"` - - StartsAt time.Time `json:"startsAt"` - EndsAt time.Time `json:"endsAt"` - - CreatedAt time.Time `json:"createdAt,omitempty"` - CreatedBy string `json:"createdBy"` - Comment string `json:"comment,omitempty"` -} - -// Validate returns true iff all fields of the silence have valid values. 
-func (s *Silence) Validate() error { - if len(s.Matchers) == 0 { - return errors.New("at least one matcher required") - } - for _, m := range s.Matchers { - if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %w", err) - } - } - if s.StartsAt.IsZero() { - return errors.New("start time missing") - } - if s.EndsAt.IsZero() { - return errors.New("end time missing") - } - if s.EndsAt.Before(s.StartsAt) { - return errors.New("start time must be before end time") - } - if s.CreatedBy == "" { - return errors.New("creator information missing") - } - if s.Comment == "" { - return errors.New("comment missing") - } - if s.CreatedAt.IsZero() { - return errors.New("creation timestamp missing") - } - return nil -} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go deleted file mode 100644 index 1730b0fdc1..0000000000 --- a/vendor/github.com/prometheus/common/model/time.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "strconv" - "strings" - "time" -) - -const ( - // MinimumTick is the minimum supported time resolution. This has to be - // at least time.Second in order for the code below to work. - minimumTick = time.Millisecond - // second is the Time duration equivalent to one second. 
- second = int64(time.Second / minimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(minimumTick / time.Nanosecond) - - // Earliest is the earliest Time representable. Handy for - // initializing a high watermark. - Earliest = Time(math.MinInt64) - // Latest is the latest Time representable. Handy for initializing - // a low watermark. - Latest = Time(math.MaxInt64) -) - -// Time is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Time int64 - -// Interval describes an interval between two timestamps. -type Interval struct { - Start, End Time -} - -// Now returns the current time as a Time. -func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds. -func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. -func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. 
-func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(p[0], 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(p[0], 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] += strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - // If the value was something like -0.1 the negative is lost in the - // parsing because of the leading zero, this ensures that we capture it. - if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { - *t = Time(v+va) * -1 - } else { - *t = Time(v + va) - } - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. -type Duration time.Duration - -// Set implements pflag/flag.Value. -func (d *Duration) Set(s string) error { - var err error - *d, err = ParseDuration(s) - return err -} - -// Type implements pflag.Value. 
-func (*Duration) Type() string { - return "duration" -} - -func isdigit(c byte) bool { return c >= '0' && c <= '9' } - -// Units are required to go in order from biggest to smallest. -// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. -var unitMap = map[string]struct { - pos int - mult uint64 -}{ - "ms": {7, uint64(time.Millisecond)}, - "s": {6, uint64(time.Second)}, - "m": {5, uint64(time.Minute)}, - "h": {4, uint64(time.Hour)}, - "d": {3, uint64(24 * time.Hour)}, - "w": {2, uint64(7 * 24 * time.Hour)}, - "y": {1, uint64(365 * 24 * time.Hour)}, -} - -// ParseDuration parses a string into a time.Duration, assuming that a year -// always has 365d, a week always has 7d, and a day always has 24h. -// Negative durations are not supported. -func ParseDuration(s string) (Duration, error) { - switch s { - case "0": - // Allow 0 without a unit. - return 0, nil - case "": - return 0, errors.New("empty duration string") - } - - orig := s - var dur uint64 - lastUnitPos := 0 - - for s != "" { - if !isdigit(s[0]) { - return 0, fmt.Errorf("not a valid duration string: %q", orig) - } - // Consume [0-9]* - i := 0 - for ; i < len(s) && isdigit(s[i]); i++ { - } - v, err := strconv.ParseUint(s[:i], 10, 0) - if err != nil { - return 0, fmt.Errorf("not a valid duration string: %q", orig) - } - s = s[i:] - - // Consume unit. - for i = 0; i < len(s) && !isdigit(s[i]); i++ { - } - if i == 0 { - return 0, fmt.Errorf("not a valid duration string: %q", orig) - } - u := s[:i] - s = s[i:] - unit, ok := unitMap[u] - if !ok { - return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) - } - if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. - return 0, fmt.Errorf("not a valid duration string: %q", orig) - } - lastUnitPos = unit.pos - // Check if the provided duration overflows time.Duration (> ~ 290years). 
- if v > 1<<63/unit.mult { - return 0, errors.New("duration out of range") - } - dur += v * unit.mult - if dur > 1<<63-1 { - return 0, errors.New("duration out of range") - } - } - - return Duration(dur), nil -} - -// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. -func ParseDurationAllowNegative(s string) (Duration, error) { - if s == "" || s[0] != '-' { - return ParseDuration(s) - } - - d, err := ParseDuration(s[1:]) - - return -d, err -} - -func (d Duration) String() string { - var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" - sign = "" - ) - - if ms == 0 { - return "0s" - } - - if ms < 0 { - sign, ms = "-", -ms - } - - f := func(unit string, mult int64, exact bool) { - if exact && ms%mult != 0 { - return - } - if v := ms / mult; v > 0 { - r += fmt.Sprintf("%d%s", v, unit) - ms -= v * mult - } - } - - // Only format years and weeks if the remainder is zero, as it is often - // easier to read 90d than 12w6d. - f("y", 1000*60*60*24*365, true) - f("w", 1000*60*60*24*7, true) - - f("d", 1000*60*60*24, false) - f("h", 1000*60*60, false) - f("m", 1000*60, false) - f("s", 1000, false) - f("ms", 1, false) - - return sign + r -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.String()) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *Duration) UnmarshalJSON(bytes []byte) error { - var s string - if err := json.Unmarshal(bytes, &s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface. -func (d *Duration) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-func (d *Duration) UnmarshalText(text []byte) error { - var err error - *d, err = ParseDuration(string(text)) - return err -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go deleted file mode 100644 index a9995a37ee..0000000000 --- a/vendor/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strconv" - "strings" -) - -// ZeroSample is the pseudo zero-value of Sample used to signal a -// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, -// and metric nil. Note that the natural zero value of Sample has a timestamp -// of 0, which is possible to appear in a real Sample and thus not suitable -// to signal a non-existing Sample. -var ZeroSample = Sample{Timestamp: Earliest} - -// Sample is a sample pair associated with a metric. 
A single sample must either -// define Value or Histogram but not both. Histogram == nil implies the Value -// field is used, otherwise it should be ignored. -type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` - Histogram *SampleHistogram `json:"histogram"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// semantics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - if s.Histogram != nil { - return s.Histogram.Equal(o.Histogram) - } - return s.Value.Equal(o.Value) -} - -func (s Sample) String() string { - if s.Histogram != nil { - return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ - Timestamp: s.Timestamp, - Histogram: s.Histogram, - }) - } - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. -func (s Sample) MarshalJSON() ([]byte, error) { - if s.Histogram != nil { - v := struct { - Metric Metric `json:"metric"` - Histogram SampleHistogramPair `json:"histogram"` - }{ - Metric: s.Metric, - Histogram: SampleHistogramPair{ - Timestamp: s.Timestamp, - Histogram: s.Histogram, - }, - } - return json.Marshal(&v) - } - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - Histogram SampleHistogramPair `json:"histogram"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - Histogram: SampleHistogramPair{ - Timestamp: s.Timestamp, - Histogram: s.Histogram, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - if v.Histogram.Histogram != nil { - s.Timestamp = v.Histogram.Timestamp - s.Histogram = v.Histogram.Histogram - } else { - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - } - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. 
-type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` - Histograms []SampleHistogramPair `json:"histograms"` -} - -func (ss SampleStream) String() string { - valuesLength := len(ss.Values) - vals := make([]string, valuesLength+len(ss.Histograms)) - for i, v := range ss.Values { - vals[i] = v.String() - } - for i, v := range ss.Histograms { - vals[i+valuesLength] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -func (ss SampleStream) MarshalJSON() ([]byte, error) { - switch { - case len(ss.Histograms) > 0 && len(ss.Values) > 0: - v := struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` - Histograms []SampleHistogramPair `json:"histograms"` - }{ - Metric: ss.Metric, - Values: ss.Values, - Histograms: ss.Histograms, - } - return json.Marshal(&v) - case len(ss.Histograms) > 0: - v := struct { - Metric Metric `json:"metric"` - Histograms []SampleHistogramPair `json:"histograms"` - }{ - Metric: ss.Metric, - Histograms: ss.Histograms, - } - return json.Marshal(&v) - default: - v := struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` - }{ - Metric: ss.Metric, - Values: ss.Values, - } - return json.Marshal(&v) - } -} - -func (ss *SampleStream) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` - Histograms []SampleHistogramPair `json:"histograms"` - }{ - Metric: ss.Metric, - Values: ss.Values, - Histograms: ss.Histograms, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - ss.Metric = v.Metric - ss.Values = v.Values - ss.Histograms = v.Histograms - - return nil -} - -// Scalar is a scalar value evaluated at the set timestamp. 
-type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. -func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, v}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %w", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. 
-func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. -type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (m Matrix) String() string { - matCp := make(Matrix, len(m)) - copy(matCp, m) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go deleted file mode 100644 index 6bfc757d18..0000000000 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "strconv" -) - -// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a -// non-existing sample pair. It is a SamplePair with timestamp Earliest and -// value 0.0. Note that the natural zero value of SamplePair has a timestamp -// of 0, which is possible to appear in a real SamplePair and thus not -// suitable to signal a non-existing SamplePair. -var ZeroSamplePair = SamplePair{Timestamp: Earliest} - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return errors.New("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go deleted file mode 100644 index 91ce5b7a45..0000000000 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" -) - -type FloatString float64 - -func (v FloatString) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -func (v FloatString) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -func (v *FloatString) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return errors.New("float value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = FloatString(f) - return nil -} - -type HistogramBucket struct { - Boundaries int32 - Lower FloatString - Upper FloatString - Count FloatString -} - -func (s HistogramBucket) MarshalJSON() ([]byte, error) { - b, err := json.Marshal(s.Boundaries) - if err != nil { - return nil, err - } - l, err := json.Marshal(s.Lower) - if err != nil { - return nil, err - } - u, err := json.Marshal(s.Upper) - if err != nil { - return nil, err - } - c, err := json.Marshal(s.Count) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil -} - -func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { - tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} - wantLen := len(tmp) - if err := json.Unmarshal(buf, &tmp); err != nil { - return err - } - if gotLen := len(tmp); gotLen != wantLen { - return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) - } - return nil -} - -func (s *HistogramBucket) Equal(o *HistogramBucket) bool { - return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) -} - -func (s HistogramBucket) String() string { - var sb strings.Builder - lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 - upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 - if lowerInclusive { - sb.WriteRune('[') - } else { - sb.WriteRune('(') - } - fmt.Fprintf(&sb, "%g,%g", 
s.Lower, s.Upper) - if upperInclusive { - sb.WriteRune(']') - } else { - sb.WriteRune(')') - } - fmt.Fprintf(&sb, ":%v", s.Count) - return sb.String() -} - -type HistogramBuckets []*HistogramBucket - -func (s HistogramBuckets) Equal(o HistogramBuckets) bool { - if len(s) != len(o) { - return false - } - - for i, bucket := range s { - if !bucket.Equal(o[i]) { - return false - } - } - return true -} - -type SampleHistogram struct { - Count FloatString `json:"count"` - Sum FloatString `json:"sum"` - Buckets HistogramBuckets `json:"buckets"` -} - -func (s SampleHistogram) String() string { - return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) -} - -func (s *SampleHistogram) Equal(o *SampleHistogram) bool { - return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) -} - -type SampleHistogramPair struct { - Timestamp Time - // Histogram should never be nil, it's only stored as pointer for efficiency. - Histogram *SampleHistogram -} - -func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { - if s.Histogram == nil { - return nil, errors.New("histogram is nil") - } - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Histogram) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { - tmp := []interface{}{&s.Timestamp, &s.Histogram} - wantLen := len(tmp) - if err := json.Unmarshal(buf, &tmp); err != nil { - return err - } - if gotLen := len(tmp); gotLen != wantLen { - return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) - } - if s.Histogram == nil { - return errors.New("histogram is null") - } - return nil -} - -func (s SampleHistogramPair) String() string { - return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) -} - -func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { - return s == o || (s.Histogram.Equal(o.Histogram) 
&& s.Timestamp.Equal(o.Timestamp)) -} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go deleted file mode 100644 index 078910f46b..0000000000 --- a/vendor/github.com/prometheus/common/model/value_type.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" -) - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (et ValueType) String() string { - switch et { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 7cc33ae4a7..0000000000 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/testdata/fixtures/ -/fixtures diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml deleted file mode 100644 index 3c3bf910fd..0000000000 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ /dev/null @@ -1,45 +0,0 @@ -version: "2" -linters: - enable: - - forbidigo - - godot - - misspell - - revive - - testifylint - settings: - forbidigo: - forbid: - - pattern: ^fmt\.Print.*$ - msg: Do not commit print statements. - godot: - exclude: - # Ignore "See: URL". 
- - 'See:' - capital: true - misspell: - locale: US - exclusions: - generated: lax - presets: - - comments - - common-false-positives - - legacy - - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ -formatters: - enable: - - gofmt - - goimports - settings: - goimports: - local-prefixes: - - github.com/prometheus/procfs - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md deleted file mode 100644 index d325872bdf..0000000000 --- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -# Prometheus Community Code of Conduct - -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 853eb9d49b..0000000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,121 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) - -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) a suitable maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. 
- -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). - -* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) - -## Steps to Contribute - -Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. - -Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). - -For quickly compiling and testing your changes do: -``` -make test # Make sure all the tests pass before you commit and push :) -``` - -We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. - -## Pull Request Checklist - -* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. - -* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). 
- -* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). - -* Add tests relevant to the fixed bug or new feature. - -## Dependency management - -The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. - -All dependencies are vendored in the `vendor/` directory. - -To add or update a new dependency, use the `go get` command: - -```bash -# Pick the latest tagged release. -go get example.com/some/module/pkg - -# Pick a specific version. -go get example.com/some/module/pkg@vX.Y.Z -``` - -Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: - - -```bash -# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. -GO111MODULE=on go mod tidy - -GO111MODULE=on go mod vendor -``` - -You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. - - -## API Implementation Guidelines - -### Naming and Documentation - -Public functions and structs should normally be named according to the file(s) being read and parsed. For example, -the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function -should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). - -### Reading vs. Parsing - -Most functionality in this library consists of reading files and then parsing the text into structured data. 
In most -cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and -a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested -directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types -such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files -in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. - -### /proc and /sys filesystem I/O - -The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. -Many of the files are changing continuously and the data being read can in some cases change between subsequent -reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls -to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the -full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of -the file. - -Note that parsing the file's contents can still be performed one line at a time. This is done by first reading -the full file, and then using a scanner on the `[]byte` or `string` containing the data. - -``` - data, err := util.ReadFileNoStat("/proc/cpuinfo") - if err != nil { - return err - } - reader := bytes.NewReader(data) - scanner := bufio.NewScanner(reader) -``` - -The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files -can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does -not bother to check the size of the file before reading. 
-``` - data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") -``` - diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md deleted file mode 100644 index e00f3b365b..0000000000 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ /dev/null @@ -1,3 +0,0 @@ -* Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier -* Ben Kochie @SuperQ diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index 7edfe4d093..0000000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include Makefile.common - -%/.unpacked: %.ttar - @echo ">> extracting fixtures $*" - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -fixtures: testdata/fixtures/.unpacked - -update_fixtures: - rm -vf testdata/fixtures/.unpacked - ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ - -.PHONY: build -build: - -.PHONY: test -test: testdata/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common deleted file mode 100644 index 0ed55c2ba2..0000000000 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. 
-# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))Error Parsing File -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./... - -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -GOTEST := $(GO) test -GOTEST_DIR := -ifneq ($(CIRCLE_JOB),) -ifneq ($(shell command -v gotestsum 2> /dev/null),) - GOTEST_DIR := test-results - GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- -endif -endif - -PROMU_VERSION ?= 0.17.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -SKIP_GOLANGCI_LINT := -GOLANGCI_LINT := -GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v2.0.2 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. -# windows isn't included here because of the path separator being different. -ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) - # If we're in CI and there is an Actions file, that means the linter - # is being run in Actions, so we don't need to run it here. 
- ifneq (,$(SKIP_GOLANGCI_LINT)) - GOLANGCI_LINT := - else ifeq (,$(CIRCLE_JOB)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./Dockerfile -DOCKERBUILD_CONTEXT ?= ./ -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license lint yamllint unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" - $(GO) mod download - -.PHONY: update-go-deps -update-go-deps: - @echo ">> updating Go dependencies" - @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get -d $$m; \ - done - $(GO) mod tidy - -.PHONY: common-test-short -common-test-short: $(GOTEST_DIR) - @echo ">> running short tests" - $(GOTEST) -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: $(GOTEST_DIR) - @echo ">> running all tests" - $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) - -$(GOTEST_DIR): - @mkdir -p $@ - -.PHONY: common-format -common-format: - @echo ">> formatting code" - $(GO) fmt $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" - $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -endif - -.PHONY: common-lint-fix -common-lint-fix: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint fix" - $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) -endif - -.PHONY: common-yamllint -common-yamllint: - @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell command -v yamllint 2> /dev/null)) - @echo "yamllint not installed so skipping" -else - yamllint . -endif - -# For backward-compatibility. 
-.PHONY: common-staticcheck -common-staticcheck: lint - -.PHONY: common-unused -common-unused: - @echo ">> running check for unused/missing packages in go.mod" - $(GO) mod tidy - @git diff --exit-code -- go.sum go.mod - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker-repo-name -common-docker-repo-name: - @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ - -f $(DOCKERFILE_PATH) \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - $(DOCKERBUILD_CONTEXT) - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" - -DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach 
ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ - | sed -e '/install -d/d' \ - | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef - -govulncheck: install-govulncheck - govulncheck ./... - -install-govulncheck: - command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9aa11..0000000000 --- a/vendor/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). 
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 0718239cf1..0000000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# procfs - -This package provides functions to retrieve system, kernel, and process -metrics from the pseudo-filesystems /proc and /sys. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. - -[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs) -[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) - -## Usage - -The procfs library is organized by packages based on whether the gathered data is coming from -/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, -/sys, or both. For example, cpu statistics are gathered from -`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount -point is initialized, and then the stat information is read. - -```go -fs, err := procfs.NewFS("/proc") -stats, err := fs.Stat() -``` - -Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. - -```go - fs, err := blockdevice.NewFS("/proc", "/sys") - stats, err := fs.ProcDiskstats() -``` - -## Package Organization - -The packages in this project are organized according to (1) whether the data comes from the `/proc` or -`/sys` filesystem and (2) the type of information being retrieved. For example, most process information -can be gathered from the functions in the root `procfs` package. 
Information about block devices such as disk drives -is available in the `blockdevices` sub-package. - -## Building and Testing - -The procfs library is intended to be built as part of another application, so there are no distributable binaries. -However, most of the API includes unit tests which can be run with `make test`. - -### Updating Test Fixtures - -The procfs library includes a set of test fixtures which include many example files from -the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file -which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. - -```bash -rm -rf testdata/fixtures -make test -``` - -Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When -the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file -based on the updated `fixtures` directory. And finally, verify the changes using -`git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md deleted file mode 100644 index fed02d85c7..0000000000 --- a/vendor/github.com/prometheus/procfs/SECURITY.md +++ /dev/null @@ -1,6 +0,0 @@ -# Reporting a security issue - -The Prometheus security policy, including how to report vulnerabilities, can be -found here: - - diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go deleted file mode 100644 index 2e53344151..0000000000 --- a/vendor/github.com/prometheus/procfs/arp.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "net" - "os" - "strconv" - "strings" -) - -// Learned from include/uapi/linux/if_arp.h. -const ( - // Completed entry (ha valid). - ATFComplete = 0x02 - // Permanent entry. - ATFPermanent = 0x04 - // Publish entry. - ATFPublish = 0x08 - // Has requested trailers. - ATFUseTrailers = 0x10 - // Obsoleted: Want to use a netmask (only for proxy entries). - ATFNetmask = 0x20 - // Don't answer this addresses. - ATFDontPublish = 0x40 -) - -// ARPEntry contains a single row of the columnar data represented in -// /proc/net/arp. -type ARPEntry struct { - // IP address - IPAddr net.IP - // MAC address - HWAddr net.HardwareAddr - // Name of the device - Device string - // Flags - Flags byte -} - -// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, -// and then return a slice of ARPEntry's. 
-func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := os.ReadFile(fs.proc.Path("net/arp")) - if err != nil { - return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) - } - - return parseARPEntries(data) -} - -func parseARPEntries(data []byte) ([]ARPEntry, error) { - lines := strings.Split(string(data), "\n") - entries := make([]ARPEntry, 0) - var err error - const ( - expectedDataWidth = 6 - expectedHeaderWidth = 9 - ) - for _, line := range lines { - columns := strings.Fields(line) - width := len(columns) - - if width == expectedHeaderWidth || width == 0 { - continue - } else if width == expectedDataWidth { - entry, err := parseARPEntry(columns) - if err != nil { - return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) - } - entries = append(entries, entry) - } else { - return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) - } - - } - - return entries, err -} - -func parseARPEntry(columns []string) (ARPEntry, error) { - entry := ARPEntry{Device: columns[5]} - ip := net.ParseIP(columns[0]) - entry.IPAddr = ip - - if mac, err := net.ParseMAC(columns[3]); err == nil { - entry.HWAddr = mac - } else { - return ARPEntry{}, err - } - - if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { - entry.Flags = byte(flags) - } else { - return ARPEntry{}, err - } - - return entry, nil -} - -// IsComplete returns true if ARP entry is marked with complete flag. 
-func (entry *ARPEntry) IsComplete() bool { - return entry.Flags&ATFComplete != 0 -} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go deleted file mode 100644 index 8380750090..0000000000 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// A BuddyInfo is the details parsed from /proc/buddyinfo. -// The data is comprised of an array of free fragments of each size. -// The sizes are 2^n*PAGE_SIZE, where n is the array index. -type BuddyInfo struct { - Node string - Zone string - Sizes []float64 -} - -// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
-func (fs FS) BuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.proc.Path("buddyinfo")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseBuddyInfo(file) -} - -func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { - var ( - buddyInfo = []BuddyInfo{} - scanner = bufio.NewScanner(r) - bucketCount = -1 - ) - - for scanner.Scan() { - var err error - line := scanner.Text() - parts := strings.Fields(line) - - if len(parts) < 4 { - return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) - } - - node := strings.TrimSuffix(parts[1], ",") - zone := strings.TrimSuffix(parts[3], ",") - arraySize := len(parts[4:]) - - if bucketCount == -1 { - bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) - } - } - - sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { - sizes[i], err = strconv.ParseFloat(parts[i+4], 64) - if err != nil { - return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) - } - } - - buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) - } - - return buddyInfo, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go deleted file mode 100644 index bf4f3b48c0..0000000000 --- a/vendor/github.com/prometheus/procfs/cmdline.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CmdLine returns the command line of the kernel. -func (fs FS) CmdLine() ([]string, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cmdline")) - if err != nil { - return nil, err - } - - return strings.Fields(string(data)), nil -} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go deleted file mode 100644 index f0950bb495..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. 
-type CPUInfo struct { - Processor uint - VendorID string - CPUFamily string - Model string - ModelName string - Stepping string - Microcode string - CPUMHz float64 - CacheSize string - PhysicalID string - Siblings uint - CoreID string - CPUCores uint - APICID string - InitialAPICID string - FPU string - FPUException string - CPUIDLevel uint - WP string - Flags []string - Bugs []string - BogoMips float64 - CLFlushSize uint - CacheAlignment uint - AddressSizes string - PowerManagement string -} - -var ( - cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) - cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) -) - -// CPUInfo returns information about current system CPUs. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) CPUInfo() ([]CPUInfo, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) - if err != nil { - return nil, err - } - return parseCPUInfo(data) -} - -func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "vendor", "vendor_id": - cpuinfo[i].VendorID = field[1] - case "cpu 
family": - cpuinfo[i].CPUFamily = field[1] - case "model": - cpuinfo[i].Model = field[1] - case "model name": - cpuinfo[i].ModelName = field[1] - case "stepping": - cpuinfo[i].Stepping = field[1] - case "microcode": - cpuinfo[i].Microcode = field[1] - case "cpu MHz": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "cache size": - cpuinfo[i].CacheSize = field[1] - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "apicid": - cpuinfo[i].APICID = field[1] - case "initial apicid": - cpuinfo[i].InitialAPICID = field[1] - case "fpu": - cpuinfo[i].FPU = field[1] - case "fpu_exception": - cpuinfo[i].FPUException = field[1] - case "cpuid level": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUIDLevel = uint(v) - case "wp": - cpuinfo[i].WP = field[1] - case "flags": - cpuinfo[i].Flags = strings.Fields(field[1]) - case "bugs": - cpuinfo[i].Bugs = strings.Fields(field[1]) - case "bogomips": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "clflush size": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CLFlushSize = uint(v) - case "cache_alignment": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CacheAlignment = uint(v) - case "address sizes": - cpuinfo[i].AddressSizes = field[1] - case "power management": - cpuinfo[i].PowerManagement = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { - scanner := 
bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - match, err := regexp.MatchString("^[Pp]rocessor", firstLine) - if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) - - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - featuresLine := "" - commonCPUInfo := CPUInfo{} - i := 0 - if strings.TrimSpace(field[0]) == "Processor" { - commonCPUInfo = CPUInfo{ModelName: field[1]} - i = -1 - } else { - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo = []CPUInfo{firstcpu} - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "BogoMIPS": - if i == -1 { - cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor - i++ - cpuinfo[i].Processor = 0 - } - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "Features": - featuresLine = line - case "model name": - cpuinfo[i].ModelName = field[1] - } - } - fields := strings.SplitN(featuresLine, ": ", 2) - for i := range cpuinfo { - cpuinfo[i].Flags = strings.Fields(fields[1]) - } - return cpuinfo, nil - -} - -func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - 
cpuinfo := []CPUInfo{} - commonCPUInfo := CPUInfo{VendorID: field[1]} - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "bogomips per cpu": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - commonCPUInfo.BogoMips = v - case "features": - commonCPUInfo.Flags = strings.Fields(field[1]) - } - if strings.HasPrefix(line, "processor") { - match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) - if len(match) < 2 { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - cpu := commonCPUInfo - v, err := strconv.ParseUint(match[1], 0, 32) - if err != nil { - return nil, err - } - cpu.Processor = uint(v) - cpuinfo = append(cpuinfo, cpu) - } - if strings.HasPrefix(line, "cpu number") { - break - } - } - - i := 0 - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "cpu number": - i++ - case "cpu MHz dynamic": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - } - } - - return cpuinfo, nil -} - -func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "system type") || 
!strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - systemType := field[1] - - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - cpuinfo[i].VendorID = systemType - case "cpu model": - cpuinfo[i].ModelName = field[1] - case "BogoMIPS": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - systemType := field[1] - i := 0 - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - cpuinfo[i].VendorID = systemType - case "CPU Family": - cpuinfo[i].CPUFamily = field[1] - case "Model Name": - cpuinfo[i].ModelName = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { - scanner := 
bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "cpu": - cpuinfo[i].VendorID = field[1] - case "clock": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of 
the next processor - cpuinfo[i].Processor = uint(v) - case "hart": - cpuinfo[i].CoreID = field[1] - case "isa": - cpuinfo[i].ModelName = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode - return nil, errors.New("not implemented") -} - -// firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line. -func firstNonEmptyLine(scanner *bufio.Scanner) string { - for scanner.Scan() { - line := scanner.Text() - if strings.TrimSpace(line) != "" { - return line - } - } - return "" -} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go deleted file mode 100644 index 64cfd534c1..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 - -package procfs - -var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go deleted file mode 100644 index d88442f0ed..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go deleted file mode 100644 index c11207f3ab..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build linux && (mips || mipsle || mips64 || mips64le) -// +build linux -// +build mips mipsle mips64 mips64le - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go deleted file mode 100644 index a6b2b3127c..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x - -package procfs - -var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go deleted file mode 100644 index 003bc2ad4a..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le - -package procfs - -var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go deleted file mode 100644 index 1c9b7313b6..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build linux && (riscv || riscv64) -// +build linux -// +build riscv riscv64 - -package procfs - -var parseCPUInfo = parseCPUInfoRISCV diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go deleted file mode 100644 index fa3686bc00..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go deleted file mode 100644 index a0ef55562e..0000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 - -package procfs - -var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go deleted file mode 100644 index 5f2a37a78b..0000000000 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Crypto holds info parsed from /proc/crypto. -type Crypto struct { - Alignmask *uint64 - Async bool - Blocksize *uint64 - Chunksize *uint64 - Ctxsize *uint64 - Digestsize *uint64 - Driver string - Geniv string - Internal string - Ivsize *uint64 - Maxauthsize *uint64 - MaxKeysize *uint64 - MinKeysize *uint64 - Module string - Name string - Priority *int64 - Refcnt *int64 - Seedsize *uint64 - Selftest string - Type string - Walksize *uint64 -} - -// Crypto parses an crypto-file (/proc/crypto) and returns a slice of -// structs containing the relevant info. 
More information available here: -// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html -func (fs FS) Crypto() ([]Crypto, error) { - path := fs.proc.Path("crypto") - b, err := util.ReadFileNoStat(path) - if err != nil { - return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) - - } - - crypto, err := parseCrypto(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) - } - - return crypto, nil -} - -// parseCrypto parses a /proc/crypto stream into Crypto elements. -func parseCrypto(r io.Reader) ([]Crypto, error) { - var out []Crypto - - s := bufio.NewScanner(r) - for s.Scan() { - text := s.Text() - switch { - case strings.HasPrefix(text, "name"): - // Each crypto element begins with its name. - out = append(out, Crypto{}) - case text == "": - continue - } - - kv := strings.Split(text, ":") - if len(kv) != 2 { - return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) - } - - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - - // Parse the key/value pair into the currently focused element. - c := &out[len(out)-1] - if err := c.parseKV(k, v); err != nil { - return nil, err - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -// parseKV parses a key/value pair into the appropriate field of c. -func (c *Crypto) parseKV(k, v string) error { - vp := util.NewValueParser(v) - - switch k { - case "async": - // Interpret literal yes as true. 
- c.Async = v == "yes" - case "blocksize": - c.Blocksize = vp.PUInt64() - case "chunksize": - c.Chunksize = vp.PUInt64() - case "digestsize": - c.Digestsize = vp.PUInt64() - case "driver": - c.Driver = v - case "geniv": - c.Geniv = v - case "internal": - c.Internal = v - case "ivsize": - c.Ivsize = vp.PUInt64() - case "maxauthsize": - c.Maxauthsize = vp.PUInt64() - case "max keysize": - c.MaxKeysize = vp.PUInt64() - case "min keysize": - c.MinKeysize = vp.PUInt64() - case "module": - c.Module = v - case "name": - c.Name = v - case "priority": - c.Priority = vp.PInt64() - case "refcnt": - c.Refcnt = vp.PInt64() - case "seedsize": - c.Seedsize = vp.PUInt64() - case "selftest": - c.Selftest = v - case "type": - c.Type = v - case "walksize": - c.Walksize = vp.PUInt64() - } - - return vp.Err() -} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go deleted file mode 100644 index f9d961e441..0000000000 --- a/vendor/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2014 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package procfs provides functions to retrieve system, kernel and process -// metrics from the pseudo-filesystem proc. 
-// -// Example: -// -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index 9bdaccc7c8..0000000000 --- a/vendor/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "github.com/prometheus/procfs/internal/fs" -) - -// FS represents the pseudo-filesystem sys, which provides an interface to -// kernel data structures. -type FS struct { - proc fs.FS - isReal bool -} - -const ( - // DefaultMountPoint is the common mount point of the proc filesystem. - DefaultMountPoint = fs.DefaultProcMountPoint - - // SectorSize represents the size of a sector in bytes. - // It is specific to Linux block I/O operations. - SectorSize = 512 -) - -// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. 
-// It will error if the mount point directory can't be read or is a file. -func NewDefaultFS() (FS, error) { - return NewFS(DefaultMountPoint) -} - -// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error -// if the mount point directory can't be read or is a file. -func NewFS(mountPoint string) (FS, error) { - fs, err := fs.NewFS(mountPoint) - if err != nil { - return FS{}, err - } - - isReal, err := isRealProc(mountPoint) - if err != nil { - return FS{}, err - } - - return FS{fs, isReal}, nil -} diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go deleted file mode 100644 index 1b5bdbdf84..0000000000 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !freebsd && !linux -// +build !freebsd,!linux - -package procfs - -// isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct. 
-func isRealProc(_ string) (bool, error) { - return true, nil -} diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go deleted file mode 100644 index 80df79c319..0000000000 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build freebsd || linux -// +build freebsd linux - -package procfs - -import ( - "syscall" -) - -// isRealProc determines whether supplied mountpoint is really a proc filesystem. -func isRealProc(mountPoint string) (bool, error) { - stat := syscall.Statfs_t{} - err := syscall.Statfs(mountPoint, &stat) - if err != nil { - return false, err - } - - // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87 - return stat.Type == 0x9fa0, nil -} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go deleted file mode 100644 index 7db8633077..0000000000 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Fscacheinfo represents fscache statistics. -type Fscacheinfo struct { - // Number of index cookies allocated - IndexCookiesAllocated uint64 - // data storage cookies allocated - DataStorageCookiesAllocated uint64 - // Number of special cookies allocated - SpecialCookiesAllocated uint64 - // Number of objects allocated - ObjectsAllocated uint64 - // Number of object allocation failures - ObjectAllocationsFailure uint64 - // Number of objects that reached the available state - ObjectsAvailable uint64 - // Number of objects that reached the dead state - ObjectsDead uint64 - // Number of objects that didn't have a coherency check - ObjectsWithoutCoherencyCheck uint64 - // Number of objects that passed a coherency check - ObjectsWithCoherencyCheck uint64 - // Number of objects that needed a coherency data update - ObjectsNeedCoherencyCheckUpdate uint64 - // Number of objects that were declared obsolete - ObjectsDeclaredObsolete uint64 - // Number of pages marked as being cached - PagesMarkedAsBeingCached uint64 - // Number of uncache page requests seen - UncachePagesRequestSeen uint64 - // Number of acquire cookie requests seen - AcquireCookiesRequestSeen uint64 - // Number of acq reqs given a NULL parent - AcquireRequestsWithNullParent uint64 - // Number of acq reqs rejected due to no cache available - AcquireRequestsRejectedNoCacheAvailable uint64 - // Number of acq reqs succeeded - AcquireRequestsSucceeded 
uint64 - // Number of acq reqs rejected due to error - AcquireRequestsRejectedDueToError uint64 - // Number of acq reqs failed on ENOMEM - AcquireRequestsFailedDueToEnomem uint64 - // Number of lookup calls made on cache backends - LookupsNumber uint64 - // Number of negative lookups made - LookupsNegative uint64 - // Number of positive lookups made - LookupsPositive uint64 - // Number of objects created by lookup - ObjectsCreatedByLookup uint64 - // Number of lookups timed out and requeued - LookupsTimedOutAndRequed uint64 - InvalidationsNumber uint64 - InvalidationsRunning uint64 - // Number of update cookie requests seen - UpdateCookieRequestSeen uint64 - // Number of upd reqs given a NULL parent - UpdateRequestsWithNullParent uint64 - // Number of upd reqs granted CPU time - UpdateRequestsRunning uint64 - // Number of relinquish cookie requests seen - RelinquishCookiesRequestSeen uint64 - // Number of rlq reqs given a NULL parent - RelinquishCookiesWithNullParent uint64 - // Number of rlq reqs waited on completion of creation - RelinquishRequestsWaitingCompleteCreation uint64 - // Relinqs rtr - RelinquishRetries uint64 - // Number of attribute changed requests seen - AttributeChangedRequestsSeen uint64 - // Number of attr changed requests queued - AttributeChangedRequestsQueued uint64 - // Number of attr changed rejected -ENOBUFS - AttributeChangedRejectDueToEnobufs uint64 - // Number of attr changed failed -ENOMEM - AttributeChangedFailedDueToEnomem uint64 - // Number of attr changed ops given CPU time - AttributeChangedOps uint64 - // Number of allocation requests seen - AllocationRequestsSeen uint64 - // Number of successful alloc reqs - AllocationOkRequests uint64 - // Number of alloc reqs that waited on lookup completion - AllocationWaitingOnLookup uint64 - // Number of alloc reqs rejected -ENOBUFS - AllocationsRejectedDueToEnobufs uint64 - // Number of alloc reqs aborted -ERESTARTSYS - AllocationsAbortedDueToErestartsys uint64 - // Number of alloc reqs 
submitted - AllocationOperationsSubmitted uint64 - // Number of alloc reqs waited for CPU time - AllocationsWaitedForCPU uint64 - // Number of alloc reqs aborted due to object death - AllocationsAbortedDueToObjectDeath uint64 - // Number of retrieval (read) requests seen - RetrievalsReadRequests uint64 - // Number of successful retr reqs - RetrievalsOk uint64 - // Number of retr reqs that waited on lookup completion - RetrievalsWaitingLookupCompletion uint64 - // Number of retr reqs returned -ENODATA - RetrievalsReturnedEnodata uint64 - // Number of retr reqs rejected -ENOBUFS - RetrievalsRejectedDueToEnobufs uint64 - // Number of retr reqs aborted -ERESTARTSYS - RetrievalsAbortedDueToErestartsys uint64 - // Number of retr reqs failed -ENOMEM - RetrievalsFailedDueToEnomem uint64 - // Number of retr reqs submitted - RetrievalsRequests uint64 - // Number of retr reqs waited for CPU time - RetrievalsWaitingCPU uint64 - // Number of retr reqs aborted due to object death - RetrievalsAbortedDueToObjectDeath uint64 - // Number of storage (write) requests seen - StoreWriteRequests uint64 - // Number of successful store reqs - StoreSuccessfulRequests uint64 - // Number of store reqs on a page already pending storage - StoreRequestsOnPendingStorage uint64 - // Number of store reqs rejected -ENOBUFS - StoreRequestsRejectedDueToEnobufs uint64 - // Number of store reqs failed -ENOMEM - StoreRequestsFailedDueToEnomem uint64 - // Number of store reqs submitted - StoreRequestsSubmitted uint64 - // Number of store reqs granted CPU time - StoreRequestsRunning uint64 - // Number of pages given store req processing time - StorePagesWithRequestsProcessing uint64 - // Number of store reqs deleted from tracking tree - StoreRequestsDeleted uint64 - // Number of store reqs over store limit - StoreRequestsOverStoreLimit uint64 - // Number of release reqs against pages with no pending store - ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 - // Number of release reqs against pages 
stored by time lock granted - ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 - // Number of release reqs ignored due to in-progress store - ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores canceled due to release req - PageStoresCancelledByReleaseRequests uint64 - VmscanWaiting uint64 - // Number of times async ops added to pending queues - OpsPending uint64 - // Number of times async ops given CPU time - OpsRunning uint64 - // Number of times async ops queued for processing - OpsEnqueued uint64 - // Number of async ops canceled - OpsCancelled uint64 - // Number of async ops rejected due to object lookup/create failure - OpsRejected uint64 - // Number of async ops initialized - OpsInitialised uint64 - // Number of async ops queued for deferred release - OpsDeferred uint64 - // Number of async ops released (should equal ini=N when idle) - OpsReleased uint64 - // Number of deferred-release async ops garbage collected - OpsGarbageCollected uint64 - // Number of in-progress alloc_object() cache ops - CacheopAllocationsinProgress uint64 - // Number of in-progress lookup_object() cache ops - CacheopLookupObjectInProgress uint64 - // Number of in-progress lookup_complete() cache ops - CacheopLookupCompleteInPorgress uint64 - // Number of in-progress grab_object() cache ops - CacheopGrabObjectInProgress uint64 - CacheopInvalidations uint64 - // Number of in-progress update_object() cache ops - CacheopUpdateObjectInProgress uint64 - // Number of in-progress drop_object() cache ops - CacheopDropObjectInProgress uint64 - // Number of in-progress put_object() cache ops - CacheopPutObjectInProgress uint64 - // Number of in-progress attr_changed() cache ops - CacheopAttributeChangeInProgress uint64 - // Number of in-progress sync_cache() cache ops - CacheopSyncCacheInProgress uint64 - // Number of in-progress read_or_alloc_page() cache ops - CacheopReadOrAllocPageInProgress uint64 - // Number of in-progress read_or_alloc_pages() cache ops - 
CacheopReadOrAllocPagesInProgress uint64 - // Number of in-progress allocate_page() cache ops - CacheopAllocatePageInProgress uint64 - // Number of in-progress allocate_pages() cache ops - CacheopAllocatePagesInProgress uint64 - // Number of in-progress write_page() cache ops - CacheopWritePagesInProgress uint64 - // Number of in-progress uncache_page() cache ops - CacheopUncachePagesInProgress uint64 - // Number of in-progress dissociate_pages() cache ops - CacheopDissociatePagesInProgress uint64 - // Number of object lookups/creations rejected due to lack of space - CacheevLookupsAndCreationsRejectedLackSpace uint64 - // Number of stale objects deleted - CacheevStaleObjectsDeleted uint64 - // Number of objects retired when relinquished - CacheevRetiredWhenReliquished uint64 - // Number of objects culled - CacheevObjectsCulled uint64 -} - -// Fscacheinfo returns information about current fscache statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt -func (fs FS) Fscacheinfo() (Fscacheinfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) - if err != nil { - return Fscacheinfo{}, err - } - - m, err := parseFscacheinfo(bytes.NewReader(b)) - if err != nil { - return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) - } - - return *m, nil -} - -func setFSCacheFields(fields []string, setFields ...*uint64) error { - var err error - if len(fields) < len(setFields) { - return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) - } - - for i := range setFields { - *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) - if err != nil { - return err - } - } - return nil -} - -func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { - var m Fscacheinfo - s := bufio.NewScanner(r) - for s.Scan() { - fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", 
ErrFileParse, s.Text()) - } - - switch fields[0] { - case "Cookies:": - err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, - &m.SpecialCookiesAllocated) - if err != nil { - return &m, err - } - case "Objects:": - err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, - &m.ObjectsAvailable, &m.ObjectsDead) - if err != nil { - return &m, err - } - case "ChkAux": - err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, - &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) - if err != nil { - return &m, err - } - case "Pages": - err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) - if err != nil { - return &m, err - } - case "Acquire:": - err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, - &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, - &m.AcquireRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - case "Lookups:": - err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, - &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) - if err != nil { - return &m, err - } - case "Invals": - err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) - if err != nil { - return &m, err - } - case "Updates:": - err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, - &m.UpdateRequestsRunning) - if err != nil { - return &m, err - } - case "Relinqs:": - err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, - &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) - if err != nil { - return &m, err - } - case "AttrChg:": - err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, 
&m.AttributeChangedRequestsQueued, - &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) - if err != nil { - return &m, err - } - case "Allocs": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, - &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, - &m.AllocationsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Retrvls:": - if strings.Split(fields[1], "=")[0] == "n" { - err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, - &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, - &m.RetrievalsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Stores": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, - &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, - &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) - if err != nil { - return &m, err - } - } - case "VmScan": - err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, - &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, - 
&m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) - if err != nil { - return &m, err - } - case "Ops": - if strings.Split(fields[2], "=")[0] == "pend" { - err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) - if err != nil { - return &m, err - } - } - case "CacheOp:": - if strings.Split(fields[1], "=")[0] == "alo" { - err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, - &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) - if err != nil { - return &m, err - } - } else if strings.Split(fields[1], "=")[0] == "inv" { - err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, - &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, - &m.CacheopSyncCacheInProgress) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, - &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, - &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) - if err != nil { - return &m, err - } - } - case "CacheEv:": - err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, - &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) - if err != nil { - return &m, err - } - } - } - - return &m, nil -} diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go deleted file mode 100644 index 3a43e83915..0000000000 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The 
Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fs - -import ( - "fmt" - "os" - "path/filepath" -) - -const ( - // DefaultProcMountPoint is the common mount point of the proc filesystem. - DefaultProcMountPoint = "/proc" - - // DefaultSysMountPoint is the common mount point of the sys filesystem. - DefaultSysMountPoint = "/sys" - - // DefaultConfigfsMountPoint is the common mount point of the configfs. - DefaultConfigfsMountPoint = "/sys/kernel/config" - - // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. - DefaultSelinuxMountPoint = "/sys/fs/selinux" -) - -// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an -// interface to kernel data structures. -type FS string - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %q: %w", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %q is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path appends the given path elements to the filesystem path, adding separators -// as necessary. -func (fs FS) Path(p ...string) string { - return filepath.Join(append([]string{string(fs)}, p...)...) 
-} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index 5a7d2df06a..0000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "errors" - "os" - "strconv" - "strings" -) - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. -func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} - -// ParsePInt64s parses a slice of strings into a slice of int64 pointers. -func ParsePInt64s(ss []string) ([]*int64, error) { - us := make([]*int64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, &u) - } - - return us, nil -} - -// Parses a uint64 from given hex in string. 
-func ParseHexUint64s(ss []string) ([]*uint64, error) { - us := make([]*uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return nil, err - } - - us = append(us, &u) - } - - return us, nil -} - -// ReadUintFromFile reads a file and attempts to parse a uint64 from it. -func ReadUintFromFile(path string) (uint64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// ReadIntFromFile reads a file and attempts to parse a int64 from it. -func ReadIntFromFile(path string) (int64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) -} - -// ParseBool parses a string into a boolean pointer. -func ParseBool(b string) *bool { - var truth bool - switch b { - case "enabled": - truth = true - case "disabled": - truth = false - default: - return nil - } - return &truth -} - -// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. -func ReadHexFromFile(path string) (uint64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - hexString := strings.TrimSpace(string(data)) - if !strings.HasPrefix(hexString, "0x") { - return 0, errors.New("invalid format: hex string does not start with '0x'") - } - return strconv.ParseUint(hexString[2:], 16, 64) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go deleted file mode 100644 index 71b7a70ebd..0000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io" - "os" -) - -// ReadFileNoStat uses io.ReadAll to read contents of entire file. -// This is similar to os.ReadFile but without the call to os.Stat, because -// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 1024kB. For files larger than this, a scanner -// should be used. -func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 1024 - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - - reader := io.LimitReader(f, maxBufferSize) - return io.ReadAll(reader) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go deleted file mode 100644 index d5404a6d72..0000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build (linux || darwin) && !appengine -// +build linux darwin -// +build !appengine - -package util - -import ( - "bytes" - "os" - "strconv" - "strings" - "syscall" -) - -// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. -// https://github.com/prometheus/node_exporter/pull/728/files -// -// Note that this function will not read files larger than 128 bytes. -func SysReadFile(file string) (string, error) { - f, err := os.Open(file) - if err != nil { - return "", err - } - defer f.Close() - - // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's os.ReadFile implementation to poll forever. - // - // Since we either want to read data or bail immediately, do the simplest - // possible read using syscall directly. - const sysFileBufferSize = 128 - b := make([]byte, sysFileBufferSize) - n, err := syscall.Read(int(f.Fd()), b) - if err != nil { - return "", err - } - - return string(bytes.TrimSpace(b[:n])), nil -} - -// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. -func SysReadUintFromFile(path string) (uint64, error) { - data, err := SysReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it. 
-func SysReadIntFromFile(path string) (int64, error) { - data, err := SysReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go deleted file mode 100644 index 1d86f5e63f..0000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (linux && appengine) || (!linux && !darwin) -// +build linux,appengine !linux,!darwin - -package util - -import ( - "fmt" -) - -// SysReadFile is here implemented as a noop for builds that do not support -// the read syscall. For example Windows, or Linux on Google App Engine. 
-func SysReadFile(file string) (string, error) { - return "", fmt.Errorf("not supported on this platform") -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go deleted file mode 100644 index fe2355d3c6..0000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "strconv" -) - -// TODO(mdlayher): util packages are an anti-pattern and this should be moved -// somewhere else that is more focused in the future. - -// A ValueParser enables parsing a single string into a variety of data types -// in a concise and safe way. The Err method must be invoked after invoking -// any other methods to ensure a value was successfully parsed. -type ValueParser struct { - v string - err error -} - -// NewValueParser creates a ValueParser using the input string. -func NewValueParser(v string) *ValueParser { - return &ValueParser{v: v} -} - -// Int interprets the underlying value as an int and returns that value. -func (vp *ValueParser) Int() int { return int(vp.int64()) } - -// PInt64 interprets the underlying value as an int64 and returns a pointer to -// that value. 
-func (vp *ValueParser) PInt64() *int64 { - if vp.err != nil { - return nil - } - - v := vp.int64() - return &v -} - -// int64 interprets the underlying value as an int64 and returns that value. -// TODO: export if/when necessary. -func (vp *ValueParser) int64() int64 { - if vp.err != nil { - return 0 - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseInt(vp.v, base, 64) - if err != nil { - vp.err = err - return 0 - } - - return v -} - -// PUInt64 interprets the underlying value as an uint64 and returns a pointer to -// that value. -func (vp *ValueParser) PUInt64() *uint64 { - if vp.err != nil { - return nil - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseUint(vp.v, base, 64) - if err != nil { - vp.err = err - return nil - } - - return &v -} - -// Err returns the last error, if any, encountered by the ValueParser. -func (vp *ValueParser) Err() error { - return vp.err -} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index bc3a20c932..0000000000 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packages processed. - IncomingPackets uint64 - // Total outgoing packages processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The remote (real) IP address. - RemoteAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The remote (real) port. - RemotePort uint16 - // The local firewall mark - LocalMark string - // The transport protocol (TCP, UDP). - Proto string - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) IPVSStats() (IPVSStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - - return parseIPVSStats(bytes.NewReader(data)) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`. 
-func parseIPVSStats(r io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := io.ReadAll(r) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
-func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.proc.Path("net/ip_vs")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localMark string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = "" - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "FWM": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = fields[1] - localAddress = nil - localPort = 0 - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - LocalMark: localMark, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - var ( - ip net.IP - err error - ) - - switch len(s) { - case 13: - ip, err = hex.DecodeString(s[0:8]) - if err != nil { - return nil, 0, err - } - 
case 46: - ip = net.ParseIP(s[1:40]) - if ip == nil { - return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) - } - default: - return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) - } - - portString := s[len(s)-4:] - if len(portString) != 4 { - return nil, 0, - fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) - } - port, err := strconv.ParseUint(portString, 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go deleted file mode 100644 index db88566bdf..0000000000 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "os" - - "github.com/prometheus/procfs/internal/util" -) - -// KernelRandom contains information about to the kernel's random number generator. -type KernelRandom struct { - // EntropyAvaliable gives the available entropy, in bits. - EntropyAvaliable *uint64 - // PoolSize gives the size of the entropy pool, in bits. - PoolSize *uint64 - // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. 
- URandomMinReseedSeconds *uint64 - // WriteWakeupThreshold the number of bits of entropy below which we wake up processes - // that do a select(2) or poll(2) for write access to /dev/random. - WriteWakeupThreshold *uint64 - // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep - // waiting for entropy from /dev/random. - ReadWakeupThreshold *uint64 -} - -// KernelRandom returns values from /proc/sys/kernel/random. -func (fs FS) KernelRandom() (KernelRandom, error) { - random := KernelRandom{} - - for file, p := range map[string]**uint64{ - "entropy_avail": &random.EntropyAvaliable, - "poolsize": &random.PoolSize, - "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, - "write_wakeup_threshold": &random.WriteWakeupThreshold, - "read_wakeup_threshold": &random.ReadWakeupThreshold, - } { - val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) - if os.IsNotExist(err) { - continue - } - if err != nil { - return random, err - } - *p = &val - } - - return random, nil -} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go deleted file mode 100644 index 332e76c17f..0000000000 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// LoadAvg represents an entry in /proc/loadavg. -type LoadAvg struct { - Load1 float64 - Load5 float64 - Load15 float64 -} - -// LoadAvg returns loadavg from /proc. -func (fs FS) LoadAvg() (*LoadAvg, error) { - path := fs.proc.Path("loadavg") - - data, err := util.ReadFileNoStat(path) - if err != nil { - return nil, err - } - return parseLoad(data) -} - -// Parse /proc loadavg and return 1m, 5m and 15m. -func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { - loads := make([]float64, 3) - parts := strings.Fields(string(loadavgBytes)) - if len(parts) < 3 { - return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) - } - - var err error - for i, load := range parts[0:3] { - loads[i], err = strconv.ParseFloat(load, 64) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) - } - } - return &LoadAvg{ - Load1: loads[0], - Load5: loads[1], - Load15: loads[2], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go deleted file mode 100644 index 67a9d2b448..0000000000 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "fmt" - "os" - "regexp" - "strconv" - "strings" -) - -var ( - statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) - recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) - recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) - recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) - componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device requires. - DisksTotal int64 - // Number of failed disks. - DisksFailed int64 - // Number of "down" disks. (the _ indicator in the status line) - DisksDown int64 - // Spare disks in the device. - DisksSpare int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 - // Number of blocks on the device that need to be synced. - BlocksToBeSynced int64 - // progress percentage of current sync - BlocksSyncedPct float64 - // estimated finishing time for current sync (in minutes) - BlocksSyncedFinishTime float64 - // current sync speed (in Kilobytes/sec) - BlocksSyncedSpeed float64 - // Name of md component devices - Devices []string -} - -// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. 
More information available here: -// https://raid.wiki.kernel.org/index.php/Mdstat -func (fs FS) MDStat() ([]MDStat, error) { - data, err := os.ReadFile(fs.proc.Path("mdstat")) - if err != nil { - return nil, err - } - mdstat, err := parseMDStat(data) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) - } - return mdstat, nil -} - -// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. -func parseMDStat(mdStatData []byte) ([]MDStat, error) { - mdStats := []MDStat{} - lines := strings.Split(string(mdStatData), "\n") - - for i, line := range lines { - if strings.TrimSpace(line) == "" || line[0] == ' ' || - strings.HasPrefix(line, "Personalities") || - strings.HasPrefix(line, "unused") { - continue - } - - deviceFields := strings.Fields(line) - if len(deviceFields) < 3 { - return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) - } - mdName := deviceFields[0] // mdx - state := deviceFields[2] // active or inactive - - if len(lines) <= i+3 { - return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) - } - - // Failed disks have the suffix (F) & Spare disks have the suffix (S). - fail := int64(strings.Count(line, "(F)")) - spare := int64(strings.Count(line, "(S)")) - active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) - - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) - } - - syncLineIdx := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - syncLineIdx++ - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. 
- blocksSynced := size - blocksToBeSynced := size - speed := float64(0) - finish := float64(0) - pct := float64(0) - recovering := strings.Contains(lines[syncLineIdx], "recovery") - resyncing := strings.Contains(lines[syncLineIdx], "resync") - checking := strings.Contains(lines[syncLineIdx], "check") - - // Append recovery and resyncing state info. - if recovering || resyncing || checking { - if recovering { - state = "recovering" - } else if checking { - state = "checking" - } else { - state = "resyncing" - } - - // Handle case when resync=PENDING or resync=DELAYED. - if strings.Contains(lines[syncLineIdx], "PENDING") || - strings.Contains(lines[syncLineIdx], "DELAYED") { - blocksSynced = 0 - } else { - blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) - } - } - } - - mdStats = append(mdStats, MDStat{ - Name: mdName, - ActivityState: state, - DisksActive: active, - DisksFailed: fail, - DisksDown: down, - DisksSpare: spare, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: blocksSynced, - BlocksToBeSynced: blocksToBeSynced, - BlocksSyncedPct: pct, - BlocksSyncedFinishTime: finish, - BlocksSyncedSpeed: speed, - Devices: evalComponentDevices(deviceFields), - }) - } - - return mdStats, nil -} - -func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { - statusFields := strings.Fields(statusLine) - if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) - } - - sizeStr := statusFields[0] - size, err = strconv.ParseInt(sizeStr, 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) - } - - if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { - // In the device deviceLine, only disks have a 
number associated with them in []. - total = int64(strings.Count(deviceLine, "[")) - return total, total, 0, size, nil - } - - if strings.Contains(deviceLine, "inactive") { - return 0, 0, 0, size, nil - } - - matches := statusLineRE.FindStringSubmatch(statusLine) - if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) - } - down = int64(strings.Count(matches[4], "_")) - - return active, total, down, size, nil -} - -func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { - matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) - } - - blocks := strings.Split(matches[1], "/") - blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) - if err != nil { - return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) - } - - blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) - if err != nil { - return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) - } - - // Get percentage complete - matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) - } - pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) - 
if err != nil { - return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) - } - - // Get time expected left to complete - matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) - } - finish, err = strconv.ParseFloat(matches[1], 64) - if err != nil { - return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) - } - - // Get recovery speed - matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) - } - speed, err = strconv.ParseFloat(matches[1], 64) - if err != nil { - return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) - } - - return blocksSynced, blocksToBeSynced, pct, finish, speed, nil -} - -func evalComponentDevices(deviceFields []string) []string { - mdComponentDevices := make([]string, 0) - if len(deviceFields) > 3 { - for _, field := range deviceFields[4:] { - match := componentDeviceRE.FindStringSubmatch(field) - if match == nil { - continue - } - mdComponentDevices = append(mdComponentDevices, match[1]) - } - } - - return mdComponentDevices -} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go deleted file mode 100644 index 4b2c4050a3..0000000000 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the 
License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Meminfo represents memory statistics. -type Meminfo struct { - // Total usable ram (i.e. physical ram minus a few reserved - // bits and the kernel binary code) - MemTotal *uint64 - // The sum of LowFree+HighFree - MemFree *uint64 - // An estimate of how much memory is available for starting - // new applications, without swapping. Calculated from - // MemFree, SReclaimable, the size of the file LRU lists, and - // the low watermarks in each zone. The estimate takes into - // account that the system needs some page cache to function - // well, and that not all reclaimable slab will be - // reclaimable, due to items being in use. The impact of those - // factors will vary from system to system. - MemAvailable *uint64 - // Relatively temporary storage for raw disk blocks shouldn't - // get tremendously large (20MB or so) - Buffers *uint64 - Cached *uint64 - // Memory that once was swapped out, is swapped back in but - // still also is in the swapfile (if memory is needed it - // doesn't need to be swapped out AGAIN because it is already - // in the swapfile. This saves I/O) - SwapCached *uint64 - // Memory that has been used more recently and usually not - // reclaimed unless absolutely necessary. - Active *uint64 - // Memory which has been less recently used. 
It is more - // eligible to be reclaimed for other purposes - Inactive *uint64 - ActiveAnon *uint64 - InactiveAnon *uint64 - ActiveFile *uint64 - InactiveFile *uint64 - Unevictable *uint64 - Mlocked *uint64 - // total amount of swap space available - SwapTotal *uint64 - // Memory which has been evicted from RAM, and is temporarily - // on the disk - SwapFree *uint64 - // Memory which is waiting to get written back to the disk - Dirty *uint64 - // Memory which is actively being written back to the disk - Writeback *uint64 - // Non-file backed pages mapped into userspace page tables - AnonPages *uint64 - // files which have been mapped, such as libraries - Mapped *uint64 - Shmem *uint64 - // in-kernel data structures cache - Slab *uint64 - // Part of Slab, that might be reclaimed, such as caches - SReclaimable *uint64 - // Part of Slab, that cannot be reclaimed on memory pressure - SUnreclaim *uint64 - KernelStack *uint64 - // amount of memory dedicated to the lowest level of page - // tables. - PageTables *uint64 - // NFS pages sent to the server, but not yet committed to - // stable storage - NFSUnstable *uint64 - // Memory used for block device "bounce buffers" - Bounce *uint64 - // Memory used by FUSE for temporary writeback buffers - WritebackTmp *uint64 - // Based on the overcommit ratio ('vm.overcommit_ratio'), - // this is the total amount of memory currently available to - // be allocated on the system. This limit is only adhered to - // if strict overcommit accounting is enabled (mode 2 in - // 'vm.overcommit_memory'). - // The CommitLimit is calculated with the following formula: - // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * - // overcommit_ratio / 100 + [total swap pages] - // For example, on a system with 1G of physical RAM and 7G - // of swap with a `vm.overcommit_ratio` of 30 it would - // yield a CommitLimit of 7.3G. - // For more details, see the memory overcommit documentation - // in vm/overcommit-accounting. 
- CommitLimit *uint64 - // The amount of memory presently allocated on the system. - // The committed memory is a sum of all of the memory which - // has been allocated by processes, even if it has not been - // "used" by them as of yet. A process which malloc()'s 1G - // of memory, but only touches 300M of it will show up as - // using 1G. This 1G is memory which has been "committed" to - // by the VM and can be used at any time by the allocating - // application. With strict overcommit enabled on the system - // (mode 2 in 'vm.overcommit_memory'),allocations which would - // exceed the CommitLimit (detailed above) will not be permitted. - // This is useful if one needs to guarantee that processes will - // not fail due to lack of memory once that memory has been - // successfully allocated. - CommittedAS *uint64 - // total size of vmalloc memory area - VmallocTotal *uint64 - // amount of vmalloc area which is used - VmallocUsed *uint64 - // largest contiguous block of vmalloc area which is free - VmallocChunk *uint64 - Percpu *uint64 - HardwareCorrupted *uint64 - AnonHugePages *uint64 - ShmemHugePages *uint64 - ShmemPmdMapped *uint64 - CmaTotal *uint64 - CmaFree *uint64 - HugePagesTotal *uint64 - HugePagesFree *uint64 - HugePagesRsvd *uint64 - HugePagesSurp *uint64 - Hugepagesize *uint64 - DirectMap4k *uint64 - DirectMap2M *uint64 - DirectMap1G *uint64 - - // The struct fields below are the byte-normalized counterparts to the - // existing struct fields. Values are normalized using the optional - // unit field in the meminfo line. 
- MemTotalBytes *uint64 - MemFreeBytes *uint64 - MemAvailableBytes *uint64 - BuffersBytes *uint64 - CachedBytes *uint64 - SwapCachedBytes *uint64 - ActiveBytes *uint64 - InactiveBytes *uint64 - ActiveAnonBytes *uint64 - InactiveAnonBytes *uint64 - ActiveFileBytes *uint64 - InactiveFileBytes *uint64 - UnevictableBytes *uint64 - MlockedBytes *uint64 - SwapTotalBytes *uint64 - SwapFreeBytes *uint64 - DirtyBytes *uint64 - WritebackBytes *uint64 - AnonPagesBytes *uint64 - MappedBytes *uint64 - ShmemBytes *uint64 - SlabBytes *uint64 - SReclaimableBytes *uint64 - SUnreclaimBytes *uint64 - KernelStackBytes *uint64 - PageTablesBytes *uint64 - NFSUnstableBytes *uint64 - BounceBytes *uint64 - WritebackTmpBytes *uint64 - CommitLimitBytes *uint64 - CommittedASBytes *uint64 - VmallocTotalBytes *uint64 - VmallocUsedBytes *uint64 - VmallocChunkBytes *uint64 - PercpuBytes *uint64 - HardwareCorruptedBytes *uint64 - AnonHugePagesBytes *uint64 - ShmemHugePagesBytes *uint64 - ShmemPmdMappedBytes *uint64 - CmaTotalBytes *uint64 - CmaFreeBytes *uint64 - HugepagesizeBytes *uint64 - DirectMap4kBytes *uint64 - DirectMap2MBytes *uint64 - DirectMap1GBytes *uint64 -} - -// Meminfo returns an information about current kernel/system memory statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Meminfo() (Meminfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) - if err != nil { - return Meminfo{}, err - } - - m, err := parseMemInfo(bytes.NewReader(b)) - if err != nil { - return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err) - } - - return *m, nil -} - -func parseMemInfo(r io.Reader) (*Meminfo, error) { - var m Meminfo - s := bufio.NewScanner(r) - for s.Scan() { - fields := strings.Fields(s.Text()) - var val, valBytes uint64 - - val, err := strconv.ParseUint(fields[1], 0, 64) - if err != nil { - return nil, err - } - - switch len(fields) { - case 2: - // No unit present, use the parsed the value as bytes directly. 
- valBytes = val - case 3: - // Unit present in optional 3rd field, convert it to - // bytes. The only unit supported within the Linux - // kernel is `kB`. - if fields[2] != "kB" { - return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2]) - } - - valBytes = 1024 * val - - default: - return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) - } - - switch fields[0] { - case "MemTotal:": - m.MemTotal = &val - m.MemTotalBytes = &valBytes - case "MemFree:": - m.MemFree = &val - m.MemFreeBytes = &valBytes - case "MemAvailable:": - m.MemAvailable = &val - m.MemAvailableBytes = &valBytes - case "Buffers:": - m.Buffers = &val - m.BuffersBytes = &valBytes - case "Cached:": - m.Cached = &val - m.CachedBytes = &valBytes - case "SwapCached:": - m.SwapCached = &val - m.SwapCachedBytes = &valBytes - case "Active:": - m.Active = &val - m.ActiveBytes = &valBytes - case "Inactive:": - m.Inactive = &val - m.InactiveBytes = &valBytes - case "Active(anon):": - m.ActiveAnon = &val - m.ActiveAnonBytes = &valBytes - case "Inactive(anon):": - m.InactiveAnon = &val - m.InactiveAnonBytes = &valBytes - case "Active(file):": - m.ActiveFile = &val - m.ActiveFileBytes = &valBytes - case "Inactive(file):": - m.InactiveFile = &val - m.InactiveFileBytes = &valBytes - case "Unevictable:": - m.Unevictable = &val - m.UnevictableBytes = &valBytes - case "Mlocked:": - m.Mlocked = &val - m.MlockedBytes = &valBytes - case "SwapTotal:": - m.SwapTotal = &val - m.SwapTotalBytes = &valBytes - case "SwapFree:": - m.SwapFree = &val - m.SwapFreeBytes = &valBytes - case "Dirty:": - m.Dirty = &val - m.DirtyBytes = &valBytes - case "Writeback:": - m.Writeback = &val - m.WritebackBytes = &valBytes - case "AnonPages:": - m.AnonPages = &val - m.AnonPagesBytes = &valBytes - case "Mapped:": - m.Mapped = &val - m.MappedBytes = &valBytes - case "Shmem:": - m.Shmem = &val - m.ShmemBytes = &valBytes - case "Slab:": - m.Slab = &val - m.SlabBytes = &valBytes - case 
"SReclaimable:": - m.SReclaimable = &val - m.SReclaimableBytes = &valBytes - case "SUnreclaim:": - m.SUnreclaim = &val - m.SUnreclaimBytes = &valBytes - case "KernelStack:": - m.KernelStack = &val - m.KernelStackBytes = &valBytes - case "PageTables:": - m.PageTables = &val - m.PageTablesBytes = &valBytes - case "NFS_Unstable:": - m.NFSUnstable = &val - m.NFSUnstableBytes = &valBytes - case "Bounce:": - m.Bounce = &val - m.BounceBytes = &valBytes - case "WritebackTmp:": - m.WritebackTmp = &val - m.WritebackTmpBytes = &valBytes - case "CommitLimit:": - m.CommitLimit = &val - m.CommitLimitBytes = &valBytes - case "Committed_AS:": - m.CommittedAS = &val - m.CommittedASBytes = &valBytes - case "VmallocTotal:": - m.VmallocTotal = &val - m.VmallocTotalBytes = &valBytes - case "VmallocUsed:": - m.VmallocUsed = &val - m.VmallocUsedBytes = &valBytes - case "VmallocChunk:": - m.VmallocChunk = &val - m.VmallocChunkBytes = &valBytes - case "Percpu:": - m.Percpu = &val - m.PercpuBytes = &valBytes - case "HardwareCorrupted:": - m.HardwareCorrupted = &val - m.HardwareCorruptedBytes = &valBytes - case "AnonHugePages:": - m.AnonHugePages = &val - m.AnonHugePagesBytes = &valBytes - case "ShmemHugePages:": - m.ShmemHugePages = &val - m.ShmemHugePagesBytes = &valBytes - case "ShmemPmdMapped:": - m.ShmemPmdMapped = &val - m.ShmemPmdMappedBytes = &valBytes - case "CmaTotal:": - m.CmaTotal = &val - m.CmaTotalBytes = &valBytes - case "CmaFree:": - m.CmaFree = &val - m.CmaFreeBytes = &valBytes - case "HugePages_Total:": - m.HugePagesTotal = &val - case "HugePages_Free:": - m.HugePagesFree = &val - case "HugePages_Rsvd:": - m.HugePagesRsvd = &val - case "HugePages_Surp:": - m.HugePagesSurp = &val - case "Hugepagesize:": - m.Hugepagesize = &val - m.HugepagesizeBytes = &valBytes - case "DirectMap4k:": - m.DirectMap4k = &val - m.DirectMap4kBytes = &valBytes - case "DirectMap2M:": - m.DirectMap2M = &val - m.DirectMap2MBytes = &valBytes - case "DirectMap1G:": - m.DirectMap1G = &val - 
m.DirectMap1GBytes = &valBytes - } - } - - return &m, nil -} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go deleted file mode 100644 index a704c5e735..0000000000 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A MountInfo is a type that describes the details, options -// for each mount, parsed from /proc/self/mountinfo. -// The fields described in each entry of /proc/self/mountinfo -// is described in the following man page. 
-// http://man7.org/linux/man-pages/man5/proc.5.html -type MountInfo struct { - // Unique ID for the mount - MountID int - // The ID of the parent mount - ParentID int - // The value of `st_dev` for the files on this FS - MajorMinorVer string - // The pathname of the directory in the FS that forms - // the root for this mount - Root string - // The pathname of the mount point relative to the root - MountPoint string - // Mount options - Options map[string]string - // Zero or more optional fields - OptionalFields map[string]string - // The Filesystem type - FSType string - // FS specific information or "none" - Source string - // Superblock options - SuperOptions map[string]string -} - -// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. -func parseMountInfo(info []byte) ([]*MountInfo, error) { - mounts := []*MountInfo{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseMountInfoString(mountString) - if err != nil { - return nil, err - } - mounts = append(mounts, parsedMounts) - } - - err := scanner.Err() - return mounts, err -} - -// Parses a mountinfo file line, and converts it to a MountInfo struct. -// An important check here is to see if the hyphen separator, as if it does not exist, -// it means that the line is malformed. 
-func parseMountInfoString(mountString string) (*MountInfo, error) { - var err error - - mountInfo := strings.Split(mountString, " ") - mountInfoLength := len(mountInfo) - if mountInfoLength < 10 { - return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) - } - - if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) - } - - mount := &MountInfo{ - MajorMinorVer: mountInfo[2], - Root: mountInfo[3], - MountPoint: mountInfo[4], - Options: mountOptionsParser(mountInfo[5]), - OptionalFields: nil, - FSType: mountInfo[mountInfoLength-3], - Source: mountInfo[mountInfoLength-2], - SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), - } - - mount.MountID, err = strconv.Atoi(mountInfo[0]) - if err != nil { - return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID) - } - mount.ParentID, err = strconv.Atoi(mountInfo[1]) - if err != nil { - return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) - } - // Has optional fields, which is a space separated list of values. - // Example: shared:2 master:7 - if mountInfo[6] != "" { - mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) - if err != nil { - return nil, fmt.Errorf("%w: %w", ErrFileParse, err) - } - } - return mount, nil -} - -// mountOptionsIsValidField checks a string against a valid list of optional fields keys. -func mountOptionsIsValidField(s string) bool { - switch s { - case - "shared", - "master", - "propagate_from", - "unbindable": - return true - } - return false -} - -// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. 
-func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { - optionalFields := make(map[string]string) - for _, field := range o { - optionSplit := strings.SplitN(field, ":", 2) - value := "" - if len(optionSplit) == 2 { - value = optionSplit[1] - } - if mountOptionsIsValidField(optionSplit[0]) { - optionalFields[optionSplit[0]] = value - } - } - return optionalFields, nil -} - -// mountOptionsParser parses the mount options, superblock options. -func mountOptionsParser(mountOptions string) map[string]string { - opts := make(map[string]string) - options := strings.Split(mountOptions, ",") - for _, opt := range options { - splitOption := strings.Split(opt, "=") - if len(splitOption) < 2 { - key := splitOption[0] - opts[key] = "" - } else { - key, value := splitOption[0], splitOption[1] - opts[key] = value - } - } - return opts -} - -// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. -func GetMounts() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat("/proc/self/mountinfo") - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. -func GetProcMounts(pid int) ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go deleted file mode 100644 index 50caa73274..0000000000 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ /dev/null @@ -1,710 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// While implementing parsing of /proc/[pid]/mountstats, this blog was used -// heavily as a reference: -// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex -// -// Special thanks to Chris Siebenmann for all of his posts explaining the -// various statistics available for NFS. - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Constants shared between multiple functions. -const ( - deviceEntryLen = 8 - - fieldBytesLen = 8 - fieldEventsLen = 27 - - statVersion10 = "1.0" - statVersion11 = "1.1" - - fieldTransport10TCPLen = 10 - fieldTransport10UDPLen = 7 - - fieldTransport11TCPLen = 13 - fieldTransport11UDPLen = 10 - - // Kernel version >= 4.14 MaxLen - // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 - fieldTransport11RDMAMaxLen = 28 - - // Kernel version <= 4.2 MinLen - // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 - fieldTransport11RDMAMinLen = 20 -) - -// A Mount is a device mount parsed from /proc/[pid]/mountstats. -type Mount struct { - // Name of the device. - Device string - // The mount point of the device. - Mount string - // The filesystem type used by the device. - Type string - // If available additional statistics related to this Mount. - // Use a type assertion to determine if additional statistics are available. - Stats MountStats -} - -// A MountStats is a type which contains detailed statistics for a specific -// type of Mount. 
-type MountStats interface { - mountStats() -} - -// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. -type MountStatsNFS struct { - // The version of statistics provided. - StatVersion string - // The mount options of the NFS mount. - Opts map[string]string - // The age of the NFS mount. - Age time.Duration - // Statistics related to byte counters for various operations. - Bytes NFSBytesStats - // Statistics related to various NFS event occurrences. - Events NFSEventsStats - // Statistics broken down by filesystem operation. - Operations []NFSOperationStats - // Statistics about the NFS RPC transport. - Transport []NFSTransportStats -} - -// mountStats implements MountStats. -func (m MountStatsNFS) mountStats() {} - -// A NFSBytesStats contains statistics about the number of bytes read and written -// by an NFS client to and from an NFS server. -type NFSBytesStats struct { - // Number of bytes read using the read() syscall. - Read uint64 - // Number of bytes written using the write() syscall. - Write uint64 - // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead uint64 - // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite uint64 - // Number of bytes read from the NFS server, in total. - ReadTotal uint64 - // Number of bytes written to the NFS server, in total. - WriteTotal uint64 - // Number of pages read directly via mmap()'d files. - ReadPages uint64 - // Number of pages written directly via mmap()'d files. - WritePages uint64 -} - -// A NFSEventsStats contains statistics about NFS event occurrences. -type NFSEventsStats struct { - // Number of times cached inode attributes are re-validated from the server. - InodeRevalidate uint64 - // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate uint64 - // Number of times an inode cache is cleared. - DataInvalidate uint64 - // Number of times cached inode attributes are invalidated. 
- AttributeInvalidate uint64 - // Number of times files or directories have been open()'d. - VFSOpen uint64 - // Number of times a directory lookup has occurred. - VFSLookup uint64 - // Number of times permissions have been checked. - VFSAccess uint64 - // Number of updates (and potential writes) to pages. - VFSUpdatePage uint64 - // Number of pages read directly via mmap()'d files. - VFSReadPage uint64 - // Number of times a group of pages have been read. - VFSReadPages uint64 - // Number of pages written directly via mmap()'d files. - VFSWritePage uint64 - // Number of times a group of pages have been written. - VFSWritePages uint64 - // Number of times directory entries have been read with getdents(). - VFSGetdents uint64 - // Number of times attributes have been set on inodes. - VFSSetattr uint64 - // Number of pending writes that have been forcefully flushed to the server. - VFSFlush uint64 - // Number of times fsync() has been called on directories and files. - VFSFsync uint64 - // Number of times locking has been attempted on a file. - VFSLock uint64 - // Number of times files have been closed and released. - VFSFileRelease uint64 - // Unknown. Possibly unused. - CongestionWait uint64 - // Number of times files have been truncated. - Truncation uint64 - // Number of times a file has been grown due to writes beyond its existing end. - WriteExtension uint64 - // Number of times a file was removed while still open by another process. - SillyRename uint64 - // Number of times the NFS server gave less data than expected while reading. - ShortRead uint64 - // Number of times the NFS server wrote less data than expected while writing. - ShortWrite uint64 - // Number of times the NFS server indicated EJUKEBOX; retrieving data from - // offline storage. - JukeboxDelay uint64 - // Number of NFS v4.1+ pNFS reads. - PNFSRead uint64 - // Number of NFS v4.1+ pNFS writes. - PNFSWrite uint64 -} - -// A NFSOperationStats contains statistics for a single operation. 
-type NFSOperationStats struct { - // The name of the operation. - Operation string - // Number of requests performed for this operation. - Requests uint64 - // Number of times an actual RPC request has been transmitted for this operation. - Transmissions uint64 - // Number of times a request has had a major timeout. - MajorTimeouts uint64 - // Number of bytes sent for this operation, including RPC headers and payload. - BytesSent uint64 - // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived uint64 - // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueMilliseconds uint64 - // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseMilliseconds uint64 - // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestMilliseconds uint64 - // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. - Errors uint64 -} - -// A NFSTransportStats contains statistics for the NFS mount RPC requests and -// responses. -type NFSTransportStats struct { - // The transport protocol used for the NFS mount. - Protocol string - // The local port used for the NFS mount. - Port uint64 - // Number of times the client has had to establish a connection from scratch - // to the NFS server. - Bind uint64 - // Number of times the client has made a TCP connection to the NFS server. - Connect uint64 - // Duration (in jiffies, a kernel internal unit of time) the NFS mount has - // spent waiting for connections to the server to be established. - ConnectIdleTime uint64 - // Duration since the NFS mount last saw any RPC traffic. - IdleTimeSeconds uint64 - // Number of RPC requests for this mount sent to the NFS server. - Sends uint64 - // Number of RPC responses for this mount received from the NFS server. 
- Receives uint64 - // Number of times the NFS server sent a response with a transaction ID - // unknown to this client. - BadTransactionIDs uint64 - // A running counter, incremented on each request as the current difference - // ebetween sends and receives. - CumulativeActiveRequests uint64 - // A running counter, incremented on each request by the current backlog - // queue size. - CumulativeBacklog uint64 - - // Stats below only available with stat version 1.1. - - // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed uint64 - // A running counter, incremented on each request as the current size of the - // sending queue. - CumulativeSendingQueue uint64 - // A running counter, incremented on each request as the current size of the - // pending queue. - CumulativePendingQueue uint64 - - // Stats below only available with stat version 1.1. - // Transport over RDMA - - // accessed when sending a call - ReadChunkCount uint64 - WriteChunkCount uint64 - ReplyChunkCount uint64 - TotalRdmaRequest uint64 - - // rarely accessed error counters - PullupCopyCount uint64 - HardwayRegisterCount uint64 - FailedMarshalCount uint64 - BadReplyCount uint64 - MrsRecovered uint64 - MrsOrphaned uint64 - MrsAllocated uint64 - EmptySendctxQ uint64 - - // accessed when receiving a reply - TotalRdmaReply uint64 - FixupCopyCount uint64 - ReplyWaitsForSend uint64 - LocalInvNeeded uint64 - NomsgCallCount uint64 - BcallCount uint64 -} - -// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice -// of Mount structures containing detailed information about each mount. -// If available, statistics for each mount are parsed as well. 
-func parseMountStats(r io.Reader) ([]*Mount, error) { - const ( - device = "device" - statVersionPrefix = "statvers=" - - nfs3Type = "nfs" - nfs4Type = "nfs4" - ) - - var mounts []*Mount - - s := bufio.NewScanner(r) - for s.Scan() { - // Only look for device entries in this function - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 || ss[0] != device { - continue - } - - m, err := parseMount(ss) - if err != nil { - return nil, err - } - - // Does this mount also possess statistics information? - if len(ss) > deviceEntryLen { - // Only NFSv3 and v4 are supported for parsing statistics - if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) - } - - statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) - - stats, err := parseMountStatsNFS(s, statVersion) - if err != nil { - return nil, err - } - - m.Stats = stats - } - - mounts = append(mounts, m) - } - - return mounts, s.Err() -} - -// parseMount parses an entry in /proc/[pid]/mountstats in the format: -// -// device [device] mounted on [mount] with fstype [type] -func parseMount(ss []string) (*Mount, error) { - if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) - } - - // Check for specific words appearing at specific indices to ensure - // the format is consistent with what we expect - format := []struct { - i int - s string - }{ - {i: 0, s: "device"}, - {i: 2, s: "mounted"}, - {i: 3, s: "on"}, - {i: 5, s: "with"}, - {i: 6, s: "fstype"}, - } - - for _, f := range format { - if ss[f.i] != f.s { - return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) - } - } - - return &Mount{ - Device: ss[1], - Mount: ss[4], - Type: ss[7], - }, nil -} - -// parseMountStatsNFS parses a MountStatsNFS by scanning additional information -// related to NFS statistics. 
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { - // Field indicators for parsing specific types of data - const ( - fieldOpts = "opts:" - fieldAge = "age:" - fieldBytes = "bytes:" - fieldEvents = "events:" - fieldPerOpStats = "per-op" - fieldTransport = "xprt:" - ) - - stats := &MountStatsNFS{ - StatVersion: statVersion, - } - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - break - } - - switch ss[0] { - case fieldOpts: - if len(ss) < 2 { - return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) - } - if stats.Opts == nil { - stats.Opts = map[string]string{} - } - for _, opt := range strings.Split(ss[1], ",") { - split := strings.Split(opt, "=") - if len(split) == 2 { - stats.Opts[split[0]] = split[1] - } else { - stats.Opts[opt] = "" - } - } - case fieldAge: - if len(ss) < 2 { - return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) - } - // Age integer is in seconds - d, err := time.ParseDuration(ss[1] + "s") - if err != nil { - return nil, err - } - - stats.Age = d - case fieldBytes: - if len(ss) < 2 { - return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) - } - bstats, err := parseNFSBytesStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Bytes = *bstats - case fieldEvents: - if len(ss) < 2 { - return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss) - } - estats, err := parseNFSEventsStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Events = *estats - case fieldTransport: - if len(ss) < 3 { - return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss) - } - - tstats, err := parseNFSTransportStats(ss[1:], statVersion) - if err != nil { - return nil, err - } - - stats.Transport = append(stats.Transport, *tstats) - } - - // When encountering "per-operation statistics", we must break this - 
// loop and parse them separately to ensure we can terminate parsing - // before reaching another device entry; hence why this 'if' statement - // is not just another switch case - if ss[0] == fieldPerOpStats { - break - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - // NFS per-operation stats appear last before the next device entry - perOpStats, err := parseNFSOperationStats(s) - if err != nil { - return nil, err - } - - stats.Operations = perOpStats - - return stats, nil -} - -// parseNFSBytesStats parses a NFSBytesStats line using an input set of -// integer fields. -func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { - if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss) - } - - ns := make([]uint64, 0, fieldBytesLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSBytesStats{ - Read: ns[0], - Write: ns[1], - DirectRead: ns[2], - DirectWrite: ns[3], - ReadTotal: ns[4], - WriteTotal: ns[5], - ReadPages: ns[6], - WritePages: ns[7], - }, nil -} - -// parseNFSEventsStats parses a NFSEventsStats line using an input set of -// integer fields. 
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { - if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) - } - - ns := make([]uint64, 0, fieldEventsLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSEventsStats{ - InodeRevalidate: ns[0], - DnodeRevalidate: ns[1], - DataInvalidate: ns[2], - AttributeInvalidate: ns[3], - VFSOpen: ns[4], - VFSLookup: ns[5], - VFSAccess: ns[6], - VFSUpdatePage: ns[7], - VFSReadPage: ns[8], - VFSReadPages: ns[9], - VFSWritePage: ns[10], - VFSWritePages: ns[11], - VFSGetdents: ns[12], - VFSSetattr: ns[13], - VFSFlush: ns[14], - VFSFsync: ns[15], - VFSLock: ns[16], - VFSFileRelease: ns[17], - CongestionWait: ns[18], - Truncation: ns[19], - WriteExtension: ns[20], - SillyRename: ns[21], - ShortRead: ns[22], - ShortWrite: ns[23], - JukeboxDelay: ns[24], - PNFSRead: ns[25], - PNFSWrite: ns[26], - }, nil -} - -// parseNFSOperationStats parses a slice of NFSOperationStats by scanning -// additional information about per-operation statistics until an empty -// line is reached. 
-func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { - const ( - // Minimum number of expected fields in each per-operation statistics set - minFields = 9 - ) - - var ops []NFSOperationStats - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - // Must break when reading a blank line after per-operation stats to - // enable top-level function to parse the next device entry - break - } - - if len(ss) < minFields { - return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) - } - - // Skip string operation name for integers - ns := make([]uint64, 0, minFields-1) - for _, st := range ss[1:] { - n, err := strconv.ParseUint(st, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - opStats := NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueMilliseconds: ns[5], - CumulativeTotalResponseMilliseconds: ns[6], - CumulativeTotalRequestMilliseconds: ns[7], - } - - if len(ns) > 8 { - opStats.Errors = ns[8] - } - - ops = append(ops, opStats) - } - - return ops, s.Err() -} - -// parseNFSTransportStats parses a NFSTransportStats line using an input set of -// integer fields matched to a specific stats version. -func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - // Extract the protocol field. 
It is the only string value in the line - protocol := ss[0] - ss = ss[1:] - - switch statVersion { - case statVersion10: - var expectedLength int - switch protocol { - case "tcp": - expectedLength = fieldTransport10TCPLen - case "udp": - expectedLength = fieldTransport10UDPLen - default: - return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) - } - case statVersion11: - var expectedLength int - switch protocol { - case "tcp": - expectedLength = fieldTransport11TCPLen - case "udp": - expectedLength = fieldTransport11UDPLen - case "rdma": - expectedLength = fieldTransport11RDMAMinLen - default: - return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) - } - if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || - (protocol == "rdma" && len(ss) < expectedLength) { - return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) - } - default: - return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) - } - - // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. Since the stat length is bigger for TCP stats, we use - // the TCP length here. - // - // Note: slice length must be set to length of v1.1 stats to avoid a panic when - // only v1.0 stats are present. - // See: https://github.com/prometheus/node_exporter/issues/571. 
- // - // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen - ns := make([]uint64, fieldTransport11RDMAMaxLen+3) - for i, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns[i] = n - } - - // The fields differ depending on the transport protocol (TCP or UDP) - // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt - // - // For the udp RPC transport there is no connection count, connect idle time, - // or idle time (fields #3, #4, and #5); all other fields are the same. So - // we set them to 0 here. - switch protocol { - case "udp": - ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - case "tcp": - ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - case "rdma": - ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) - } - - return &NFSTransportStats{ - // NFS xprt over tcp or udp - Protocol: protocol, - Port: ns[0], - Bind: ns[1], - Connect: ns[2], - ConnectIdleTime: ns[3], - IdleTimeSeconds: ns[4], - Sends: ns[5], - Receives: ns[6], - BadTransactionIDs: ns[7], - CumulativeActiveRequests: ns[8], - CumulativeBacklog: ns[9], - - // NFS xprt over tcp or udp - // And statVersion 1.1 - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], - - // NFS xprt over rdma - // And stat Version 1.1 - ReadChunkCount: ns[13], - WriteChunkCount: ns[14], - ReplyChunkCount: ns[15], - TotalRdmaRequest: ns[16], - PullupCopyCount: ns[17], - HardwayRegisterCount: ns[18], - FailedMarshalCount: ns[19], - BadReplyCount: ns[20], - MrsRecovered: ns[21], - MrsOrphaned: ns[22], - MrsAllocated: ns[23], - EmptySendctxQ: ns[24], - TotalRdmaReply: ns[25], - FixupCopyCount: ns[26], - ReplyWaitsForSend: ns[27], - LocalInvNeeded: ns[28], - NomsgCallCount: ns[29], - BcallCount: ns[30], - }, nil -} diff --git 
a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go deleted file mode 100644 index 316df5fbb7..0000000000 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core. -type ConntrackStatEntry struct { - Entries uint64 - Searched uint64 - Found uint64 - New uint64 - Invalid uint64 - Ignore uint64 - Delete uint64 - DeleteList uint64 - Insert uint64 - InsertFailed uint64 - Drop uint64 - EarlyDrop uint64 - SearchRestart uint64 -} - -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. -func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { - return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) -} - -// Parses a slice of ConntrackStatEntries from the given filepath. -func readConntrackStat(path string) ([]ConntrackStatEntry, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(path) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. 
- return nil, err - } - - stat, err := parseConntrackStat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err) - } - - return stat, nil -} - -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. -func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { - var entries []ConntrackStatEntry - - scanner := bufio.NewScanner(r) - scanner.Scan() - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - conntrackEntry, err := parseConntrackStatEntry(fields) - if err != nil { - return nil, err - } - entries = append(entries, *conntrackEntry) - } - - return entries, nil -} - -// Parses a ConntrackStatEntry from given array of fields. -func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - entries, err := util.ParseHexUint64s(fields) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err) - } - numEntries := len(entries) - if numEntries < 16 || numEntries > 17 { - return nil, - fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) - } - - stats := &ConntrackStatEntry{ - Entries: *entries[0], - Searched: *entries[1], - Found: *entries[2], - New: *entries[3], - Invalid: *entries[4], - Ignore: *entries[5], - Delete: *entries[6], - DeleteList: *entries[7], - Insert: *entries[8], - InsertFailed: *entries[9], - Drop: *entries[10], - EarlyDrop: *entries[11], - } - - // Ignore missing search_restart on Linux < 2.6.35. 
- if numEntries == 17 { - stats.SearchRestart = *entries[16] - } - - return stats, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go deleted file mode 100644 index e66208aa05..0000000000 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "sort" - "strconv" - "strings" -) - -// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. -type NetDevLine struct { - Name string `json:"name"` // The name of the interface. - RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. - RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. - RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. - RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. - RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. - RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. - RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. - RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. 
- TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. - TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. - TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. - TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. - TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. - TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. - TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. - TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. -} - -// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys -// are interface names. -type NetDev map[string]NetDevLine - -// NetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NetDev() (NetDev, error) { - return newNetDev(fs.proc.Path("net/dev")) -} - -// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NetDev() (NetDev, error) { - return newNetDev(p.path("net/dev")) -} - -// newNetDev creates a new NetDev from the contents of the given file. -func newNetDev(file string) (NetDev, error) { - f, err := os.Open(file) - if err != nil { - return NetDev{}, err - } - defer f.Close() - - netDev := NetDev{} - s := bufio.NewScanner(f) - for n := 0; s.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line, err := netDev.parseLine(s.Text()) - if err != nil { - return netDev, err - } - - netDev[line.Name] = *line - } - - return netDev, s.Err() -} - -// parseLine parses a single line from the /proc/net/dev file. Header lines -// must be filtered prior to calling this method. 
-func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - idx := strings.LastIndex(rawLine, ":") - if idx == -1 { - return nil, errors.New("invalid net/dev line, missing colon") - } - fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) - - var err error - line := &NetDevLine{} - - // Interface Name - line.Name = strings.TrimSpace(rawLine[:idx]) - if line.Name == "" { - return nil, errors.New("invalid net/dev line, empty interface name") - } - - // RX - line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) - if err != nil { - return nil, err - } - line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return nil, err - } - - // TX - line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) - if err != nil { - return nil, err - } - line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) - if err != nil { - return nil, err - } - line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) - if err != nil { - return nil, err - } - line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) - if err != nil { - return nil, err - } - line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) - if err != nil { - return nil, err - } - line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) - if err != nil { - return nil, err - } - line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) - if err != nil { - return nil, err 
- } - line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) - if err != nil { - return nil, err - } - - return line, nil -} - -// Total aggregates the values across interfaces and returns a new NetDevLine. -// The Name field will be a sorted comma separated list of interface names. -func (netDev NetDev) Total() NetDevLine { - total := NetDevLine{} - - names := make([]string, 0, len(netDev)) - for _, ifc := range netDev { - names = append(names, ifc.Name) - total.RxBytes += ifc.RxBytes - total.RxPackets += ifc.RxPackets - total.RxErrors += ifc.RxErrors - total.RxDropped += ifc.RxDropped - total.RxFIFO += ifc.RxFIFO - total.RxFrame += ifc.RxFrame - total.RxCompressed += ifc.RxCompressed - total.RxMulticast += ifc.RxMulticast - total.TxBytes += ifc.TxBytes - total.TxPackets += ifc.TxPackets - total.TxErrors += ifc.TxErrors - total.TxDropped += ifc.TxDropped - total.TxFIFO += ifc.TxFIFO - total.TxCollisions += ifc.TxCollisions - total.TxCarrier += ifc.TxCarrier - total.TxCompressed += ifc.TxCompressed - } - sort.Strings(names) - total.Name = strings.Join(names, ", ") - - return total -} diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go deleted file mode 100644 index f50b38e352..0000000000 --- a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "errors" - "io" - "os" - "strconv" - "strings" -) - -// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc//net/dev_snmp6/. -// The outer map's keys are interface names and the inner map's keys are stat names. -// -// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. -type NetDevSNMP6 map[string]map[string]uint64 - -// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ -// directory. -func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { - return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) -} - -// Returns kernel/system statistics read from interface files within the /proc//net/dev_snmp6/ -// directory. -func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { - return newNetDevSNMP6(p.path("net/dev_snmp6")) -} - -// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. -func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { - netDevSNMP6 := make(NetDevSNMP6) - - // The net/dev_snmp6 folders contain one file per interface - ifaceFiles, err := os.ReadDir(dir) - if err != nil { - // On systems with IPv6 disabled, this directory won't exist. - // Do nothing. 
- if errors.Is(err, os.ErrNotExist) { - return netDevSNMP6, err - } - return netDevSNMP6, err - } - - for _, iFaceFile := range ifaceFiles { - f, err := os.Open(dir + "/" + iFaceFile.Name()) - if err != nil { - return netDevSNMP6, err - } - defer f.Close() - - netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) - if err != nil { - return netDevSNMP6, err - } - } - - return netDevSNMP6, nil -} - -func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { - m := make(map[string]uint64) - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - stat := strings.Fields(scanner.Text()) - if len(stat) < 2 { - continue - } - key, val := stat[0], stat[1] - - // Expect stat name to contain "6" or be "ifIndex" - if strings.Contains(key, "6") || key == "ifIndex" { - v, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return m, err - } - - m[key] = v - } - } - return m, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go deleted file mode 100644 index 19e3378f72..0000000000 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "encoding/hex" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" -) - -const ( - // Maximum size limit used by io.LimitReader while reading the content of the - // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic - // as each line represents a single used socket. - // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. - // With e.g. 150 Byte per line and the maximum number of 65535, - // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. - readLimit = 4294967296 // Byte -> 4 GiB -) - -// This contains generic data structures for both udp and tcp sockets. -type ( - // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. - NetIPSocket []*netIPSocketLine - - // NetIPSocketSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetIPSocket it does not collect - // the parsed lines into a slice. - NetIPSocketSummary struct { - // TxQueueLength shows the total queue length of all parsed tx_queue lengths. - TxQueueLength uint64 - // RxQueueLength shows the total queue length of all parsed rx_queue lengths. - RxQueueLength uint64 - // UsedSockets shows the total number of parsed lines representing the - // number of used sockets. - UsedSockets uint64 - // Drops shows the total number of dropped packets of all UDP sockets. - Drops *uint64 - } - - // A single line parser for fields from /proc/net/{t,u}dp{,6}. - // Fields which are not used by IPSocket are skipped. - // Drops is non-nil for udp{,6}, but nil for tcp{,6}. - // For the proc file format details, see https://linux.die.net/man/5/proc. 
- netIPSocketLine struct { - Sl uint64 - LocalAddr net.IP - LocalPort uint64 - RemAddr net.IP - RemPort uint64 - St uint64 - TxQueue uint64 - RxQueue uint64 - UID uint64 - Inode uint64 - Drops *uint64 - } -) - -func newNetIPSocket(file string) (NetIPSocket, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - var netIPSocket NetIPSocket - isUDP := strings.Contains(file, "udp") - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields, isUDP) - if err != nil { - return nil, err - } - netIPSocket = append(netIPSocket, line) - } - if err := s.Err(); err != nil { - return nil, err - } - return netIPSocket, nil -} - -// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file. -func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - var netIPSocketSummary NetIPSocketSummary - var udpPacketDrops uint64 - isUDP := strings.Contains(file, "udp") - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields, isUDP) - if err != nil { - return nil, err - } - netIPSocketSummary.TxQueueLength += line.TxQueue - netIPSocketSummary.RxQueueLength += line.RxQueue - netIPSocketSummary.UsedSockets++ - if isUDP { - udpPacketDrops += *line.Drops - netIPSocketSummary.Drops = &udpPacketDrops - } - } - if err := s.Err(); err != nil { - return nil, err - } - return &netIPSocketSummary, nil -} - -// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order. 
- -func parseIP(hexIP string) (net.IP, error) { - var byteIP []byte - byteIP, err := hex.DecodeString(hexIP) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) - } - switch len(byteIP) { - case 4: - return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil - case 16: - i := net.IP{ - byteIP[3], byteIP[2], byteIP[1], byteIP[0], - byteIP[7], byteIP[6], byteIP[5], byteIP[4], - byteIP[11], byteIP[10], byteIP[9], byteIP[8], - byteIP[15], byteIP[14], byteIP[13], byteIP[12], - } - return i, nil - default: - return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil) - } -} - -// parseNetIPSocketLine parses a single line, represented by a list of fields. -func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) { - line := &netIPSocketLine{} - if len(fields) < 10 { - return nil, fmt.Errorf( - "%w: Less than 10 columns found %q", - ErrFileParse, - strings.Join(fields, " "), - ) - } - var err error // parse error - - // sl - s := strings.Split(fields[0], ":") - if len(s) != 2 { - return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0]) - } - - if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) - } - // local_address - l := strings.Split(fields[1], ":") - if len(l) != 2 { - return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1]) - } - if line.LocalAddr, err = parseIP(l[0]); err != nil { - return nil, err - } - if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) - } - - // remote_address - r := strings.Split(fields[2], ":") - if len(r) != 2 { - return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1]) - } - if 
line.RemAddr, err = parseIP(r[0]); err != nil { - return nil, err - } - if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) - } - - // st - if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) - } - - // tx_queue and rx_queue - q := strings.Split(fields[4], ":") - if len(q) != 2 { - return nil, fmt.Errorf( - "%w: Missing colon for tx/rx queues in socket line %q", - ErrFileParse, - fields[4], - ) - } - if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) - } - if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) - } - - // uid - if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) - } - - // inode - if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) - } - - // drops - if isUDP { - drops, err := strconv.ParseUint(fields[12], 0, 64) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) - } - line.Drops = &drops - } - - return line, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go deleted file mode 100644 index 8d4b1ac05b..0000000000 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not 
use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// NetProtocolStats stores the contents from /proc/net/protocols. -type NetProtocolStats map[string]NetProtocolStatLine - -// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We -// only care about the first six columns as the rest are not likely to change -// and only serve to provide a set of capabilities for each protocol. -type NetProtocolStatLine struct { - Name string // 0 The name of the protocol - Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock) - Sockets int64 // 2 Number of sockets in use by this protocol - Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol - Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure. - MaxHeader uint64 // 5 Protocol specific max header size - Slab bool // 6 Indicates whether or not memory is allocated from the SLAB - ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module - Capabilities NetProtocolCapabilities -} - -// NetProtocolCapabilities contains a list of capabilities for each protocol. 
-type NetProtocolCapabilities struct { - Close bool // 8 - Connect bool // 9 - Disconnect bool // 10 - Accept bool // 11 - IoCtl bool // 12 - Init bool // 13 - Destroy bool // 14 - Shutdown bool // 15 - SetSockOpt bool // 16 - GetSockOpt bool // 17 - SendMsg bool // 18 - RecvMsg bool // 19 - SendPage bool // 20 - Bind bool // 21 - BacklogRcv bool // 22 - Hash bool // 23 - UnHash bool // 24 - GetPort bool // 25 - EnterMemoryPressure bool // 26 -} - -// NetProtocols reads stats from /proc/net/protocols and returns a map of -// PortocolStatLine entries. As of this writing no official Linux Documentation -// exists, however the source is fairly self-explanatory and the format seems -// stable since its introduction in 2.6.12-rc2 -// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 -// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 -func (fs FS) NetProtocols() (NetProtocolStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) - if err != nil { - return NetProtocolStats{}, err - } - return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) -} - -func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { - nps := NetProtocolStats{} - - // Skip the header line - s.Scan() - - for s.Scan() { - line, err := nps.parseLine(s.Text()) - if err != nil { - return NetProtocolStats{}, err - } - - nps[line.Name] = *line - } - return nps, nil -} - -func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { - line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} - var err error - const enabled = "yes" - const disabled = "no" - - fields := strings.Fields(rawLine) - line.Name = fields[0] - line.Size, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.Memory, err = 
strconv.ParseInt(fields[3], 10, 64) - if err != nil { - return nil, err - } - switch fields[4] { - case enabled: - line.Pressure = 1 - case disabled: - line.Pressure = 0 - default: - line.Pressure = -1 - } - line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - switch fields[6] { - case enabled: - line.Slab = true - case disabled: - line.Slab = false - default: - return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) - } - line.ModuleName = fields[7] - - err = line.Capabilities.parseCapabilities(fields[8:]) - if err != nil { - return nil, err - } - - return line, nil -} - -func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { - // The capabilities are all bools so we can loop over to map them - capabilityFields := [...]*bool{ - &pc.Close, - &pc.Connect, - &pc.Disconnect, - &pc.Accept, - &pc.IoCtl, - &pc.Init, - &pc.Destroy, - &pc.Shutdown, - &pc.SetSockOpt, - &pc.GetSockOpt, - &pc.SendMsg, - &pc.RecvMsg, - &pc.SendPage, - &pc.Bind, - &pc.BacklogRcv, - &pc.Hash, - &pc.UnHash, - &pc.GetPort, - &pc.EnterMemoryPressure, - } - - for i := 0; i < len(capabilities); i++ { - switch capabilities[i] { - case "y": - *capabilityFields[i] = true - case "n": - *capabilityFields[i] = false - default: - return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) - } - } - return nil -} diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go deleted file mode 100644 index deb7029fe1..0000000000 --- a/vendor/github.com/prometheus/procfs/net_route.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -const ( - blackholeRepresentation string = "*" - blackholeIfaceName string = "blackhole" - routeLineColumns int = 11 -) - -// A NetRouteLine represents one line from net/route. -type NetRouteLine struct { - Iface string - Destination uint32 - Gateway uint32 - Flags uint32 - RefCnt uint32 - Use uint32 - Metric uint32 - Mask uint32 - MTU uint32 - Window uint32 - IRTT uint32 -} - -func (fs FS) NetRoute() ([]NetRouteLine, error) { - return readNetRoute(fs.proc.Path("net", "route")) -} - -func readNetRoute(path string) ([]NetRouteLine, error) { - b, err := util.ReadFileNoStat(path) - if err != nil { - return nil, err - } - - routelines, err := parseNetRoute(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) - } - return routelines, nil -} - -func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { - var routelines []NetRouteLine - - scanner := bufio.NewScanner(r) - scanner.Scan() - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - routeline, err := parseNetRouteLine(fields) - if err != nil { - return nil, err - } - routelines = append(routelines, *routeline) - } - return routelines, nil -} - -func parseNetRouteLine(fields []string) (*NetRouteLine, error) { - if len(fields) != routeLineColumns { - return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) - } - iface := fields[0] - if iface == blackholeRepresentation 
{ - iface = blackholeIfaceName - } - destination, err := strconv.ParseUint(fields[1], 16, 32) - if err != nil { - return nil, err - } - gateway, err := strconv.ParseUint(fields[2], 16, 32) - if err != nil { - return nil, err - } - flags, err := strconv.ParseUint(fields[3], 10, 32) - if err != nil { - return nil, err - } - refcnt, err := strconv.ParseUint(fields[4], 10, 32) - if err != nil { - return nil, err - } - use, err := strconv.ParseUint(fields[5], 10, 32) - if err != nil { - return nil, err - } - metric, err := strconv.ParseUint(fields[6], 10, 32) - if err != nil { - return nil, err - } - mask, err := strconv.ParseUint(fields[7], 16, 32) - if err != nil { - return nil, err - } - mtu, err := strconv.ParseUint(fields[8], 10, 32) - if err != nil { - return nil, err - } - window, err := strconv.ParseUint(fields[9], 10, 32) - if err != nil { - return nil, err - } - irtt, err := strconv.ParseUint(fields[10], 10, 32) - if err != nil { - return nil, err - } - routeline := &NetRouteLine{ - Iface: iface, - Destination: uint32(destination), - Gateway: uint32(gateway), - Flags: uint32(flags), - RefCnt: uint32(refcnt), - Use: uint32(use), - Metric: uint32(metric), - Mask: uint32(mask), - MTU: uint32(mtu), - Window: uint32(window), - IRTT: uint32(irtt), - } - return routeline, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go deleted file mode 100644 index fae62b13d9..0000000000 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, -// respectively. -type NetSockstat struct { - // Used is non-nil for IPv4 sockstat results, but nil for IPv6. - Used *int - Protocols []NetSockstatProtocol -} - -// A NetSockstatProtocol contains statistics about a given socket protocol. -// Pointer fields indicate that the value may or may not be present on any -// given protocol. -type NetSockstatProtocol struct { - Protocol string - InUse int - Orphan *int - TW *int - Alloc *int - Mem *int - Memory *int -} - -// NetSockstat retrieves IPv4 socket statistics. -func (fs FS) NetSockstat() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat")) -} - -// NetSockstat6 retrieves IPv6 socket statistics. -// -// If IPv6 is disabled on this kernel, the returned error can be checked with -// os.IsNotExist. -func (fs FS) NetSockstat6() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat6")) -} - -// readSockstat opens and parses a NetSockstat from the input file. -func readSockstat(name string) (*NetSockstat, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(name) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. 
- return nil, err - } - - stat, err := parseSockstat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) - } - - return stat, nil -} - -// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. -func parseSockstat(r io.Reader) (*NetSockstat, error) { - var stat NetSockstat - s := bufio.NewScanner(r) - for s.Scan() { - // Expect a minimum of a protocol and one key/value pair. - fields := strings.Split(s.Text(), " ") - if len(fields) < 3 { - return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) - } - - // The remaining fields are key/value pairs. - kvs, err := parseSockstatKVs(fields[1:]) - if err != nil { - return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) - } - - // The first field is the protocol. We must trim its colon suffix. - proto := strings.TrimSuffix(fields[0], ":") - switch proto { - case "sockets": - // Special case: IPv4 has a sockets "used" key/value pair that we - // embed at the top level of the structure. - used := kvs["used"] - stat.Used = &used - default: - // Parse all other lines as individual protocols. - nsp := parseSockstatProtocol(kvs) - nsp.Protocol = proto - stat.Protocols = append(stat.Protocols, nsp) - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - return &stat, nil -} - -// parseSockstatKVs parses a string slice into a map of key/value pairs. -func parseSockstatKVs(kvs []string) (map[string]int, error) { - if len(kvs)%2 != 0 { - return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs) - } - - // Iterate two values at a time to gather key/value pairs. 
- out := make(map[string]int, len(kvs)/2) - for i := 0; i < len(kvs); i += 2 { - vp := util.NewValueParser(kvs[i+1]) - out[kvs[i]] = vp.Int() - - if err := vp.Err(); err != nil { - return nil, err - } - } - - return out, nil -} - -// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. -func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { - var nsp NetSockstatProtocol - for k, v := range kvs { - // Capture the range variable to ensure we get unique pointers for - // each of the optional fields. - v := v - switch k { - case "inuse": - nsp.InUse = v - case "orphan": - nsp.Orphan = &v - case "tw": - nsp.TW = &v - case "alloc": - nsp.Alloc = &v - case "mem": - nsp.Mem = &v - case "memory": - nsp.Memory = &v - } - } - - return nsp -} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go deleted file mode 100644 index 71c8059f4d..0000000000 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// For the proc file format details, -// See: -// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 -// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 -// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 - -// SoftnetStat contains a single row of data from /proc/net/softnet_stat. -type SoftnetStat struct { - // Number of processed packets. - Processed uint32 - // Number of dropped packets. - Dropped uint32 - // Number of times processing packets ran out of quota. - TimeSqueezed uint32 - // Number of collision occur while obtaining device lock while transmitting. - CPUCollision uint32 - // Number of times cpu woken up received_rps. - ReceivedRps uint32 - // number of times flow limit has been reached. - FlowLimitCount uint32 - // Softnet backlog status. - SoftnetBacklogLen uint32 - // CPU id owning this softnet_data. - Index uint32 - // softnet_data's Width. - Width int -} - -var softNetProcFile = "net/softnet_stat" - -// NetSoftnetStat reads data from /proc/net/softnet_stat. 
-func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { - b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) - if err != nil { - return nil, err - } - - entries, err := parseSoftnet(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) - } - - return entries, nil -} - -func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { - const minColumns = 9 - - s := bufio.NewScanner(r) - - var stats []SoftnetStat - cpuIndex := 0 - for s.Scan() { - columns := strings.Fields(s.Text()) - width := len(columns) - softnetStat := SoftnetStat{} - - if width < minColumns { - return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) - } - - // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 - if width >= minColumns { - us, err := parseHexUint32s(columns[0:9]) - if err != nil { - return nil, err - } - - softnetStat.Processed = us[0] - softnetStat.Dropped = us[1] - softnetStat.TimeSqueezed = us[2] - softnetStat.CPUCollision = us[8] - } - - // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 - if width >= 10 { - us, err := parseHexUint32s(columns[9:10]) - if err != nil { - return nil, err - } - - softnetStat.ReceivedRps = us[0] - } - - // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 - if width >= 11 { - us, err := parseHexUint32s(columns[10:11]) - if err != nil { - return nil, err - } - - softnetStat.FlowLimitCount = us[0] - } - - // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 - if width >= 13 { - us, err := parseHexUint32s(columns[11:13]) - if err != nil { - return nil, err - } - - softnetStat.SoftnetBacklogLen = us[0] - softnetStat.Index = us[1] - } else { - // For older kernels, create the Index based on the scan line number. 
- softnetStat.Index = uint32(cpuIndex) - } - softnetStat.Width = width - stats = append(stats, softnetStat) - cpuIndex++ - } - - return stats, nil -} - -func parseHexUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go deleted file mode 100644 index 0396d72015..0000000000 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -type ( - // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. - NetTCP []*netIPSocketLine - - // NetTCPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetTCP it does not collect - // the parsed lines into a slice. - NetTCPSummary NetIPSocketSummary -) - -// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams -// read from /proc/net/tcp. -// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. 
-func (fs FS) NetTCP() (NetTCP, error) { - return newNetTCP(fs.proc.Path("net/tcp")) -} - -// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams -// read from /proc/net/tcp6. -// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. -func (fs FS) NetTCP6() (NetTCP, error) { - return newNetTCP(fs.proc.Path("net/tcp6")) -} - -// NetTCPSummary returns already computed statistics like the total queue lengths -// for TCP datagrams read from /proc/net/tcp. -// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. -func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { - return newNetTCPSummary(fs.proc.Path("net/tcp")) -} - -// NetTCP6Summary returns already computed statistics like the total queue lengths -// for TCP datagrams read from /proc/net/tcp6. -// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. -func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { - return newNetTCPSummary(fs.proc.Path("net/tcp6")) -} - -// newNetTCP creates a new NetTCP{,6} from the contents of the given file. -func newNetTCP(file string) (NetTCP, error) { - n, err := newNetIPSocket(file) - n1 := NetTCP(n) - return n1, err -} - -func newNetTCPSummary(file string) (*NetTCPSummary, error) { - n, err := newNetIPSocketSummary(file) - if n == nil { - return nil, err - } - n1 := NetTCPSummary(*n) - return &n1, err -} diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go deleted file mode 100644 index 13994c1782..0000000000 --- a/vendor/github.com/prometheus/procfs/net_tls_stat.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2023 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// TLSStat struct represents data in /proc/net/tls_stat. -// See https://docs.kernel.org/networking/tls.html#statistics -type TLSStat struct { - // number of TX sessions currently installed where host handles cryptography - TLSCurrTxSw int - // number of RX sessions currently installed where host handles cryptography - TLSCurrRxSw int - // number of TX sessions currently installed where NIC handles cryptography - TLSCurrTxDevice int - // number of RX sessions currently installed where NIC handles cryptography - TLSCurrRxDevice int - //number of TX sessions opened with host cryptography - TLSTxSw int - //number of RX sessions opened with host cryptography - TLSRxSw int - // number of TX sessions opened with NIC cryptography - TLSTxDevice int - // number of RX sessions opened with NIC cryptography - TLSRxDevice int - // record decryption failed (e.g. due to incorrect authentication tag) - TLSDecryptError int - // number of RX resyncs sent to NICs handling cryptography - TLSRxDeviceResync int - // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records. - TLSDecryptRetry int - // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. - TLSRxNoPadViolation int -} - -// NewTLSStat reads the tls_stat statistics. 
-func NewTLSStat() (TLSStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return TLSStat{}, err - } - - return fs.NewTLSStat() -} - -// NewTLSStat reads the tls_stat statistics. -func (fs FS) NewTLSStat() (TLSStat, error) { - file, err := os.Open(fs.proc.Path("net/tls_stat")) - if err != nil { - return TLSStat{}, err - } - defer file.Close() - - var ( - tlsstat = TLSStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return TLSStat{}, err - } - - switch name { - case "TlsCurrTxSw": - tlsstat.TLSCurrTxSw = value - case "TlsCurrRxSw": - tlsstat.TLSCurrRxSw = value - case "TlsCurrTxDevice": - tlsstat.TLSCurrTxDevice = value - case "TlsCurrRxDevice": - tlsstat.TLSCurrRxDevice = value - case "TlsTxSw": - tlsstat.TLSTxSw = value - case "TlsRxSw": - tlsstat.TLSRxSw = value - case "TlsTxDevice": - tlsstat.TLSTxDevice = value - case "TlsRxDevice": - tlsstat.TLSRxDevice = value - case "TlsDecryptError": - tlsstat.TLSDecryptError = value - case "TlsRxDeviceResync": - tlsstat.TLSRxDeviceResync = value - case "TlsDecryptRetry": - tlsstat.TLSDecryptRetry = value - case "TlsRxNoPadViolation": - tlsstat.TLSRxNoPadViolation = value - } - - } - - return tlsstat, s.Err() -} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go deleted file mode 100644 index 9ac3daf2d4..0000000000 --- a/vendor/github.com/prometheus/procfs/net_udp.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -type ( - // NetUDP represents the contents of /proc/net/udp{,6} file without the header. - NetUDP []*netIPSocketLine - - // NetUDPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetUDP it does not collect - // the parsed lines into a slice. - NetUDPSummary NetIPSocketSummary -) - -// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp. -func (fs FS) NetUDP() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp")) -} - -// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp6. -func (fs FS) NetUDP6() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp6")) -} - -// NetUDPSummary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp. -func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp")) -} - -// NetUDP6Summary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp6. -func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp6")) -} - -// newNetUDP creates a new NetUDP{,6} from the contents of the given file. 
-func newNetUDP(file string) (NetUDP, error) { - n, err := newNetIPSocket(file) - n1 := NetUDP(n) - return n1, err -} - -func newNetUDPSummary(file string) (*NetUDPSummary, error) { - n, err := newNetIPSocketSummary(file) - if n == nil { - return nil, err - } - n1 := NetUDPSummary(*n) - return &n1, err -} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go deleted file mode 100644 index d7e0cacb4c..0000000000 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// For the proc file format details, -// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 -// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. - -// Constants for the various /proc/net/unix enumerations. -// TODO: match against x/sys/unix or similar? -const ( - netUnixTypeStream = 1 - netUnixTypeDgram = 2 - netUnixTypeSeqpacket = 5 - - netUnixFlagDefault = 0 - netUnixFlagListen = 1 << 16 - - netUnixStateUnconnected = 1 - netUnixStateConnecting = 2 - netUnixStateConnected = 3 - netUnixStateDisconnected = 4 -) - -// NetUNIXType is the type of the type field. -type NetUNIXType uint64 - -// NetUNIXFlags is the type of the flags field. 
-type NetUNIXFlags uint64 - -// NetUNIXState is the type of the state field. -type NetUNIXState uint64 - -// NetUNIXLine represents a line of /proc/net/unix. -type NetUNIXLine struct { - KernelPtr string - RefCount uint64 - Protocol uint64 - Flags NetUNIXFlags - Type NetUNIXType - State NetUNIXState - Inode uint64 - Path string -} - -// NetUNIX holds the data read from /proc/net/unix. -type NetUNIX struct { - Rows []*NetUNIXLine -} - -// NetUNIX returns data read from /proc/net/unix. -func (fs FS) NetUNIX() (*NetUNIX, error) { - return readNetUNIX(fs.proc.Path("net/unix")) -} - -// readNetUNIX reads data in /proc/net/unix format from the specified file. -func readNetUNIX(file string) (*NetUNIX, error) { - // This file could be quite large and a streaming read is desirable versus - // reading the entire contents at once. - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - return parseNetUNIX(f) -} - -// parseNetUNIX creates a NetUnix structure from the incoming stream. -func parseNetUNIX(r io.Reader) (*NetUNIX, error) { - // Begin scanning by checking for the existence of Inode. - s := bufio.NewScanner(r) - s.Scan() - - // From the man page of proc(5), it does not contain an Inode field, - // but in actually it exists. This code works for both cases. 
- hasInode := strings.Contains(s.Text(), "Inode") - - // Expect a minimum number of fields, but Inode and Path are optional: - // Num RefCount Protocol Flags Type St Inode Path - minFields := 6 - if hasInode { - minFields++ - } - - var nu NetUNIX - for s.Scan() { - line := s.Text() - item, err := nu.parseLine(line, hasInode, minFields) - if err != nil { - return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) - } - - nu.Rows = append(nu.Rows, item) - } - - if err := s.Err(); err != nil { - return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err) - } - - return &nu, nil -} - -func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { - fields := strings.Fields(line) - - l := len(fields) - if l < minFields { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) - } - - // Field offsets are as follows: - // Num RefCount Protocol Flags Type St Inode Path - - kernelPtr := strings.TrimSuffix(fields[0], ":") - - users, err := u.parseUsers(fields[1]) - if err != nil { - return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err) - } - - flags, err := u.parseFlags(fields[3]) - if err != nil { - return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) - } - - typ, err := u.parseType(fields[4]) - if err != nil { - return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err) - } - - state, err := u.parseState(fields[5]) - if err != nil { - return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err) - } - - var inode uint64 - if hasInode { - inode, err = u.parseInode(fields[6]) - if err != nil { - return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err) - } - } - - n := &NetUNIXLine{ - KernelPtr: kernelPtr, - RefCount: users, - Type: typ, - Flags: flags, - State: state, - Inode: inode, - } - - // 
Path field is optional. - if l > minFields { - // Path occurs at either index 6 or 7 depending on whether inode is - // already present. - pathIdx := 7 - if !hasInode { - pathIdx-- - } - - n.Path = fields[pathIdx] - } - - return n, nil -} - -func (u NetUNIX) parseUsers(s string) (uint64, error) { - return strconv.ParseUint(s, 16, 32) -} - -func (u NetUNIX) parseType(s string) (NetUNIXType, error) { - typ, err := strconv.ParseUint(s, 16, 16) - if err != nil { - return 0, err - } - - return NetUNIXType(typ), nil -} - -func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { - flags, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return 0, err - } - - return NetUNIXFlags(flags), nil -} - -func (u NetUNIX) parseState(s string) (NetUNIXState, error) { - st, err := strconv.ParseInt(s, 16, 8) - if err != nil { - return 0, err - } - - return NetUNIXState(st), nil -} - -func (u NetUNIX) parseInode(s string) (uint64, error) { - return strconv.ParseUint(s, 10, 64) -} - -func (t NetUNIXType) String() string { - switch t { - case netUnixTypeStream: - return "stream" - case netUnixTypeDgram: - return "dgram" - case netUnixTypeSeqpacket: - return "seqpacket" - } - return "unknown" -} - -func (f NetUNIXFlags) String() string { - switch f { - case netUnixFlagListen: - return "listen" - default: - return "default" - } -} - -func (s NetUNIXState) String() string { - switch s { - case netUnixStateUnconnected: - return "unconnected" - case netUnixStateConnecting: - return "connecting" - case netUnixStateConnected: - return "connected" - case netUnixStateDisconnected: - return "disconnected" - } - return "unknown" -} diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go deleted file mode 100644 index 7c597bc870..0000000000 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2023 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Wireless models the content of /proc/net/wireless. -type Wireless struct { - Name string - - // Status is the current 4-digit hex value status of the interface. - Status uint64 - - // QualityLink is the link quality. - QualityLink int - - // QualityLevel is the signal gain (dBm). - QualityLevel int - - // QualityNoise is the signal noise baseline (dBm). - QualityNoise int - - // DiscardedNwid is the number of discarded packets with wrong nwid/essid. - DiscardedNwid int - - // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP). - DiscardedCrypt int - - // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly. - DiscardedFrag int - - // DiscardedRetry is the number of discarded packets that reached max MAC retries. - DiscardedRetry int - - // DiscardedMisc is the number of discarded packets for other reasons. - DiscardedMisc int - - // MissedBeacon is the number of missed beacons/superframe. - MissedBeacon int -} - -// Wireless returns kernel wireless statistics. 
-func (fs FS) Wireless() ([]*Wireless, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless")) - if err != nil { - return nil, err - } - - m, err := parseWireless(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) - } - - return m, nil -} - -// parseWireless parses the contents of /proc/net/wireless. -/* -Inter-| sta-| Quality | Discarded packets | Missed | WE -face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 - eth1: 0000 5. -256. -10. 0 1 0 3 0 0 - eth2: 0000 5. -256. -20. 0 2 0 4 0 0 -*/ -func parseWireless(r io.Reader) ([]*Wireless, error) { - var ( - interfaces []*Wireless - scanner = bufio.NewScanner(r) - ) - - for n := 0; scanner.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line := scanner.Text() - - parts := strings.Split(line, ":") - if len(parts) != 2 { - return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) - } - - name := strings.TrimSpace(parts[0]) - stats := strings.Fields(parts[1]) - - if len(stats) < 10 { - return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) - } - - status, err := strconv.ParseUint(stats[0], 16, 16) - if err != nil { - return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) - } - - qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) - if err != nil { - return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) - } - - qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) - if err != nil { - return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) - } - - qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) - if err != nil { - return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) - } - - dnwid, err := 
strconv.Atoi(stats[4]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) - } - - dcrypt, err := strconv.Atoi(stats[5]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) - } - - dfrag, err := strconv.Atoi(stats[6]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) - } - - dretry, err := strconv.Atoi(stats[7]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) - } - - dmisc, err := strconv.Atoi(stats[8]) - if err != nil { - return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) - } - - mbeacon, err := strconv.Atoi(stats[9]) - if err != nil { - return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) - } - - w := &Wireless{ - Name: name, - Status: status, - QualityLink: qlink, - QualityLevel: qlevel, - QualityNoise: qnoise, - DiscardedNwid: dnwid, - DiscardedCrypt: dcrypt, - DiscardedFrag: dfrag, - DiscardedRetry: dretry, - DiscardedMisc: dmisc, - MissedBeacon: mbeacon, - } - - interfaces = append(interfaces, w) - } - - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) - } - - return interfaces, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go deleted file mode 100644 index 932ef20468..0000000000 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2017 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// XfrmStat models the contents of /proc/net/xfrm_stat. -type XfrmStat struct { - // All errors which are not matched by other - XfrmInError int - // No buffer is left - XfrmInBufferError int - // Header Error - XfrmInHdrError int - // No state found - // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong - XfrmInNoStates int - // Transformation protocol specific error - // e.g. SA Key is wrong - XfrmInStateProtoError int - // Transformation mode specific error - XfrmInStateModeError int - // Sequence error - // e.g. sequence number is out of window - XfrmInStateSeqError int - // State is expired - XfrmInStateExpired int - // State has mismatch option - // e.g. UDP encapsulation type is mismatched - XfrmInStateMismatch int - // State is invalid - XfrmInStateInvalid int - // No matching template for states - // e.g. Inbound SAs are correct but SP rule is wrong - XfrmInTmplMismatch int - // No policy is found for states - // e.g. 
Inbound SAs are correct but no SP is found - XfrmInNoPols int - // Policy discards - XfrmInPolBlock int - // Policy error - XfrmInPolError int - // All errors which are not matched by others - XfrmOutError int - // Bundle generation error - XfrmOutBundleGenError int - // Bundle check error - XfrmOutBundleCheckError int - // No state was found - XfrmOutNoStates int - // Transformation protocol specific error - XfrmOutStateProtoError int - // Transportation mode specific error - XfrmOutStateModeError int - // Sequence error - // i.e sequence number overflow - XfrmOutStateSeqError int - // State is expired - XfrmOutStateExpired int - // Policy discads - XfrmOutPolBlock int - // Policy is dead - XfrmOutPolDead int - // Policy Error - XfrmOutPolError int - // Forward routing of a packet is not allowed - XfrmFwdHdrError int - // State is invalid, perhaps expired - XfrmOutStateInvalid int - // State hasn’t been fully acquired before use - XfrmAcquireError int -} - -// NewXfrmStat reads the xfrm_stat statistics. -func NewXfrmStat() (XfrmStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return XfrmStat{}, err - } - - return fs.NewXfrmStat() -} - -// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
-func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.proc.Path("net/xfrm_stat")) - if err != nil { - return XfrmStat{}, err - } - defer file.Close() - - var ( - x = XfrmStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return XfrmStat{}, err - } - - switch name { - case "XfrmInError": - x.XfrmInError = value - case "XfrmInBufferError": - x.XfrmInBufferError = value - case "XfrmInHdrError": - x.XfrmInHdrError = value - case "XfrmInNoStates": - x.XfrmInNoStates = value - case "XfrmInStateProtoError": - x.XfrmInStateProtoError = value - case "XfrmInStateModeError": - x.XfrmInStateModeError = value - case "XfrmInStateSeqError": - x.XfrmInStateSeqError = value - case "XfrmInStateExpired": - x.XfrmInStateExpired = value - case "XfrmInStateInvalid": - x.XfrmInStateInvalid = value - case "XfrmInTmplMismatch": - x.XfrmInTmplMismatch = value - case "XfrmInNoPols": - x.XfrmInNoPols = value - case "XfrmInPolBlock": - x.XfrmInPolBlock = value - case "XfrmInPolError": - x.XfrmInPolError = value - case "XfrmOutError": - x.XfrmOutError = value - case "XfrmInStateMismatch": - x.XfrmInStateMismatch = value - case "XfrmOutBundleGenError": - x.XfrmOutBundleGenError = value - case "XfrmOutBundleCheckError": - x.XfrmOutBundleCheckError = value - case "XfrmOutNoStates": - x.XfrmOutNoStates = value - case "XfrmOutStateProtoError": - x.XfrmOutStateProtoError = value - case "XfrmOutStateModeError": - x.XfrmOutStateModeError = value - case "XfrmOutStateSeqError": - x.XfrmOutStateSeqError = value - case "XfrmOutStateExpired": - x.XfrmOutStateExpired = value - case "XfrmOutPolBlock": - x.XfrmOutPolBlock = value - case "XfrmOutPolDead": - x.XfrmOutPolDead = value - case "XfrmOutPolError": - x.XfrmOutPolError = value - case 
"XfrmFwdHdrError": - x.XfrmFwdHdrError = value - case "XfrmOutStateInvalid": - x.XfrmOutStateInvalid = value - case "XfrmAcquireError": - x.XfrmAcquireError = value - } - - } - - return x, s.Err() -} diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go deleted file mode 100644 index 742dff453b..0000000000 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "os" - "path/filepath" - "strconv" - "strings" -) - -// NetStat contains statistics for all the counters from one file. -type NetStat struct { - Stats map[string][]uint64 - Filename string -} - -// NetStat retrieves stats from `/proc/net/stat/`. -func (fs FS) NetStat() ([]NetStat, error) { - statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) - if err != nil { - return nil, err - } - - var netStatsTotal []NetStat - - for _, filePath := range statFiles { - procNetstat, err := parseNetstat(filePath) - if err != nil { - return nil, err - } - procNetstat.Filename = filepath.Base(filePath) - - netStatsTotal = append(netStatsTotal, procNetstat) - } - return netStatsTotal, nil -} - -// parseNetstat parses the metrics from `/proc/net/stat/` file -// and returns a NetStat structure. 
-func parseNetstat(filePath string) (NetStat, error) { - netStat := NetStat{ - Stats: make(map[string][]uint64), - } - file, err := os.Open(filePath) - if err != nil { - return netStat, err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - scanner.Scan() - - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) - - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return NetStat{}, err - } - netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) - } - } - - return netStat, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go deleted file mode 100644 index 368187fa88..0000000000 --- a/vendor/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs FS -} - -// Procs represents a list of Proc structs. 
-type Procs []Proc - -var ( - ErrFileParse = errors.New("error parsing file") - ErrFileRead = errors.New("error reading file") - ErrMountPoint = errors.New("error accessing mount point") -) - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process read via /proc/self. -func Self() (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil || errors.Unwrap(err) == ErrMountPoint { - return Proc{}, err - } - return fs.Self() -} - -// NewProc returns a process for the given pid under /proc. -func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. -func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllProcs() -} - -// Self returns a process for the current process. -func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.proc.Path("self")) - if err != nil { - return Proc{}, err - } - pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// NewProc returns a process for the given pid. -// -// Deprecated: Use fs.Proc() instead. -func (fs FS) NewProc(pid int) (Proc, error) { - return fs.Proc(pid) -} - -// Proc returns a process for the given pid. -func (fs FS) Proc(pid int) (Proc, error) { - if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { - return Proc{}, err - } - return Proc{PID: pid, fs: fs}, nil -} - -// AllProcs returns a list of all currently available processes. 
-func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.proc.Path()) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - data, err := util.ReadFileNoStat(p.path("cmdline")) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil -} - -// Wchan returns the wchan (wait channel) of a process. -func (p Proc) Wchan() (string, error) { - f, err := os.Open(p.path("wchan")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := io.ReadAll(f) - if err != nil { - return "", err - } - - wchan := string(data) - if wchan == "" || wchan == "0" { - return "", nil - } - - return wchan, nil -} - -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - data, err := util.ReadFileNoStat(p.path("comm")) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - -// Executable returns the absolute path of the executable command of a process. -func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// Cwd returns the absolute path to the current working directory of the process. -func (p Proc) Cwd() (string, error) { - wd, err := os.Readlink(p.path("cwd")) - if os.IsNotExist(err) { - return "", nil - } - - return wd, err -} - -// RootDir returns the absolute path to the process's root directory (as set by chroot). 
-func (p Proc) RootDir() (string, error) { - rdir, err := os.Readlink(p.path("root")) - if os.IsNotExist(err) { - return "", nil - } - - return rdir, err -} - -// FileDescriptors returns the currently open file descriptors of a process. -func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. -func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 - if p.fs.isReal { - stat, err := os.Stat(p.path("fd")) - if err != nil { - return 0, err - } - - size := stat.Size() - if size > 0 { - return int(size), nil - } - } - - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -// MountStats retrieves statistics and configuration for mount points in a -// process's namespace. 
-func (p Proc) MountStats() ([]*Mount, error) { - f, err := os.Open(p.path("mountstats")) - if err != nil { - return nil, err - } - defer f.Close() - - return parseMountStats(f) -} - -// MountInfo retrieves mount information for mount points in a -// process's namespace. -// It supplies information missing in `/proc/self/mounts` and -// fixes various other problems with that file too. -func (p Proc) MountInfo() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(p.path("mountinfo")) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) - } - - return names, nil -} - -func (p Proc) path(pa ...string) string { - return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) -} - -// FileDescriptorsInfo retrieves information about all file descriptors of -// the process. -func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - var fdinfos ProcFDInfos - - for _, n := range names { - fdinfo, err := p.FDInfo(n) - if err != nil { - continue - } - fdinfos = append(fdinfos, *fdinfo) - } - - return fdinfos, nil -} - -// Schedstat returns task scheduling information for the process. 
-func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := os.ReadFile(p.path("schedstat")) - if err != nil { - return ProcSchedstat{}, err - } - return parseProcSchedstat(string(contents)) -} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go deleted file mode 100644 index 4a64347c03..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource -// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies -// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in -// this hierarchy' (where==what path on the specific cgroupfs). 
By prefixing this path with the mount point of -// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID -// in this hierarchy -// -// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html -type Cgroup struct { - // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one - // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number - HierarchyID int - // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For - // Cgroups V2 this may be empty, as all active controllers use the same hierarchy - Controllers []string - // Path of this control group, relative to the mount point of the cgroupfs representing this specific - // hierarchy - Path string -} - -// parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path. -func parseCgroupString(cgroupStr string) (*Cgroup, error) { - var err error - - fields := strings.SplitN(cgroupStr, ":", 3) - if len(fields) < 3 { - return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) - } - - cgroup := &Cgroup{ - Path: fields[2], - Controllers: nil, - } - cgroup.HierarchyID, err = strconv.Atoi(fields[0]) - if err != nil { - return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) - } - if fields[1] != "" { - ssNames := strings.Split(fields[1], ",") - cgroup.Controllers = append(cgroup.Controllers, ssNames...) - } - return cgroup, nil -} - -// parseCgroups reads each line of the /proc/[pid]/cgroup file. 
-func parseCgroups(data []byte) ([]Cgroup, error) { - var cgroups []Cgroup - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseCgroupString(mountString) - if err != nil { - return nil, err - } - cgroups = append(cgroups, *parsedMounts) - } - - err := scanner.Err() - return cgroups, err -} - -// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process -// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system. -func (p Proc) Cgroups() ([]Cgroup, error) { - data, err := util.ReadFileNoStat(p.path("cgroup")) - if err != nil { - return nil, err - } - return parseCgroups(data) -} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go deleted file mode 100644 index 5dd4938999..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CgroupSummary models one line from /proc/cgroups. 
-// This file contains information about the controllers that are compiled into the kernel. -// -// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html -type CgroupSummary struct { - // The name of the controller. controller is also known as subsystem. - SubsysName string - // The unique ID of the cgroup hierarchy on which this controller is mounted. - Hierarchy int - // The number of control groups in this hierarchy using this controller. - Cgroups int - // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled - Enabled int -} - -// parseCgroupSummary parses each line of the /proc/cgroup file -// Line format is `subsys_name hierarchy num_cgroups enabled`. -func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { - var err error - - fields := strings.Fields(CgroupSummaryStr) - // require at least 4 fields - if len(fields) < 4 { - return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) - } - - CgroupSummary := &CgroupSummary{ - SubsysName: fields[0], - } - CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) - if err != nil { - return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) - } - CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) - if err != nil { - return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) - } - CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) - if err != nil { - return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) - } - return CgroupSummary, nil -} - -// parseCgroupSummary reads each line of the /proc/cgroup file. 
-func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { - var CgroupSummarys []CgroupSummary - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - CgroupSummaryString := scanner.Text() - // ignore comment lines - if strings.HasPrefix(CgroupSummaryString, "#") { - continue - } - CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) - if err != nil { - return nil, err - } - CgroupSummarys = append(CgroupSummarys, *CgroupSummary) - } - - err := scanner.Err() - return CgroupSummarys, err -} - -// CgroupSummarys returns information about current /proc/cgroups. -func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) - if err != nil { - return nil, err - } - return parseCgroupSummary(data) -} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go deleted file mode 100644 index 57a89895d6..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Environ reads process environments from `/proc//environ`. 
-func (p Proc) Environ() ([]string, error) { - environments := make([]string, 0) - - data, err := util.ReadFileNoStat(p.path("environ")) - if err != nil { - return environments, err - } - - environments = strings.Split(string(data), "\000") - if len(environments) > 0 { - environments = environments[:len(environments)-1] - } - - return environments, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go deleted file mode 100644 index fa761b3529..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "regexp" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) - rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) - rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) - rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) - rInotify = regexp.MustCompile(`^inotify`) - rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) -) - -// ProcFDInfo contains represents file descriptor information. 
-type ProcFDInfo struct { - // File descriptor - FD string - // File offset - Pos string - // File access mode and status flags - Flags string - // Mount point ID - MntID string - // Inode number - Ino string - // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) - InotifyInfos []InotifyInfo -} - -// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. -func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { - data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) - if err != nil { - return nil, err - } - - var text, pos, flags, mntid, ino string - var inotify []InotifyInfo - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - text = scanner.Text() - if rPos.MatchString(text) { - pos = rPos.FindStringSubmatch(text)[1] - } else if rFlags.MatchString(text) { - flags = rFlags.FindStringSubmatch(text)[1] - } else if rMntID.MatchString(text) { - mntid = rMntID.FindStringSubmatch(text)[1] - } else if rIno.MatchString(text) { - ino = rIno.FindStringSubmatch(text)[1] - } else if rInotify.MatchString(text) { - newInotify, err := parseInotifyInfo(text) - if err != nil { - return nil, err - } - inotify = append(inotify, *newInotify) - } - } - - i := &ProcFDInfo{ - FD: fd, - Pos: pos, - Flags: flags, - MntID: mntid, - Ino: ino, - InotifyInfos: inotify, - } - - return i, nil -} - -// InotifyInfo represents a single inotify line in the fdinfo file. -type InotifyInfo struct { - // Watch descriptor number - WD string - // Inode number - Ino string - // Device ID - Sdev string - // Mask of events being monitored - Mask string -} - -// InotifyInfo constructor. Only available on kernel 3.8+. 
-func parseInotifyInfo(line string) (*InotifyInfo, error) { - m := rInotifyParts.FindStringSubmatch(line) - if len(m) >= 4 { - var mask string - if len(m) == 5 { - mask = m[4] - } - i := &InotifyInfo{ - WD: m[1], - Ino: m[2], - Sdev: m[3], - Mask: mask, - } - return i, nil - } - return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) -} - -// ProcFDInfos represents a list of ProcFDInfo structs. -type ProcFDInfos []ProcFDInfo - -func (p ProcFDInfos) Len() int { return len(p) } -func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } - -// InotifyWatchLen returns the total number of inotify watches. -func (p ProcFDInfos) InotifyWatchLen() (int, error) { - length := 0 - for _, f := range p { - length += len(f.InotifyInfos) - } - - return length, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go deleted file mode 100644 index 86b4b45246..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Interrupt represents a single interrupt line. -type Interrupt struct { - // Info is the type of interrupt. 
- Info string - // Devices is the name of the device that is located at that IRQ - Devices string - // Values is the number of interrupts per CPU. - Values []string -} - -// Interrupts models the content of /proc/interrupts. Key is the IRQ number. -// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts -// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output -type Interrupts map[string]Interrupt - -// Interrupts creates a new instance from a given Proc instance. -func (p Proc) Interrupts() (Interrupts, error) { - data, err := util.ReadFileNoStat(p.path("interrupts")) - if err != nil { - return nil, err - } - return parseInterrupts(bytes.NewReader(data)) -} - -func parseInterrupts(r io.Reader) (Interrupts, error) { - var ( - interrupts = Interrupts{} - scanner = bufio.NewScanner(r) - ) - - if !scanner.Scan() { - return nil, errors.New("interrupts empty") - } - cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu - - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - if len(parts) == 0 { // skip empty lines - continue - } - if len(parts) < 2 { - return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) - } - intName := parts[0][:len(parts[0])-1] // remove trailing : - - if len(parts) == 2 { - interrupts[intName] = Interrupt{ - Info: "", - Devices: "", - Values: []string{ - parts[1], - }, - } - continue - } - - intr := Interrupt{ - Values: parts[1 : cpuNum+1], - } - - if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt - intr.Info = parts[cpuNum+1] - intr.Devices = strings.Join(parts[cpuNum+2:], " ") - } else { - intr.Info = strings.Join(parts[cpuNum+1:], " ") - } - interrupts[intName] = intr - } - - return interrupts, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go 
deleted file mode 100644 index d15b66ddb6..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcIO models the content of /proc//io. -type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// IO creates a new ProcIO instance from a given Proc instance. 
-func (p Proc) IO() (ProcIO, error) { - pio := ProcIO{} - - data, err := util.ReadFileNoStat(p.path("io")) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" //nolint:misspell - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - - return pio, err -} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index 9530b14bc6..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. -type ProcLimits struct { - // CPU time limit in seconds. - CPUTime uint64 - // Maximum size of files that the process may create. - FileSize uint64 - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize uint64 - // Maximum size of the process stack in bytes. - StackSize uint64 - // Maximum size of a core file. 
- CoreFileSize uint64 - // Limit of the process's resident set in pages. - ResidentSet uint64 - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes uint64 - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles uint64 - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory uint64 - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace uint64 - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. - FileLocks uint64 - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals uint64 - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize uint64 - // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority uint64 - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). - RealtimePriority uint64 - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. - RealtimeTimeout uint64 -} - -const ( - limitsFields = 4 - limitsUnlimited = "unlimited" -) - -var ( - limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) -) - -// NewLimits returns the current soft limits of the process. -// -// Deprecated: Use p.Limits() instead. -func (p Proc) NewLimits() (ProcLimits, error) { - return p.Limits() -} - -// Limits returns the current soft limits of the process. 
-func (p Proc) Limits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - - s.Scan() // Skip limits header - - for s.Scan() { - //fields := limitsMatch.Split(s.Text(), limitsFields) - fields := limitsMatch.FindStringSubmatch(s.Text()) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) - } - - switch fields[1] { - case "Max cpu time": - l.CPUTime, err = parseUint(fields[2]) - case "Max file size": - l.FileSize, err = parseUint(fields[2]) - case "Max data size": - l.DataSize, err = parseUint(fields[2]) - case "Max stack size": - l.StackSize, err = parseUint(fields[2]) - case "Max core file size": - l.CoreFileSize, err = parseUint(fields[2]) - case "Max resident set": - l.ResidentSet, err = parseUint(fields[2]) - case "Max processes": - l.Processes, err = parseUint(fields[2]) - case "Max open files": - l.OpenFiles, err = parseUint(fields[2]) - case "Max locked memory": - l.LockedMemory, err = parseUint(fields[2]) - case "Max address space": - l.AddressSpace, err = parseUint(fields[2]) - case "Max file locks": - l.FileLocks, err = parseUint(fields[2]) - case "Max pending signals": - l.PendingSignals, err = parseUint(fields[2]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseUint(fields[2]) - case "Max nice priority": - l.NicePriority, err = parseUint(fields[2]) - case "Max realtime priority": - l.RealtimePriority, err = parseUint(fields[2]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseUint(fields[2]) - } - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseUint(s string) (uint64, error) { - if s == limitsUnlimited { - return 18446744073709551615, nil - } - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, 
err) - } - return i, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go deleted file mode 100644 index 7e75c286b5..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !js - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. -type ProcMapPermissions struct { - // mapping has the [R]ead flag set - Read bool - // mapping has the [W]rite flag set - Write bool - // mapping has the [X]ecutable flag set - Execute bool - // mapping has the [S]hared flag set - Shared bool - // mapping is marked as [P]rivate (copy on write) - Private bool -} - -// ProcMap contains the process memory-mappings of the process -// read from `/proc/[pid]/maps`. -type ProcMap struct { - // The start address of current mapping. 
- StartAddr uintptr - // The end address of the current mapping - EndAddr uintptr - // The permissions for this mapping - Perms *ProcMapPermissions - // The current offset into the file/fd (e.g., shared libs) - Offset int64 - // Device owner of this mapping (major:minor) in Mkdev format. - Dev uint64 - // The inode of the device above - Inode uint64 - // The file or psuedofile (or empty==anonymous) - Pathname string -} - -// parseDevice parses the device token of a line and converts it to a dev_t -// (mkdev) like structure. -func parseDevice(s string) (uint64, error) { - i := strings.Index(s, ":") - if i == -1 { - return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) - } - - major, err := strconv.ParseUint(s[0:i], 16, 0) - if err != nil { - return 0, err - } - - minor, err := strconv.ParseUint(s[i+1:], 16, 0) - if err != nil { - return 0, err - } - - return unix.Mkdev(uint32(major), uint32(minor)), nil -} - -// parseAddress converts a hex-string to a uintptr. -func parseAddress(s string) (uintptr, error) { - a, err := strconv.ParseUint(s, 16, 0) - if err != nil { - return 0, err - } - - return uintptr(a), nil -} - -// parseAddresses parses the start-end address. -func parseAddresses(s string) (uintptr, uintptr, error) { - idx := strings.Index(s, "-") - if idx == -1 { - return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) - } - - saddr, err := parseAddress(s[0:idx]) - if err != nil { - return 0, 0, err - } - - eaddr, err := parseAddress(s[idx+1:]) - if err != nil { - return 0, 0, err - } - - return saddr, eaddr, nil -} - -// parsePermissions parses a token and returns any that are set. 
-func parsePermissions(s string) (*ProcMapPermissions, error) { - if len(s) < 4 { - return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) - } - - perms := ProcMapPermissions{} - for _, ch := range s { - switch ch { - case 'r': - perms.Read = true - case 'w': - perms.Write = true - case 'x': - perms.Execute = true - case 'p': - perms.Private = true - case 's': - perms.Shared = true - } - } - - return &perms, nil -} - -// parseProcMap will attempt to parse a single line within a proc/[pid]/maps -// buffer. -func parseProcMap(text string) (*ProcMap, error) { - fields := strings.Fields(text) - if len(fields) < 5 { - return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) - } - - saddr, eaddr, err := parseAddresses(fields[0]) - if err != nil { - return nil, err - } - - perms, err := parsePermissions(fields[1]) - if err != nil { - return nil, err - } - - offset, err := strconv.ParseInt(fields[2], 16, 0) - if err != nil { - return nil, err - } - - device, err := parseDevice(fields[3]) - if err != nil { - return nil, err - } - - inode, err := strconv.ParseUint(fields[4], 10, 0) - if err != nil { - return nil, err - } - - pathname := "" - - if len(fields) >= 5 { - pathname = strings.Join(fields[5:], " ") - } - - return &ProcMap{ - StartAddr: saddr, - EndAddr: eaddr, - Perms: perms, - Offset: offset, - Dev: device, - Inode: inode, - Pathname: pathname, - }, nil -} - -// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the -// process. 
-func (p Proc) ProcMaps() ([]*ProcMap, error) { - file, err := os.Open(p.path("maps")) - if err != nil { - return nil, err - } - defer file.Close() - - maps := []*ProcMap{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - m, err := parseProcMap(scan.Text()) - if err != nil { - return nil, err - } - - maps = append(maps, m) - } - - return maps, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go deleted file mode 100644 index 4248c1716e..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcNetstat models the content of /proc//net/netstat. -type ProcNetstat struct { - // The process ID. 
- PID int - TcpExt - IpExt -} - -type TcpExt struct { // nolint:revive - SyncookiesSent *float64 - SyncookiesRecv *float64 - SyncookiesFailed *float64 - EmbryonicRsts *float64 - PruneCalled *float64 - RcvPruned *float64 - OfoPruned *float64 - OutOfWindowIcmps *float64 - LockDroppedIcmps *float64 - ArpFilter *float64 - TW *float64 - TWRecycled *float64 - TWKilled *float64 - PAWSActive *float64 - PAWSEstab *float64 - DelayedACKs *float64 - DelayedACKLocked *float64 - DelayedACKLost *float64 - ListenOverflows *float64 - ListenDrops *float64 - TCPHPHits *float64 - TCPPureAcks *float64 - TCPHPAcks *float64 - TCPRenoRecovery *float64 - TCPSackRecovery *float64 - TCPSACKReneging *float64 - TCPSACKReorder *float64 - TCPRenoReorder *float64 - TCPTSReorder *float64 - TCPFullUndo *float64 - TCPPartialUndo *float64 - TCPDSACKUndo *float64 - TCPLossUndo *float64 - TCPLostRetransmit *float64 - TCPRenoFailures *float64 - TCPSackFailures *float64 - TCPLossFailures *float64 - TCPFastRetrans *float64 - TCPSlowStartRetrans *float64 - TCPTimeouts *float64 - TCPLossProbes *float64 - TCPLossProbeRecovery *float64 - TCPRenoRecoveryFail *float64 - TCPSackRecoveryFail *float64 - TCPRcvCollapsed *float64 - TCPDSACKOldSent *float64 - TCPDSACKOfoSent *float64 - TCPDSACKRecv *float64 - TCPDSACKOfoRecv *float64 - TCPAbortOnData *float64 - TCPAbortOnClose *float64 - TCPAbortOnMemory *float64 - TCPAbortOnTimeout *float64 - TCPAbortOnLinger *float64 - TCPAbortFailed *float64 - TCPMemoryPressures *float64 - TCPMemoryPressuresChrono *float64 - TCPSACKDiscard *float64 - TCPDSACKIgnoredOld *float64 - TCPDSACKIgnoredNoUndo *float64 - TCPSpuriousRTOs *float64 - TCPMD5NotFound *float64 - TCPMD5Unexpected *float64 - TCPMD5Failure *float64 - TCPSackShifted *float64 - TCPSackMerged *float64 - TCPSackShiftFallback *float64 - TCPBacklogDrop *float64 - PFMemallocDrop *float64 - TCPMinTTLDrop *float64 - TCPDeferAcceptDrop *float64 - IPReversePathFilter *float64 - TCPTimeWaitOverflow *float64 - 
TCPReqQFullDoCookies *float64 - TCPReqQFullDrop *float64 - TCPRetransFail *float64 - TCPRcvCoalesce *float64 - TCPRcvQDrop *float64 - TCPOFOQueue *float64 - TCPOFODrop *float64 - TCPOFOMerge *float64 - TCPChallengeACK *float64 - TCPSYNChallenge *float64 - TCPFastOpenActive *float64 - TCPFastOpenActiveFail *float64 - TCPFastOpenPassive *float64 - TCPFastOpenPassiveFail *float64 - TCPFastOpenListenOverflow *float64 - TCPFastOpenCookieReqd *float64 - TCPFastOpenBlackhole *float64 - TCPSpuriousRtxHostQueues *float64 - BusyPollRxPackets *float64 - TCPAutoCorking *float64 - TCPFromZeroWindowAdv *float64 - TCPToZeroWindowAdv *float64 - TCPWantZeroWindowAdv *float64 - TCPSynRetrans *float64 - TCPOrigDataSent *float64 - TCPHystartTrainDetect *float64 - TCPHystartTrainCwnd *float64 - TCPHystartDelayDetect *float64 - TCPHystartDelayCwnd *float64 - TCPACKSkippedSynRecv *float64 - TCPACKSkippedPAWS *float64 - TCPACKSkippedSeq *float64 - TCPACKSkippedFinWait2 *float64 - TCPACKSkippedTimeWait *float64 - TCPACKSkippedChallenge *float64 - TCPWinProbe *float64 - TCPKeepAlive *float64 - TCPMTUPFail *float64 - TCPMTUPSuccess *float64 - TCPWqueueTooBig *float64 -} - -type IpExt struct { // nolint:revive - InNoRoutes *float64 - InTruncatedPkts *float64 - InMcastPkts *float64 - OutMcastPkts *float64 - InBcastPkts *float64 - OutBcastPkts *float64 - InOctets *float64 - OutOctets *float64 - InMcastOctets *float64 - OutMcastOctets *float64 - InBcastOctets *float64 - OutBcastOctets *float64 - InCsumErrors *float64 - InNoECTPkts *float64 - InECT1Pkts *float64 - InECT0Pkts *float64 - InCEPkts *float64 - ReasmOverlaps *float64 -} - -func (p Proc) Netstat() (ProcNetstat, error) { - filename := p.path("net/netstat") - data, err := util.ReadFileNoStat(filename) - if err != nil { - return ProcNetstat{PID: p.PID}, err - } - procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) - procNetstat.PID = p.PID - return procNetstat, err -} - -// parseProcNetstat parses the metrics from 
proc//net/netstat file -// and returns a ProcNetstat structure. -func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { - var ( - scanner = bufio.NewScanner(r) - procNetstat = ProcNetstat{} - ) - - for scanner.Scan() { - nameParts := strings.Split(scanner.Text(), " ") - scanner.Scan() - valueParts := strings.Split(scanner.Text(), " ") - // Remove trailing :. - protocol := strings.TrimSuffix(nameParts[0], ":") - if len(nameParts) != len(valueParts) { - return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", - ErrFileParse, fileName, protocol) - } - for i := 1; i < len(nameParts); i++ { - value, err := strconv.ParseFloat(valueParts[i], 64) - if err != nil { - return procNetstat, err - } - key := nameParts[i] - - switch protocol { - case "TcpExt": - switch key { - case "SyncookiesSent": - procNetstat.SyncookiesSent = &value - case "SyncookiesRecv": - procNetstat.SyncookiesRecv = &value - case "SyncookiesFailed": - procNetstat.SyncookiesFailed = &value - case "EmbryonicRsts": - procNetstat.EmbryonicRsts = &value - case "PruneCalled": - procNetstat.PruneCalled = &value - case "RcvPruned": - procNetstat.RcvPruned = &value - case "OfoPruned": - procNetstat.OfoPruned = &value - case "OutOfWindowIcmps": - procNetstat.OutOfWindowIcmps = &value - case "LockDroppedIcmps": - procNetstat.LockDroppedIcmps = &value - case "ArpFilter": - procNetstat.ArpFilter = &value - case "TW": - procNetstat.TW = &value - case "TWRecycled": - procNetstat.TWRecycled = &value - case "TWKilled": - procNetstat.TWKilled = &value - case "PAWSActive": - procNetstat.PAWSActive = &value - case "PAWSEstab": - procNetstat.PAWSEstab = &value - case "DelayedACKs": - procNetstat.DelayedACKs = &value - case "DelayedACKLocked": - procNetstat.DelayedACKLocked = &value - case "DelayedACKLost": - procNetstat.DelayedACKLost = &value - case "ListenOverflows": - procNetstat.ListenOverflows = &value - case "ListenDrops": - procNetstat.ListenDrops = &value - case "TCPHPHits": - 
procNetstat.TCPHPHits = &value - case "TCPPureAcks": - procNetstat.TCPPureAcks = &value - case "TCPHPAcks": - procNetstat.TCPHPAcks = &value - case "TCPRenoRecovery": - procNetstat.TCPRenoRecovery = &value - case "TCPSackRecovery": - procNetstat.TCPSackRecovery = &value - case "TCPSACKReneging": - procNetstat.TCPSACKReneging = &value - case "TCPSACKReorder": - procNetstat.TCPSACKReorder = &value - case "TCPRenoReorder": - procNetstat.TCPRenoReorder = &value - case "TCPTSReorder": - procNetstat.TCPTSReorder = &value - case "TCPFullUndo": - procNetstat.TCPFullUndo = &value - case "TCPPartialUndo": - procNetstat.TCPPartialUndo = &value - case "TCPDSACKUndo": - procNetstat.TCPDSACKUndo = &value - case "TCPLossUndo": - procNetstat.TCPLossUndo = &value - case "TCPLostRetransmit": - procNetstat.TCPLostRetransmit = &value - case "TCPRenoFailures": - procNetstat.TCPRenoFailures = &value - case "TCPSackFailures": - procNetstat.TCPSackFailures = &value - case "TCPLossFailures": - procNetstat.TCPLossFailures = &value - case "TCPFastRetrans": - procNetstat.TCPFastRetrans = &value - case "TCPSlowStartRetrans": - procNetstat.TCPSlowStartRetrans = &value - case "TCPTimeouts": - procNetstat.TCPTimeouts = &value - case "TCPLossProbes": - procNetstat.TCPLossProbes = &value - case "TCPLossProbeRecovery": - procNetstat.TCPLossProbeRecovery = &value - case "TCPRenoRecoveryFail": - procNetstat.TCPRenoRecoveryFail = &value - case "TCPSackRecoveryFail": - procNetstat.TCPSackRecoveryFail = &value - case "TCPRcvCollapsed": - procNetstat.TCPRcvCollapsed = &value - case "TCPDSACKOldSent": - procNetstat.TCPDSACKOldSent = &value - case "TCPDSACKOfoSent": - procNetstat.TCPDSACKOfoSent = &value - case "TCPDSACKRecv": - procNetstat.TCPDSACKRecv = &value - case "TCPDSACKOfoRecv": - procNetstat.TCPDSACKOfoRecv = &value - case "TCPAbortOnData": - procNetstat.TCPAbortOnData = &value - case "TCPAbortOnClose": - procNetstat.TCPAbortOnClose = &value - case "TCPDeferAcceptDrop": - 
procNetstat.TCPDeferAcceptDrop = &value - case "IPReversePathFilter": - procNetstat.IPReversePathFilter = &value - case "TCPTimeWaitOverflow": - procNetstat.TCPTimeWaitOverflow = &value - case "TCPReqQFullDoCookies": - procNetstat.TCPReqQFullDoCookies = &value - case "TCPReqQFullDrop": - procNetstat.TCPReqQFullDrop = &value - case "TCPRetransFail": - procNetstat.TCPRetransFail = &value - case "TCPRcvCoalesce": - procNetstat.TCPRcvCoalesce = &value - case "TCPRcvQDrop": - procNetstat.TCPRcvQDrop = &value - case "TCPOFOQueue": - procNetstat.TCPOFOQueue = &value - case "TCPOFODrop": - procNetstat.TCPOFODrop = &value - case "TCPOFOMerge": - procNetstat.TCPOFOMerge = &value - case "TCPChallengeACK": - procNetstat.TCPChallengeACK = &value - case "TCPSYNChallenge": - procNetstat.TCPSYNChallenge = &value - case "TCPFastOpenActive": - procNetstat.TCPFastOpenActive = &value - case "TCPFastOpenActiveFail": - procNetstat.TCPFastOpenActiveFail = &value - case "TCPFastOpenPassive": - procNetstat.TCPFastOpenPassive = &value - case "TCPFastOpenPassiveFail": - procNetstat.TCPFastOpenPassiveFail = &value - case "TCPFastOpenListenOverflow": - procNetstat.TCPFastOpenListenOverflow = &value - case "TCPFastOpenCookieReqd": - procNetstat.TCPFastOpenCookieReqd = &value - case "TCPFastOpenBlackhole": - procNetstat.TCPFastOpenBlackhole = &value - case "TCPSpuriousRtxHostQueues": - procNetstat.TCPSpuriousRtxHostQueues = &value - case "BusyPollRxPackets": - procNetstat.BusyPollRxPackets = &value - case "TCPAutoCorking": - procNetstat.TCPAutoCorking = &value - case "TCPFromZeroWindowAdv": - procNetstat.TCPFromZeroWindowAdv = &value - case "TCPToZeroWindowAdv": - procNetstat.TCPToZeroWindowAdv = &value - case "TCPWantZeroWindowAdv": - procNetstat.TCPWantZeroWindowAdv = &value - case "TCPSynRetrans": - procNetstat.TCPSynRetrans = &value - case "TCPOrigDataSent": - procNetstat.TCPOrigDataSent = &value - case "TCPHystartTrainDetect": - procNetstat.TCPHystartTrainDetect = &value - case 
"TCPHystartTrainCwnd": - procNetstat.TCPHystartTrainCwnd = &value - case "TCPHystartDelayDetect": - procNetstat.TCPHystartDelayDetect = &value - case "TCPHystartDelayCwnd": - procNetstat.TCPHystartDelayCwnd = &value - case "TCPACKSkippedSynRecv": - procNetstat.TCPACKSkippedSynRecv = &value - case "TCPACKSkippedPAWS": - procNetstat.TCPACKSkippedPAWS = &value - case "TCPACKSkippedSeq": - procNetstat.TCPACKSkippedSeq = &value - case "TCPACKSkippedFinWait2": - procNetstat.TCPACKSkippedFinWait2 = &value - case "TCPACKSkippedTimeWait": - procNetstat.TCPACKSkippedTimeWait = &value - case "TCPACKSkippedChallenge": - procNetstat.TCPACKSkippedChallenge = &value - case "TCPWinProbe": - procNetstat.TCPWinProbe = &value - case "TCPKeepAlive": - procNetstat.TCPKeepAlive = &value - case "TCPMTUPFail": - procNetstat.TCPMTUPFail = &value - case "TCPMTUPSuccess": - procNetstat.TCPMTUPSuccess = &value - case "TCPWqueueTooBig": - procNetstat.TCPWqueueTooBig = &value - } - case "IpExt": - switch key { - case "InNoRoutes": - procNetstat.InNoRoutes = &value - case "InTruncatedPkts": - procNetstat.InTruncatedPkts = &value - case "InMcastPkts": - procNetstat.InMcastPkts = &value - case "OutMcastPkts": - procNetstat.OutMcastPkts = &value - case "InBcastPkts": - procNetstat.InBcastPkts = &value - case "OutBcastPkts": - procNetstat.OutBcastPkts = &value - case "InOctets": - procNetstat.InOctets = &value - case "OutOctets": - procNetstat.OutOctets = &value - case "InMcastOctets": - procNetstat.InMcastOctets = &value - case "OutMcastOctets": - procNetstat.OutMcastOctets = &value - case "InBcastOctets": - procNetstat.InBcastOctets = &value - case "OutBcastOctets": - procNetstat.OutBcastOctets = &value - case "InCsumErrors": - procNetstat.InCsumErrors = &value - case "InNoECTPkts": - procNetstat.InNoECTPkts = &value - case "InECT1Pkts": - procNetstat.InECT1Pkts = &value - case "InECT0Pkts": - procNetstat.InECT0Pkts = &value - case "InCEPkts": - procNetstat.InCEPkts = &value - case 
"ReasmOverlaps": - procNetstat.ReasmOverlaps = &value - } - } - } - } - return procNetstat, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go deleted file mode 100644 index 0f8f847f95..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Namespace represents a single namespace of a process. -type Namespace struct { - Type string // Namespace type. - Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. -} - -// Namespaces contains all of the namespaces that the process is contained in. -type Namespaces map[string]Namespace - -// Namespaces reads from /proc//ns/* to get the namespaces of which the -// process is a member. 
-func (p Proc) Namespaces() (Namespaces, error) { - d, err := os.Open(p.path("ns")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) - } - - ns := make(Namespaces, len(names)) - for _, name := range names { - target, err := os.Readlink(p.path("ns", name)) - if err != nil { - return nil, err - } - - fields := strings.SplitN(target, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) - } - - typ := fields[0] - inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) - if err != nil { - return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) - } - - ns[name] = Namespace{typ, uint32(inode)} - } - - return ns, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go deleted file mode 100644 index ccd35f153a..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// The PSI / pressure interface is described at -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt -// Each resource (cpu, io, memory, ...) is exposed as a single file. 
-// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. -// Each line contains several averages (over n seconds) and a total in µs. -// -// Example io pressure file: -// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 -// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" - -// PSILine is a single line of values as returned by `/proc/pressure/*`. -// -// The Avg entries are averages over n seconds, as a percentage. -// The Total line is in microseconds. -type PSILine struct { - Avg10 float64 - Avg60 float64 - Avg300 float64 - Total uint64 -} - -// PSIStats represent pressure stall information from /proc/pressure/* -// -// "Some" indicates the share of time in which at least some tasks are stalled. -// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. -type PSIStats struct { - Some *PSILine - Full *PSILine -} - -// PSIStatsForResource reads pressure stall information for the specified -// resource from /proc/pressure/. At time of writing this can be -// either "cpu", "memory" or "io". -func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) - if err != nil { - return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) - } - - return parsePSIStats(bytes.NewReader(data)) -} - -// parsePSIStats parses the specified file for pressure stall information. 
-func parsePSIStats(r io.Reader) (PSIStats, error) { - psiStats := PSIStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - l := scanner.Text() - prefix := strings.Split(l, " ")[0] - switch prefix { - case "some": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Some = &psi - case "full": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Full = &psi - default: - // If we encounter a line with an unknown prefix, ignore it and move on - // Should new measurement types be added in the future we'll simply ignore them instead - // of erroring on retrieval - continue - } - } - - return psiStats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go deleted file mode 100644 index 9a297afcf8..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !windows -// +build !windows - -package procfs - -import ( - "bufio" - "errors" - "os" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - // Match the header line before each mapped zone in `/proc/pid/smaps`. - procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) -) - -type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM. - Rss uint64 - // Process's proportional share of this mapping. - Pss uint64 - // Size in bytes of clean shared pages. - SharedClean uint64 - // Size in bytes of dirty shared pages. - SharedDirty uint64 - // Size in bytes of clean private pages. - PrivateClean uint64 - // Size in bytes of dirty private pages. - PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed. - Referenced uint64 - // Amount of memory that does not belong to any file. - Anonymous uint64 - // Amount would-be-anonymous memory currently on swap. - Swap uint64 - // Process's proportional memory on swap. - SwapPss uint64 -} - -// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the -// process. -// -// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will -// we read and summed. -func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { - data, err := util.ReadFileNoStat(p.path("smaps_rollup")) - if err != nil && os.IsNotExist(err) { - return p.procSMapsRollupManual() - } - if err != nil { - return ProcSMapsRollup{}, err - } - - lines := strings.Split(string(data), "\n") - smaps := ProcSMapsRollup{} - - // skip first line which don't contains information we need - lines = lines[1:] - for _, line := range lines { - if line == "" { - continue - } - - if err := smaps.parseLine(line); err != nil { - return ProcSMapsRollup{}, err - } - } - - return smaps, nil -} - -// Read /proc/pid/smaps and do the roll-up in Go code. 
-func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { - file, err := os.Open(p.path("smaps")) - if err != nil { - return ProcSMapsRollup{}, err - } - defer file.Close() - - smaps := ProcSMapsRollup{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - line := scan.Text() - - if procSMapsHeaderLine.MatchString(line) { - continue - } - - if err := smaps.parseLine(line); err != nil { - return ProcSMapsRollup{}, err - } - } - - return smaps, nil -} - -func (s *ProcSMapsRollup) parseLine(line string) error { - kv := strings.SplitN(line, ":", 2) - if len(kv) != 2 { - return errors.New("invalid net/dev line, missing colon") - } - - k := kv[0] - if k == "VmFlags" { - return nil - } - - v := strings.TrimSpace(kv[1]) - v = strings.TrimSuffix(v, " kB") - - vKBytes, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return err - } - vBytes := vKBytes * 1024 - - s.addValue(k, vBytes) - - return nil -} - -func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) { - switch k { - case "Rss": - s.Rss += vUintBytes - case "Pss": - s.Pss += vUintBytes - case "Shared_Clean": - s.SharedClean += vUintBytes - case "Shared_Dirty": - s.SharedDirty += vUintBytes - case "Private_Clean": - s.PrivateClean += vUintBytes - case "Private_Dirty": - s.PrivateDirty += vUintBytes - case "Referenced": - s.Referenced += vUintBytes - case "Anonymous": - s.Anonymous += vUintBytes - case "Swap": - s.Swap += vUintBytes - case "SwapPss": - s.SwapPss += vUintBytes - } -} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go deleted file mode 100644 index 4bdc90b07e..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcSnmp models the content of /proc//net/snmp. -type ProcSnmp struct { - // The process ID. - PID int - Ip - Icmp - IcmpMsg - Tcp - Udp - UdpLite -} - -type Ip struct { // nolint:revive - Forwarding *float64 - DefaultTTL *float64 - InReceives *float64 - InHdrErrors *float64 - InAddrErrors *float64 - ForwDatagrams *float64 - InUnknownProtos *float64 - InDiscards *float64 - InDelivers *float64 - OutRequests *float64 - OutDiscards *float64 - OutNoRoutes *float64 - ReasmTimeout *float64 - ReasmReqds *float64 - ReasmOKs *float64 - ReasmFails *float64 - FragOKs *float64 - FragFails *float64 - FragCreates *float64 -} - -type Icmp struct { // nolint:revive - InMsgs *float64 - InErrors *float64 - InCsumErrors *float64 - InDestUnreachs *float64 - InTimeExcds *float64 - InParmProbs *float64 - InSrcQuenchs *float64 - InRedirects *float64 - InEchos *float64 - InEchoReps *float64 - InTimestamps *float64 - InTimestampReps *float64 - InAddrMasks *float64 - InAddrMaskReps *float64 - OutMsgs *float64 - OutErrors *float64 - OutDestUnreachs *float64 - OutTimeExcds *float64 - OutParmProbs *float64 - OutSrcQuenchs *float64 - OutRedirects *float64 - OutEchos *float64 - OutEchoReps *float64 - OutTimestamps *float64 - OutTimestampReps *float64 - OutAddrMasks *float64 - OutAddrMaskReps *float64 -} - -type IcmpMsg struct { - InType3 *float64 - OutType3 *float64 -} - -type Tcp struct { // nolint:revive - RtoAlgorithm *float64 - 
RtoMin *float64 - RtoMax *float64 - MaxConn *float64 - ActiveOpens *float64 - PassiveOpens *float64 - AttemptFails *float64 - EstabResets *float64 - CurrEstab *float64 - InSegs *float64 - OutSegs *float64 - RetransSegs *float64 - InErrs *float64 - OutRsts *float64 - InCsumErrors *float64 -} - -type Udp struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -type UdpLite struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -func (p Proc) Snmp() (ProcSnmp, error) { - filename := p.path("net/snmp") - data, err := util.ReadFileNoStat(filename) - if err != nil { - return ProcSnmp{PID: p.PID}, err - } - procSnmp, err := parseSnmp(bytes.NewReader(data), filename) - procSnmp.PID = p.PID - return procSnmp, err -} - -// parseSnmp parses the metrics from proc//net/snmp file -// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). -func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { - var ( - scanner = bufio.NewScanner(r) - procSnmp = ProcSnmp{} - ) - - for scanner.Scan() { - nameParts := strings.Split(scanner.Text(), " ") - scanner.Scan() - valueParts := strings.Split(scanner.Text(), " ") - // Remove trailing :. 
- protocol := strings.TrimSuffix(nameParts[0], ":") - if len(nameParts) != len(valueParts) { - return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", - ErrFileParse, fileName, protocol) - } - for i := 1; i < len(nameParts); i++ { - value, err := strconv.ParseFloat(valueParts[i], 64) - if err != nil { - return procSnmp, err - } - key := nameParts[i] - - switch protocol { - case "Ip": - switch key { - case "Forwarding": - procSnmp.Forwarding = &value - case "DefaultTTL": - procSnmp.DefaultTTL = &value - case "InReceives": - procSnmp.InReceives = &value - case "InHdrErrors": - procSnmp.InHdrErrors = &value - case "InAddrErrors": - procSnmp.InAddrErrors = &value - case "ForwDatagrams": - procSnmp.ForwDatagrams = &value - case "InUnknownProtos": - procSnmp.InUnknownProtos = &value - case "InDiscards": - procSnmp.InDiscards = &value - case "InDelivers": - procSnmp.InDelivers = &value - case "OutRequests": - procSnmp.OutRequests = &value - case "OutDiscards": - procSnmp.OutDiscards = &value - case "OutNoRoutes": - procSnmp.OutNoRoutes = &value - case "ReasmTimeout": - procSnmp.ReasmTimeout = &value - case "ReasmReqds": - procSnmp.ReasmReqds = &value - case "ReasmOKs": - procSnmp.ReasmOKs = &value - case "ReasmFails": - procSnmp.ReasmFails = &value - case "FragOKs": - procSnmp.FragOKs = &value - case "FragFails": - procSnmp.FragFails = &value - case "FragCreates": - procSnmp.FragCreates = &value - } - case "Icmp": - switch key { - case "InMsgs": - procSnmp.InMsgs = &value - case "InErrors": - procSnmp.Icmp.InErrors = &value - case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = &value - case "InDestUnreachs": - procSnmp.InDestUnreachs = &value - case "InTimeExcds": - procSnmp.InTimeExcds = &value - case "InParmProbs": - procSnmp.InParmProbs = &value - case "InSrcQuenchs": - procSnmp.InSrcQuenchs = &value - case "InRedirects": - procSnmp.InRedirects = &value - case "InEchos": - procSnmp.InEchos = &value - case "InEchoReps": - procSnmp.InEchoReps = &value 
- case "InTimestamps": - procSnmp.InTimestamps = &value - case "InTimestampReps": - procSnmp.InTimestampReps = &value - case "InAddrMasks": - procSnmp.InAddrMasks = &value - case "InAddrMaskReps": - procSnmp.InAddrMaskReps = &value - case "OutMsgs": - procSnmp.OutMsgs = &value - case "OutErrors": - procSnmp.OutErrors = &value - case "OutDestUnreachs": - procSnmp.OutDestUnreachs = &value - case "OutTimeExcds": - procSnmp.OutTimeExcds = &value - case "OutParmProbs": - procSnmp.OutParmProbs = &value - case "OutSrcQuenchs": - procSnmp.OutSrcQuenchs = &value - case "OutRedirects": - procSnmp.OutRedirects = &value - case "OutEchos": - procSnmp.OutEchos = &value - case "OutEchoReps": - procSnmp.OutEchoReps = &value - case "OutTimestamps": - procSnmp.OutTimestamps = &value - case "OutTimestampReps": - procSnmp.OutTimestampReps = &value - case "OutAddrMasks": - procSnmp.OutAddrMasks = &value - case "OutAddrMaskReps": - procSnmp.OutAddrMaskReps = &value - } - case "IcmpMsg": - switch key { - case "InType3": - procSnmp.InType3 = &value - case "OutType3": - procSnmp.OutType3 = &value - } - case "Tcp": - switch key { - case "RtoAlgorithm": - procSnmp.RtoAlgorithm = &value - case "RtoMin": - procSnmp.RtoMin = &value - case "RtoMax": - procSnmp.RtoMax = &value - case "MaxConn": - procSnmp.MaxConn = &value - case "ActiveOpens": - procSnmp.ActiveOpens = &value - case "PassiveOpens": - procSnmp.PassiveOpens = &value - case "AttemptFails": - procSnmp.AttemptFails = &value - case "EstabResets": - procSnmp.EstabResets = &value - case "CurrEstab": - procSnmp.CurrEstab = &value - case "InSegs": - procSnmp.InSegs = &value - case "OutSegs": - procSnmp.OutSegs = &value - case "RetransSegs": - procSnmp.RetransSegs = &value - case "InErrs": - procSnmp.InErrs = &value - case "OutRsts": - procSnmp.OutRsts = &value - case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = &value - } - case "Udp": - switch key { - case "InDatagrams": - procSnmp.Udp.InDatagrams = &value - case "NoPorts": - 
procSnmp.Udp.NoPorts = &value - case "InErrors": - procSnmp.Udp.InErrors = &value - case "OutDatagrams": - procSnmp.Udp.OutDatagrams = &value - case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = &value - case "SndbufErrors": - procSnmp.Udp.SndbufErrors = &value - case "InCsumErrors": - procSnmp.Udp.InCsumErrors = &value - case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = &value - } - case "UdpLite": - switch key { - case "InDatagrams": - procSnmp.UdpLite.InDatagrams = &value - case "NoPorts": - procSnmp.UdpLite.NoPorts = &value - case "InErrors": - procSnmp.UdpLite.InErrors = &value - case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = &value - case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = &value - case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = &value - case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = &value - case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = &value - } - } - } - } - return procSnmp, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go deleted file mode 100644 index fb7fd3995b..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "bytes" - "errors" - "io" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcSnmp6 models the content of /proc//net/snmp6. -type ProcSnmp6 struct { - // The process ID. - PID int - Ip6 - Icmp6 - Udp6 - UdpLite6 -} - -type Ip6 struct { // nolint:revive - InReceives *float64 - InHdrErrors *float64 - InTooBigErrors *float64 - InNoRoutes *float64 - InAddrErrors *float64 - InUnknownProtos *float64 - InTruncatedPkts *float64 - InDiscards *float64 - InDelivers *float64 - OutForwDatagrams *float64 - OutRequests *float64 - OutDiscards *float64 - OutNoRoutes *float64 - ReasmTimeout *float64 - ReasmReqds *float64 - ReasmOKs *float64 - ReasmFails *float64 - FragOKs *float64 - FragFails *float64 - FragCreates *float64 - InMcastPkts *float64 - OutMcastPkts *float64 - InOctets *float64 - OutOctets *float64 - InMcastOctets *float64 - OutMcastOctets *float64 - InBcastOctets *float64 - OutBcastOctets *float64 - InNoECTPkts *float64 - InECT1Pkts *float64 - InECT0Pkts *float64 - InCEPkts *float64 -} - -type Icmp6 struct { - InMsgs *float64 - InErrors *float64 - OutMsgs *float64 - OutErrors *float64 - InCsumErrors *float64 - InDestUnreachs *float64 - InPktTooBigs *float64 - InTimeExcds *float64 - InParmProblems *float64 - InEchos *float64 - InEchoReplies *float64 - InGroupMembQueries *float64 - InGroupMembResponses *float64 - InGroupMembReductions *float64 - InRouterSolicits *float64 - InRouterAdvertisements *float64 - InNeighborSolicits *float64 - InNeighborAdvertisements *float64 - InRedirects *float64 - InMLDv2Reports *float64 - OutDestUnreachs *float64 - OutPktTooBigs *float64 - OutTimeExcds *float64 - OutParmProblems *float64 - OutEchos *float64 - OutEchoReplies *float64 - OutGroupMembQueries *float64 - OutGroupMembResponses *float64 - OutGroupMembReductions *float64 - OutRouterSolicits *float64 - OutRouterAdvertisements *float64 - OutNeighborSolicits *float64 - OutNeighborAdvertisements *float64 - 
OutRedirects *float64 - OutMLDv2Reports *float64 - InType1 *float64 - InType134 *float64 - InType135 *float64 - InType136 *float64 - InType143 *float64 - OutType133 *float64 - OutType135 *float64 - OutType136 *float64 - OutType143 *float64 -} - -type Udp6 struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -type UdpLite6 struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 -} - -func (p Proc) Snmp6() (ProcSnmp6, error) { - filename := p.path("net/snmp6") - data, err := util.ReadFileNoStat(filename) - if err != nil { - // On systems with IPv6 disabled, this file won't exist. - // Do nothing. - if errors.Is(err, os.ErrNotExist) { - return ProcSnmp6{PID: p.PID}, nil - } - - return ProcSnmp6{PID: p.PID}, err - } - - procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) - procSnmp6.PID = p.PID - return procSnmp6, err -} - -// parseSnmp6 parses the metrics from proc//net/snmp6 file -// and returns a map contains those metrics. 
-func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { - var ( - scanner = bufio.NewScanner(r) - procSnmp6 = ProcSnmp6{} - ) - - for scanner.Scan() { - stat := strings.Fields(scanner.Text()) - if len(stat) < 2 { - continue - } - // Expect to have "6" in metric name, skip line otherwise - if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { - protocol := stat[0][:sixIndex+1] - key := stat[0][sixIndex+1:] - value, err := strconv.ParseFloat(stat[1], 64) - if err != nil { - return procSnmp6, err - } - - switch protocol { - case "Ip6": - switch key { - case "InReceives": - procSnmp6.InReceives = &value - case "InHdrErrors": - procSnmp6.InHdrErrors = &value - case "InTooBigErrors": - procSnmp6.InTooBigErrors = &value - case "InNoRoutes": - procSnmp6.InNoRoutes = &value - case "InAddrErrors": - procSnmp6.InAddrErrors = &value - case "InUnknownProtos": - procSnmp6.InUnknownProtos = &value - case "InTruncatedPkts": - procSnmp6.InTruncatedPkts = &value - case "InDiscards": - procSnmp6.InDiscards = &value - case "InDelivers": - procSnmp6.InDelivers = &value - case "OutForwDatagrams": - procSnmp6.OutForwDatagrams = &value - case "OutRequests": - procSnmp6.OutRequests = &value - case "OutDiscards": - procSnmp6.OutDiscards = &value - case "OutNoRoutes": - procSnmp6.OutNoRoutes = &value - case "ReasmTimeout": - procSnmp6.ReasmTimeout = &value - case "ReasmReqds": - procSnmp6.ReasmReqds = &value - case "ReasmOKs": - procSnmp6.ReasmOKs = &value - case "ReasmFails": - procSnmp6.ReasmFails = &value - case "FragOKs": - procSnmp6.FragOKs = &value - case "FragFails": - procSnmp6.FragFails = &value - case "FragCreates": - procSnmp6.FragCreates = &value - case "InMcastPkts": - procSnmp6.InMcastPkts = &value - case "OutMcastPkts": - procSnmp6.OutMcastPkts = &value - case "InOctets": - procSnmp6.InOctets = &value - case "OutOctets": - procSnmp6.OutOctets = &value - case "InMcastOctets": - procSnmp6.InMcastOctets = &value - case "OutMcastOctets": - procSnmp6.OutMcastOctets = &value 
- case "InBcastOctets": - procSnmp6.InBcastOctets = &value - case "OutBcastOctets": - procSnmp6.OutBcastOctets = &value - case "InNoECTPkts": - procSnmp6.InNoECTPkts = &value - case "InECT1Pkts": - procSnmp6.InECT1Pkts = &value - case "InECT0Pkts": - procSnmp6.InECT0Pkts = &value - case "InCEPkts": - procSnmp6.InCEPkts = &value - - } - case "Icmp6": - switch key { - case "InMsgs": - procSnmp6.InMsgs = &value - case "InErrors": - procSnmp6.Icmp6.InErrors = &value - case "OutMsgs": - procSnmp6.OutMsgs = &value - case "OutErrors": - procSnmp6.OutErrors = &value - case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = &value - case "InDestUnreachs": - procSnmp6.InDestUnreachs = &value - case "InPktTooBigs": - procSnmp6.InPktTooBigs = &value - case "InTimeExcds": - procSnmp6.InTimeExcds = &value - case "InParmProblems": - procSnmp6.InParmProblems = &value - case "InEchos": - procSnmp6.InEchos = &value - case "InEchoReplies": - procSnmp6.InEchoReplies = &value - case "InGroupMembQueries": - procSnmp6.InGroupMembQueries = &value - case "InGroupMembResponses": - procSnmp6.InGroupMembResponses = &value - case "InGroupMembReductions": - procSnmp6.InGroupMembReductions = &value - case "InRouterSolicits": - procSnmp6.InRouterSolicits = &value - case "InRouterAdvertisements": - procSnmp6.InRouterAdvertisements = &value - case "InNeighborSolicits": - procSnmp6.InNeighborSolicits = &value - case "InNeighborAdvertisements": - procSnmp6.InNeighborAdvertisements = &value - case "InRedirects": - procSnmp6.InRedirects = &value - case "InMLDv2Reports": - procSnmp6.InMLDv2Reports = &value - case "OutDestUnreachs": - procSnmp6.OutDestUnreachs = &value - case "OutPktTooBigs": - procSnmp6.OutPktTooBigs = &value - case "OutTimeExcds": - procSnmp6.OutTimeExcds = &value - case "OutParmProblems": - procSnmp6.OutParmProblems = &value - case "OutEchos": - procSnmp6.OutEchos = &value - case "OutEchoReplies": - procSnmp6.OutEchoReplies = &value - case "OutGroupMembQueries": - 
procSnmp6.OutGroupMembQueries = &value - case "OutGroupMembResponses": - procSnmp6.OutGroupMembResponses = &value - case "OutGroupMembReductions": - procSnmp6.OutGroupMembReductions = &value - case "OutRouterSolicits": - procSnmp6.OutRouterSolicits = &value - case "OutRouterAdvertisements": - procSnmp6.OutRouterAdvertisements = &value - case "OutNeighborSolicits": - procSnmp6.OutNeighborSolicits = &value - case "OutNeighborAdvertisements": - procSnmp6.OutNeighborAdvertisements = &value - case "OutRedirects": - procSnmp6.OutRedirects = &value - case "OutMLDv2Reports": - procSnmp6.OutMLDv2Reports = &value - case "InType1": - procSnmp6.InType1 = &value - case "InType134": - procSnmp6.InType134 = &value - case "InType135": - procSnmp6.InType135 = &value - case "InType136": - procSnmp6.InType136 = &value - case "InType143": - procSnmp6.InType143 = &value - case "OutType133": - procSnmp6.OutType133 = &value - case "OutType135": - procSnmp6.OutType135 = &value - case "OutType136": - procSnmp6.OutType136 = &value - case "OutType143": - procSnmp6.OutType143 = &value - } - case "Udp6": - switch key { - case "InDatagrams": - procSnmp6.Udp6.InDatagrams = &value - case "NoPorts": - procSnmp6.Udp6.NoPorts = &value - case "InErrors": - procSnmp6.Udp6.InErrors = &value - case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = &value - case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = &value - case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = &value - case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = &value - case "IgnoredMulti": - procSnmp6.IgnoredMulti = &value - } - case "UdpLite6": - switch key { - case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = &value - case "NoPorts": - procSnmp6.UdpLite6.NoPorts = &value - case "InErrors": - procSnmp6.UdpLite6.InErrors = &value - case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = &value - case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = &value - case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = &value - case 
"InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = &value - } - } - } - } - return procSnmp6, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go deleted file mode 100644 index 06a8d931c9..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "os" - - "github.com/prometheus/procfs/internal/util" -) - -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call -// which required cgo. However, that caused a lot of problems regarding -// cross-compilation. Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. 
-// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. - State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. - Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. - UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime int - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime int - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. 
- Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. - Starttime uint64 - // Virtual memory size in bytes. - VSize uint - // Resident set size in pages. - RSS int - // Soft limit in bytes on the rss of the process. - RSSLimit uint64 - // CPU number last executed on. - Processor uint - // Real-time scheduling priority, a number in the range 1 to 99 for processes - // scheduled under a real-time policy, or 0, for non-real-time processes. - RTPriority uint - // Scheduling policy. - Policy uint - // Aggregated block I/O delays, measured in clock ticks (centiseconds). - DelayAcctBlkIOTicks uint64 - // Guest time of the process (time spent running a virtual CPU for a guest - // operating system), measured in clock ticks. - GuestTime int - // Guest time of the process's children, measured in clock ticks. - CGuestTime int - - proc FS -} - -// NewStat returns the current status information of the process. -// -// Deprecated: Use p.Stat() instead. -func (p Proc) NewStat() (ProcStat, error) { - return p.Stat() -} - -// Stat returns the current status information of the process. 
-func (p Proc) Stat() (ProcStat, error) { - data, err := util.ReadFileNoStat(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - - var ( - ignoreInt64 int64 - ignoreUint64 uint64 - - s = ProcStat{PID: p.PID, proc: p.fs} - l = bytes.Index(data, []byte("(")) - r = bytes.LastIndex(data, []byte(")")) - ) - - if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) - } - - s.Comm = string(data[l+1 : r]) - - // Check the following resources for the details about the particular stat - // fields and their data types: - // * https://man7.org/linux/man-pages/man5/proc.5.html - // * https://man7.org/linux/man-pages/man3/scanf.3.html - _, err = fmt.Fscan( - bytes.NewBuffer(data[r+2:]), - &s.State, - &s.PPID, - &s.PGRP, - &s.Session, - &s.TTY, - &s.TPGID, - &s.Flags, - &s.MinFlt, - &s.CMinFlt, - &s.MajFlt, - &s.CMajFlt, - &s.UTime, - &s.STime, - &s.CUTime, - &s.CSTime, - &s.Priority, - &s.Nice, - &s.NumThreads, - &ignoreInt64, - &s.Starttime, - &s.VSize, - &s.RSS, - &s.RSSLimit, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreInt64, - &s.Processor, - &s.RTPriority, - &s.Policy, - &s.DelayAcctBlkIOTicks, - &s.GuestTime, - &s.CGuestTime, - ) - if err != nil { - return ProcStat{}, err - } - - return s, nil -} - -// VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() uint { - return s.VSize -} - -// ResidentMemory returns the resident memory size in bytes. -func (s ProcStat) ResidentMemory() int { - return s.RSS * os.Getpagesize() -} - -// StartTime returns the unix timestamp of the process in seconds. 
-func (s ProcStat) StartTime() (float64, error) { - stat, err := s.proc.Stat() - if err != nil { - return 0, err - } - return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil -} - -// CPUTime returns the total CPU user and system time in seconds. -func (s ProcStat) CPUTime() float64 { - return float64(s.UTime+s.STime) / userHZ -} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go deleted file mode 100644 index dd8aa56885..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "math/bits" - "sort" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcStatus provides status information about the process, -// read from /proc/[pid]/status. -type ProcStatus struct { - // The process ID. - PID int - // The process name. - Name string - - // Thread group ID. - TGID int - // List of Pid namespace. - NSpids []uint64 - - // Peak virtual memory size. - VmPeak uint64 // nolint:revive - // Virtual memory size. - VmSize uint64 // nolint:revive - // Locked memory size. - VmLck uint64 // nolint:revive - // Pinned memory size. - VmPin uint64 // nolint:revive - // Peak resident set size. - VmHWM uint64 // nolint:revive - // Resident set size (sum of RssAnnon RssFile and RssShmem). 
- VmRSS uint64 // nolint:revive - // Size of resident anonymous memory. - RssAnon uint64 // nolint:revive - // Size of resident file mappings. - RssFile uint64 // nolint:revive - // Size of resident shared memory. - RssShmem uint64 // nolint:revive - // Size of data segments. - VmData uint64 // nolint:revive - // Size of stack segments. - VmStk uint64 // nolint:revive - // Size of text segments. - VmExe uint64 // nolint:revive - // Shared library code size. - VmLib uint64 // nolint:revive - // Page table entries size. - VmPTE uint64 // nolint:revive - // Size of second-level page tables. - VmPMD uint64 // nolint:revive - // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:revive - // Size of hugetlb memory portions - HugetlbPages uint64 - - // Number of voluntary context switches. - VoluntaryCtxtSwitches uint64 - // Number of involuntary context switches. - NonVoluntaryCtxtSwitches uint64 - - // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]uint64 - // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]uint64 - - // CpusAllowedList: List of cpu cores processes are allowed to run on. - CpusAllowedList []uint64 -} - -// NewStatus returns the current status information of the process. 
-func (p Proc) NewStatus() (ProcStatus, error) { - data, err := util.ReadFileNoStat(p.path("status")) - if err != nil { - return ProcStatus{}, err - } - - s := ProcStatus{PID: p.PID} - - lines := strings.Split(string(data), "\n") - for _, line := range lines { - if !bytes.Contains([]byte(line), []byte(":")) { - continue - } - - kv := strings.SplitN(line, ":", 2) - - // removes spaces - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - // removes "kB" - v = strings.TrimSuffix(v, " kB") - - // value to int when possible - // we can skip error check here, 'cause vKBytes is not used when value is a string - vKBytes, _ := strconv.ParseUint(v, 10, 64) - // convert kB to B - vBytes := vKBytes * 1024 - - err = s.fillStatus(k, v, vKBytes, vBytes) - if err != nil { - return ProcStatus{}, err - } - } - - return s, nil -} - -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { - switch k { - case "Tgid": - s.TGID = int(vUint) - case "Name": - s.Name = vString - case "Uid": - var err error - for i, v := range strings.Split(vString, "\t") { - s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) - if err != nil { - return err - } - } - case "Gid": - var err error - for i, v := range strings.Split(vString, "\t") { - s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) - if err != nil { - return err - } - } - case "NSpid": - nspids, err := calcNSPidsList(vString) - if err != nil { - return err - } - s.NSpids = nspids - case "VmPeak": - s.VmPeak = vUintBytes - case "VmSize": - s.VmSize = vUintBytes - case "VmLck": - s.VmLck = vUintBytes - case "VmPin": - s.VmPin = vUintBytes - case "VmHWM": - s.VmHWM = vUintBytes - case "VmRSS": - s.VmRSS = vUintBytes - case "RssAnon": - s.RssAnon = vUintBytes - case "RssFile": - s.RssFile = vUintBytes - case "RssShmem": - s.RssShmem = vUintBytes - case "VmData": - s.VmData = vUintBytes - case "VmStk": - s.VmStk = vUintBytes - case "VmExe": - s.VmExe = vUintBytes - case "VmLib": - 
s.VmLib = vUintBytes - case "VmPTE": - s.VmPTE = vUintBytes - case "VmPMD": - s.VmPMD = vUintBytes - case "VmSwap": - s.VmSwap = vUintBytes - case "HugetlbPages": - s.HugetlbPages = vUintBytes - case "voluntary_ctxt_switches": - s.VoluntaryCtxtSwitches = vUint - case "nonvoluntary_ctxt_switches": - s.NonVoluntaryCtxtSwitches = vUint - case "Cpus_allowed_list": - s.CpusAllowedList = calcCpusAllowedList(vString) - } - - return nil -} - -// TotalCtxtSwitches returns the total context switch. -func (s ProcStatus) TotalCtxtSwitches() uint64 { - return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches -} - -func calcCpusAllowedList(cpuString string) []uint64 { - s := strings.Split(cpuString, ",") - - var g []uint64 - - for _, cpu := range s { - // parse cpu ranges, example: 1-3=[1,2,3] - if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 { - startCPU, _ := strconv.ParseUint(l[0], 10, 64) - endCPU, _ := strconv.ParseUint(l[1], 10, 64) - - for i := startCPU; i <= endCPU; i++ { - g = append(g, i) - } - } else if len(l) == 1 { - cpu, _ := strconv.ParseUint(l[0], 10, 64) - g = append(g, cpu) - } - - } - - sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) - return g -} - -func calcNSPidsList(nspidsString string) ([]uint64, error) { - s := strings.Split(nspidsString, "\t") - var nspids []uint64 - - for _, nspid := range s { - nspid, err := strconv.ParseUint(nspid, 10, 64) - if err != nil { - return nil, err - } - nspids = append(nspids, nspid) - } - - return nspids, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go deleted file mode 100644 index 3810d1ac99..0000000000 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -func sysctlToPath(sysctl string) string { - return strings.ReplaceAll(sysctl, ".", "/") -} - -func (fs FS) SysctlStrings(sysctl string) ([]string, error) { - value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) - if err != nil { - return nil, err - } - return strings.Fields(value), nil - -} - -func (fs FS) SysctlInts(sysctl string) ([]int, error) { - fields, err := fs.SysctlStrings(sysctl) - if err != nil { - return nil, err - } - - values := make([]int, len(fields)) - for i, f := range fields { - vp := util.NewValueParser(f) - values[i] = vp.Int() - if err := vp.Err(); err != nil { - return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) - } - } - return values, nil -} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go deleted file mode 100644 index 5f7f32dc83..0000000000 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "regexp" - "strconv" -) - -var ( - cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) - procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) -) - -// Schedstat contains scheduler statistics from /proc/schedstat -// -// See -// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt -// for a detailed description of what these numbers mean. -// -// Note the current kernel documentation claims some of the time units are in -// jiffies when they are actually in nanoseconds since 2.6.23 with the -// introduction of CFS. A fix to the documentation is pending. See -// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 -type Schedstat struct { - CPUs []*SchedstatCPU -} - -// SchedstatCPU contains the values from one "cpu" line. -type SchedstatCPU struct { - CPUNum string - - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// ProcSchedstat contains the values from `/proc//schedstat`. -type ProcSchedstat struct { - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// Schedstat reads data from `/proc/schedstat`. 
-func (fs FS) Schedstat() (*Schedstat, error) { - file, err := os.Open(fs.proc.Path("schedstat")) - if err != nil { - return nil, err - } - defer file.Close() - - stats := &Schedstat{} - scanner := bufio.NewScanner(file) - - for scanner.Scan() { - match := cpuLineRE.FindStringSubmatch(scanner.Text()) - if match != nil { - cpu := &SchedstatCPU{} - cpu.CPUNum = match[1] - - cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) - if err != nil { - continue - } - - cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) - if err != nil { - continue - } - - cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) - if err != nil { - continue - } - - stats.CPUs = append(stats.CPUs, cpu) - } - } - - return stats, nil -} - -func parseProcSchedstat(contents string) (ProcSchedstat, error) { - var ( - stats ProcSchedstat - err error - ) - match := procLineRE.FindStringSubmatch(contents) - - if match != nil { - stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) - if err != nil { - return stats, err - } - - stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) - if err != nil { - return stats, err - } - - stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) - return stats, err - } - - return stats, errors.New("could not parse schedstat") -} diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go deleted file mode 100644 index 8611c90177..0000000000 --- a/vendor/github.com/prometheus/procfs/slab.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - slabSpace = regexp.MustCompile(`\s+`) - slabVer = regexp.MustCompile(`slabinfo -`) - slabHeader = regexp.MustCompile(`# name`) -) - -// Slab represents a slab pool in the kernel. -type Slab struct { - Name string - ObjActive int64 - ObjNum int64 - ObjSize int64 - ObjPerSlab int64 - PagesPerSlab int64 - // tunables - Limit int64 - Batch int64 - SharedFactor int64 - SlabActive int64 - SlabNum int64 - SharedAvail int64 -} - -// SlabInfo represents info for all slabs. -type SlabInfo struct { - Slabs []*Slab -} - -func shouldParseSlab(line string) bool { - if slabVer.MatchString(line) { - return false - } - if slabHeader.MatchString(line) { - return false - } - return true -} - -// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. -func parseV21SlabEntry(line string) (*Slab, error) { - // First cleanup whitespace. 
- l := slabSpace.ReplaceAllString(line, " ") - s := strings.Split(l, " ") - if len(s) != 16 { - return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) - } - var err error - i := &Slab{Name: s[0]} - i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) - if err != nil { - return nil, err - } - i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) - if err != nil { - return nil, err - } - i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) - if err != nil { - return nil, err - } - i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) - if err != nil { - return nil, err - } - i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) - if err != nil { - return nil, err - } - i.Limit, err = strconv.ParseInt(s[8], 10, 64) - if err != nil { - return nil, err - } - i.Batch, err = strconv.ParseInt(s[9], 10, 64) - if err != nil { - return nil, err - } - i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) - if err != nil { - return nil, err - } - i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) - if err != nil { - return nil, err - } - i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) - if err != nil { - return nil, err - } - i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) - if err != nil { - return nil, err - } - return i, nil -} - -// parseSlabInfo21 is used to parse a slabinfo 2.1 file. -func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { - scanner := bufio.NewScanner(r) - s := SlabInfo{Slabs: []*Slab{}} - for scanner.Scan() { - line := scanner.Text() - if !shouldParseSlab(line) { - continue - } - slab, err := parseV21SlabEntry(line) - if err != nil { - return s, err - } - s.Slabs = append(s.Slabs, slab) - } - return s, nil -} - -// SlabInfo reads data from `/proc/slabinfo`. -func (fs FS) SlabInfo() (SlabInfo, error) { - // TODO: Consider passing options to allow for parsing different - // slabinfo versions. However, slabinfo 2.1 has been stable since - // kernel 2.6.10 and later. 
- data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) - if err != nil { - return SlabInfo{}, err - } - - return parseSlabInfo21(bytes.NewReader(data)) -} diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go deleted file mode 100644 index 403e6ae708..0000000000 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Softirqs represents the softirq statistics. 
-type Softirqs struct { - Hi []uint64 - Timer []uint64 - NetTx []uint64 - NetRx []uint64 - Block []uint64 - IRQPoll []uint64 - Tasklet []uint64 - Sched []uint64 - HRTimer []uint64 - RCU []uint64 -} - -func (fs FS) Softirqs() (Softirqs, error) { - fileName := fs.proc.Path("softirqs") - data, err := util.ReadFileNoStat(fileName) - if err != nil { - return Softirqs{}, err - } - - reader := bytes.NewReader(data) - - return parseSoftirqs(reader) -} - -func parseSoftirqs(r io.Reader) (Softirqs, error) { - var ( - softirqs = Softirqs{} - scanner = bufio.NewScanner(r) - ) - - if !scanner.Scan() { - return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) - } - - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - var err error - - // require at least one cpu - if len(parts) < 2 { - continue - } - switch parts[0] { - case "HI:": - perCPU := parts[1:] - softirqs.Hi = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) - } - } - case "TIMER:": - perCPU := parts[1:] - softirqs.Timer = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) - } - } - case "NET_TX:": - perCPU := parts[1:] - softirqs.NetTx = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) - } - } - case "NET_RX:": - perCPU := parts[1:] - softirqs.NetRx = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q 
(NET_RX%d): %w", ErrFileParse, count, i, err) - } - } - case "BLOCK:": - perCPU := parts[1:] - softirqs.Block = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) - } - } - case "IRQ_POLL:": - perCPU := parts[1:] - softirqs.IRQPoll = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) - } - } - case "TASKLET:": - perCPU := parts[1:] - softirqs.Tasklet = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) - } - } - case "SCHED:": - perCPU := parts[1:] - softirqs.Sched = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) - } - } - case "HRTIMER:": - perCPU := parts[1:] - softirqs.HRTimer = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) - } - } - case "RCU:": - perCPU := parts[1:] - softirqs.RCU = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) - } - } - } - } - - if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("%w: couldn't parse 
softirqs: %w", ErrFileParse, err) - } - - return softirqs, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index e36b41c18a..0000000000 --- a/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// CPUStat shows how much time the cpu spend in various stages. -type CPUStat struct { - User float64 - Nice float64 - System float64 - Idle float64 - Iowait float64 - IRQ float64 - SoftIRQ float64 - Steal float64 - Guest float64 - GuestNice float64 -} - -// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. -// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading `/proc/softirqs`. -type SoftIRQStat struct { - Hi uint64 - Timer uint64 - NetTx uint64 - NetRx uint64 - Block uint64 - BlockIoPoll uint64 - Tasklet uint64 - Sched uint64 - Hrtimer uint64 - Rcu uint64 -} - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime uint64 - // Summed up cpu statistics. 
- CPUTotal CPUStat - // Per-CPU statistics. - CPU map[int64]CPUStat - // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. - IRQTotal uint64 - // Number of times a numbered IRQ was triggered. - IRQ []uint64 - // Number of times a context switch happened. - ContextSwitches uint64 - // Number of times a process was created. - ProcessCreated uint64 - // Number of processes currently running. - ProcessesRunning uint64 - // Number of processes currently blocked (waiting for IO). - ProcessesBlocked uint64 - // Number of times a softirq was scheduled. - SoftIRQTotal uint64 - // Detailed softirq statistics. - SoftIRQ SoftIRQStat -} - -// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). -func parseCPUStat(line string) (CPUStat, int64, error) { - cpuStat := CPUStat{} - var cpu string - - count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", - &cpu, - &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, - &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, - &cpuStat.Guest, &cpuStat.GuestNice) - - if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) - } - if count == 0 { - return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) - } - - cpuStat.User /= userHZ - cpuStat.Nice /= userHZ - cpuStat.System /= userHZ - cpuStat.Idle /= userHZ - cpuStat.Iowait /= userHZ - cpuStat.IRQ /= userHZ - cpuStat.SoftIRQ /= userHZ - cpuStat.Steal /= userHZ - cpuStat.Guest /= userHZ - cpuStat.GuestNice /= userHZ - - if cpu == "cpu" { - return cpuStat, -1, nil - } - - cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) - if err != nil { - return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) - } - - return cpuStat, cpuID, nil -} - -// Parse a softirq line. 
-func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { - softIRQStat := SoftIRQStat{} - var total uint64 - var prefix string - - _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", - &prefix, &total, - &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, - &softIRQStat.Block, &softIRQStat.BlockIoPoll, - &softIRQStat.Tasklet, &softIRQStat.Sched, - &softIRQStat.Hrtimer, &softIRQStat.Rcu) - - if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) - } - - return softIRQStat, total, nil -} - -// NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: Use fs.Stat() instead. -func NewStat() (Stat, error) { - fs, err := NewFS(fs.DefaultProcMountPoint) - if err != nil { - return Stat{}, err - } - return fs.Stat() -} - -// NewStat returns information about current cpu/process statistics. -// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: Use fs.Stat() instead. -func (fs FS) NewStat() (Stat, error) { - return fs.Stat() -} - -// Stat returns information about current cpu/process statistics. -// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Stat() (Stat, error) { - fileName := fs.proc.Path("stat") - data, err := util.ReadFileNoStat(fileName) - if err != nil { - return Stat{}, err - } - procStat, err := parseStat(bytes.NewReader(data), fileName) - if err != nil { - return Stat{}, err - } - return procStat, nil -} - -// parseStat parses the metrics from /proc/[pid]/stat. -func parseStat(r io.Reader, fileName string) (Stat, error) { - var ( - scanner = bufio.NewScanner(r) - stat = Stat{ - CPU: make(map[int64]CPUStat), - } - err error - ) - - // Increase default scanner buffer to handle very long `intr` lines. 
- buf := make([]byte, 0, 8*1024) - scanner.Buffer(buf, 1024*1024) - - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - continue - } - switch { - case parts[0] == "btime": - if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "intr": - if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) - } - numberedIRQs := parts[2:] - stat.IRQ = make([]uint64, len(numberedIRQs)) - for i, count := range numberedIRQs { - if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) - } - } - case parts[0] == "ctxt": - if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "processes": - if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "procs_running": - if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "procs_blocked": - if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) - } - case parts[0] == "softirq": - softIRQStats, total, err := parseSoftIRQStat(line) - if err != nil { - return Stat{}, err - } - stat.SoftIRQTotal = total - stat.SoftIRQ = softIRQStats - case 
strings.HasPrefix(parts[0], "cpu"): - cpuStat, cpuID, err := parseCPUStat(line) - if err != nil { - return Stat{}, err - } - if cpuID == -1 { - stat.CPUTotal = cpuStat - } else { - stat.CPU[cpuID] = cpuStat - } - } - } - - if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) - } - - return stat, nil -} diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go deleted file mode 100644 index 65fec834bf..0000000000 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Swap represents an entry in /proc/swaps. -type Swap struct { - Filename string - Type string - Size int - Used int - Priority int -} - -// Swaps returns a slice of all configured swap devices on the system. 
-func (fs FS) Swaps() ([]*Swap, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) - if err != nil { - return nil, err - } - return parseSwaps(data) -} - -func parseSwaps(info []byte) ([]*Swap, error) { - swaps := []*Swap{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - scanner.Scan() // ignore header line - for scanner.Scan() { - swapString := scanner.Text() - parsedSwap, err := parseSwapString(swapString) - if err != nil { - return nil, err - } - swaps = append(swaps, parsedSwap) - } - - err := scanner.Err() - return swaps, err -} - -func parseSwapString(swapString string) (*Swap, error) { - var err error - - swapFields := strings.Fields(swapString) - swapLength := len(swapFields) - if swapLength < 5 { - return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) - } - - swap := &Swap{ - Filename: swapFields[0], - Type: swapFields[1], - } - - swap.Size, err = strconv.Atoi(swapFields[2]) - if err != nil { - return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) - } - swap.Used, err = strconv.Atoi(swapFields[3]) - if err != nil { - return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) - } - swap.Priority, err = strconv.Atoi(swapFields[4]) - if err != nil { - return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) - } - - return swap, nil -} diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go deleted file mode 100644 index 80e0e947be..0000000000 --- a/vendor/github.com/prometheus/procfs/thread.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - - fsi "github.com/prometheus/procfs/internal/fs" -) - -// Provide access to /proc/PID/task/TID files, for thread specific values. Since -// such files have the same structure as /proc/PID/ ones, the data structures -// and the parsers for the latter may be reused. - -// AllThreads returns a list of all currently available threads under /proc/PID. -func AllThreads(pid int) (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllThreads(pid) -} - -// AllThreads returns a list of all currently available threads for PID. -func (fs FS) AllThreads(pid int) (Procs, error) { - taskPath := fs.proc.Path(strconv.Itoa(pid), "task") - d, err := os.Open(taskPath) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) - } - - t := Procs{} - for _, n := range names { - tid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - - t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}}) - } - - return t, nil -} - -// Thread returns a process for a given PID, TID. -func (fs FS) Thread(pid, tid int) (Proc, error) { - taskPath := fs.proc.Path(strconv.Itoa(pid), "task") - if _, err := os.Stat(taskPath); err != nil { - return Proc{}, err - } - return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil -} - -// Thread returns a process for a given TID of Proc. 
-func (proc Proc) Thread(tid int) (Proc, error) { - tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal} - if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { - return Proc{}, err - } - return Proc{PID: tid, fs: tfs}, nil -} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar deleted file mode 100644 index 19ef02b8d4..0000000000 --- a/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. 
- -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?/dev/null; then - echo "ERROR Python not found. Aborting." - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C

] -c -f (create archive) - $bname -t -f (list archive contents) - $bname [-C ] -x -f (extract archive) - -Options: - -C (change directory) - -v (verbose) - --recursive-unlink (recursively delete existing directory if path - collides with file or directory to extract) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE -unset RECURSIVE_UNLINK - -while getopts :cf:-:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - -) - case $OPTARG in - recursive-unlink) - RECURSIVE_UNLINK="yes" - ;; - *) - echo -e "Error: invalid option -$OPTARG" - echo - usage 1 - ;; - esac - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! 
-e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes). 
- echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -L "$path" ]; then - rm "$path" - elif [ -d "$path" ]; then - if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then - rm -r "$path" - else - # Safe because symlinks to directories are dealt with above - rmdir "$path" - fi - elif [ -e "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. - touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo 
"SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." 
- echo - usage 1 - fi - if [ -e "$ttar_file" ]; then - rm "$ttar_file" - fi - exec > "$ttar_file" - echo "# Archive created by ttar $ARG_STRING" - _create "$@" -} - -test_environment - -if [ -n "${CDIR:-}" ]; then - if [[ "$ARCHIVE" != /* ]]; then - # Relative path: preserve the archive's location before changing - # directory - ARCHIVE="$(pwd)/$ARCHIVE" - fi - cd "$CDIR" -fi - -"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go deleted file mode 100644 index 51c49d89e8..0000000000 --- a/vendor/github.com/prometheus/procfs/vm.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// The VM interface is described at -// -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -// -// Each setting is exposed as a single file. -// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string. 
-type VM struct { - AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes - BlockDump *int64 // /proc/sys/vm/block_dump - CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed - DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes - DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio - DirtyBytes *int64 // /proc/sys/vm/dirty_bytes - DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs - DirtyRatio *int64 // /proc/sys/vm/dirty_ratio - DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds - DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs - DropCaches *int64 // /proc/sys/vm/drop_caches - ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold - HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group - LaptopMode *int64 // /proc/sys/vm/laptop_mode - LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout - LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio - MaxMapCount *int64 // /proc/sys/vm/max_map_count - MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill - MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery - MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes - MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio - MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio - MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr - NrHugepages *int64 // /proc/sys/vm/nr_hugepages - NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy - NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages - NumaStat *int64 // /proc/sys/vm/numa_stat - NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order - OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks - OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task - OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes - OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory - OvercommitRatio *int64 // 
/proc/sys/vm/overcommit_ratio - PageCluster *int64 // /proc/sys/vm/page-cluster - PanicOnOom *int64 // /proc/sys/vm/panic_on_oom - PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction - StatInterval *int64 // /proc/sys/vm/stat_interval - Swappiness *int64 // /proc/sys/vm/swappiness - UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes - VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure - WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor - WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor - ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode -} - -// VM reads the VM statistics from the specified `proc` filesystem. -func (fs FS) VM() (*VM, error) { - path := fs.proc.Path("sys/vm") - file, err := os.Stat(path) - if err != nil { - return nil, err - } - if !file.Mode().IsDir() { - return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) - } - - files, err := os.ReadDir(path) - if err != nil { - return nil, err - } - - var vm VM - for _, f := range files { - if f.IsDir() { - continue - } - - name := filepath.Join(path, f.Name()) - // ignore errors on read, as there are some write only - // in /proc/sys/vm - value, err := util.SysReadFile(name) - if err != nil { - continue - } - vp := util.NewValueParser(value) - - switch f.Name() { - case "admin_reserve_kbytes": - vm.AdminReserveKbytes = vp.PInt64() - case "block_dump": - vm.BlockDump = vp.PInt64() - case "compact_unevictable_allowed": - vm.CompactUnevictableAllowed = vp.PInt64() - case "dirty_background_bytes": - vm.DirtyBackgroundBytes = vp.PInt64() - case "dirty_background_ratio": - vm.DirtyBackgroundRatio = vp.PInt64() - case "dirty_bytes": - vm.DirtyBytes = vp.PInt64() - case "dirty_expire_centisecs": - vm.DirtyExpireCentisecs = vp.PInt64() - case "dirty_ratio": - vm.DirtyRatio = vp.PInt64() - case "dirtytime_expire_seconds": - vm.DirtytimeExpireSeconds = vp.PInt64() - case "dirty_writeback_centisecs": - 
vm.DirtyWritebackCentisecs = vp.PInt64() - case "drop_caches": - vm.DropCaches = vp.PInt64() - case "extfrag_threshold": - vm.ExtfragThreshold = vp.PInt64() - case "hugetlb_shm_group": - vm.HugetlbShmGroup = vp.PInt64() - case "laptop_mode": - vm.LaptopMode = vp.PInt64() - case "legacy_va_layout": - vm.LegacyVaLayout = vp.PInt64() - case "lowmem_reserve_ratio": - stringSlice := strings.Fields(value) - pint64Slice := make([]*int64, 0, len(stringSlice)) - for _, value := range stringSlice { - vp := util.NewValueParser(value) - pint64Slice = append(pint64Slice, vp.PInt64()) - } - vm.LowmemReserveRatio = pint64Slice - case "max_map_count": - vm.MaxMapCount = vp.PInt64() - case "memory_failure_early_kill": - vm.MemoryFailureEarlyKill = vp.PInt64() - case "memory_failure_recovery": - vm.MemoryFailureRecovery = vp.PInt64() - case "min_free_kbytes": - vm.MinFreeKbytes = vp.PInt64() - case "min_slab_ratio": - vm.MinSlabRatio = vp.PInt64() - case "min_unmapped_ratio": - vm.MinUnmappedRatio = vp.PInt64() - case "mmap_min_addr": - vm.MmapMinAddr = vp.PInt64() - case "nr_hugepages": - vm.NrHugepages = vp.PInt64() - case "nr_hugepages_mempolicy": - vm.NrHugepagesMempolicy = vp.PInt64() - case "nr_overcommit_hugepages": - vm.NrOvercommitHugepages = vp.PInt64() - case "numa_stat": - vm.NumaStat = vp.PInt64() - case "numa_zonelist_order": - vm.NumaZonelistOrder = value - case "oom_dump_tasks": - vm.OomDumpTasks = vp.PInt64() - case "oom_kill_allocating_task": - vm.OomKillAllocatingTask = vp.PInt64() - case "overcommit_kbytes": - vm.OvercommitKbytes = vp.PInt64() - case "overcommit_memory": - vm.OvercommitMemory = vp.PInt64() - case "overcommit_ratio": - vm.OvercommitRatio = vp.PInt64() - case "page-cluster": - vm.PageCluster = vp.PInt64() - case "panic_on_oom": - vm.PanicOnOom = vp.PInt64() - case "percpu_pagelist_fraction": - vm.PercpuPagelistFraction = vp.PInt64() - case "stat_interval": - vm.StatInterval = vp.PInt64() - case "swappiness": - vm.Swappiness = vp.PInt64() - case 
"user_reserve_kbytes": - vm.UserReserveKbytes = vp.PInt64() - case "vfs_cache_pressure": - vm.VfsCachePressure = vp.PInt64() - case "watermark_boost_factor": - vm.WatermarkBoostFactor = vp.PInt64() - case "watermark_scale_factor": - vm.WatermarkScaleFactor = vp.PInt64() - case "zone_reclaim_mode": - vm.ZoneReclaimMode = vp.PInt64() - } - if err := vp.Err(); err != nil { - return nil, err - } - } - - return &vm, nil -} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go deleted file mode 100644 index e54d94b090..0000000000 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "bytes" - "fmt" - "os" - "regexp" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Zoneinfo holds info parsed from /proc/zoneinfo. 
-type Zoneinfo struct { - Node string - Zone string - NrFreePages *int64 - Min *int64 - Low *int64 - High *int64 - Scanned *int64 - Spanned *int64 - Present *int64 - Managed *int64 - NrActiveAnon *int64 - NrInactiveAnon *int64 - NrIsolatedAnon *int64 - NrAnonPages *int64 - NrAnonTransparentHugepages *int64 - NrActiveFile *int64 - NrInactiveFile *int64 - NrIsolatedFile *int64 - NrFilePages *int64 - NrSlabReclaimable *int64 - NrSlabUnreclaimable *int64 - NrMlockStack *int64 - NrKernelStack *int64 - NrMapped *int64 - NrDirty *int64 - NrWriteback *int64 - NrUnevictable *int64 - NrShmem *int64 - NrDirtied *int64 - NrWritten *int64 - NumaHit *int64 - NumaMiss *int64 - NumaForeign *int64 - NumaInterleave *int64 - NumaLocal *int64 - NumaOther *int64 - Protection []*int64 -} - -var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) - -// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of -// structs containing the relevant info. More information available here: -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := os.ReadFile(fs.proc.Path("zoneinfo")) - if err != nil { - return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) - } - zoneinfo, err := parseZoneinfo(data) - if err != nil { - return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) - } - return zoneinfo, nil -} - -func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { - - zoneinfo := []Zoneinfo{} - - zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) - for _, block := range zoneinfoBlocks { - var zoneinfoElement Zoneinfo - lines := strings.Split(string(block), "\n") - for _, line := range lines { - - if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { - zoneinfoElement.Node = nodeZone[1] - zoneinfoElement.Zone = nodeZone[2] - continue - } - if strings.HasPrefix(strings.TrimSpace(line), 
"per-node stats") { - continue - } - parts := strings.Fields(strings.TrimSpace(line)) - if len(parts) < 2 { - continue - } - vp := util.NewValueParser(parts[1]) - switch parts[0] { - case "nr_free_pages": - zoneinfoElement.NrFreePages = vp.PInt64() - case "min": - zoneinfoElement.Min = vp.PInt64() - case "low": - zoneinfoElement.Low = vp.PInt64() - case "high": - zoneinfoElement.High = vp.PInt64() - case "scanned": - zoneinfoElement.Scanned = vp.PInt64() - case "spanned": - zoneinfoElement.Spanned = vp.PInt64() - case "present": - zoneinfoElement.Present = vp.PInt64() - case "managed": - zoneinfoElement.Managed = vp.PInt64() - case "nr_active_anon": - zoneinfoElement.NrActiveAnon = vp.PInt64() - case "nr_inactive_anon": - zoneinfoElement.NrInactiveAnon = vp.PInt64() - case "nr_isolated_anon": - zoneinfoElement.NrIsolatedAnon = vp.PInt64() - case "nr_anon_pages": - zoneinfoElement.NrAnonPages = vp.PInt64() - case "nr_anon_transparent_hugepages": - zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() - case "nr_active_file": - zoneinfoElement.NrActiveFile = vp.PInt64() - case "nr_inactive_file": - zoneinfoElement.NrInactiveFile = vp.PInt64() - case "nr_isolated_file": - zoneinfoElement.NrIsolatedFile = vp.PInt64() - case "nr_file_pages": - zoneinfoElement.NrFilePages = vp.PInt64() - case "nr_slab_reclaimable": - zoneinfoElement.NrSlabReclaimable = vp.PInt64() - case "nr_slab_unreclaimable": - zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() - case "nr_mlock_stack": - zoneinfoElement.NrMlockStack = vp.PInt64() - case "nr_kernel_stack": - zoneinfoElement.NrKernelStack = vp.PInt64() - case "nr_mapped": - zoneinfoElement.NrMapped = vp.PInt64() - case "nr_dirty": - zoneinfoElement.NrDirty = vp.PInt64() - case "nr_writeback": - zoneinfoElement.NrWriteback = vp.PInt64() - case "nr_unevictable": - zoneinfoElement.NrUnevictable = vp.PInt64() - case "nr_shmem": - zoneinfoElement.NrShmem = vp.PInt64() - case "nr_dirtied": - zoneinfoElement.NrDirtied = vp.PInt64() - case 
"nr_written": - zoneinfoElement.NrWritten = vp.PInt64() - case "numa_hit": - zoneinfoElement.NumaHit = vp.PInt64() - case "numa_miss": - zoneinfoElement.NumaMiss = vp.PInt64() - case "numa_foreign": - zoneinfoElement.NumaForeign = vp.PInt64() - case "numa_interleave": - zoneinfoElement.NumaInterleave = vp.PInt64() - case "numa_local": - zoneinfoElement.NumaLocal = vp.PInt64() - case "numa_other": - zoneinfoElement.NumaOther = vp.PInt64() - case "protection:": - protectionParts := strings.Split(line, ":") - protectionValues := strings.Replace(protectionParts[1], "(", "", 1) - protectionValues = strings.Replace(protectionValues, ")", "", 1) - protectionValues = strings.TrimSpace(protectionValues) - protectionStringMap := strings.Split(protectionValues, ", ") - val, err := util.ParsePInt64s(protectionStringMap) - if err == nil { - zoneinfoElement.Protection = val - } - } - - } - - zoneinfo = append(zoneinfo, zoneinfoElement) - } - return zoneinfo, nil -} diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml index 409a5b631c..ce9afeaeea 100644 --- a/vendor/github.com/rcrowley/go-metrics/.travis.yml +++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml @@ -13,6 +13,7 @@ go: - "1.12" - "1.13" - "1.14" + - "1.15" script: - ./validate.sh diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md index 27ddfee8b8..6492bfe851 100644 --- a/vendor/github.com/rcrowley/go-metrics/README.md +++ b/vendor/github.com/rcrowley/go-metrics/README.md @@ -7,6 +7,15 @@ Go port of Coda Hale's Metrics library: . Documentation: . +Archived as of April 1 2025 +----- +This repository is no longer maintained. 
The authors recommend you explore the +following newer, more widely adopted libraries for your Go instrumentation +needs: + +* [OpenTelemetry Go SDK](https://opentelemetry.io/docs/languages/go/instrumentation/#metrics) +* [Prometheus Go Client Library](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) + Usage ----- diff --git a/vendor/github.com/segmentio/asm/LICENSE b/vendor/github.com/segmentio/asm/LICENSE new file mode 100644 index 0000000000..5e93dab621 --- /dev/null +++ b/vendor/github.com/segmentio/asm/LICENSE @@ -0,0 +1,16 @@ +MIT No Attribution + +Copyright 2023 Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/segmentio/asm/base64/base64.go b/vendor/github.com/segmentio/asm/base64/base64.go new file mode 100644 index 0000000000..dd2128d4a9 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64.go @@ -0,0 +1,67 @@ +package base64 + +import ( + "encoding/base64" +) + +const ( + StdPadding rune = base64.StdPadding + NoPadding rune = base64.NoPadding + + encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" + encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + encodeIMAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+," + + letterRange = int8('Z' - 'A' + 1) +) + +// StdEncoding is the standard base64 encoding, as defined in RFC 4648. +var StdEncoding = NewEncoding(encodeStd) + +// URLEncoding is the alternate base64 encoding defined in RFC 4648. +// It is typically used in URLs and file names. +var URLEncoding = NewEncoding(encodeURL) + +// RawStdEncoding is the standard unpadded base64 encoding defined in RFC 4648 section 3.2. +// This is the same as StdEncoding but omits padding characters. +var RawStdEncoding = StdEncoding.WithPadding(NoPadding) + +// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648. +// This is the same as URLEncoding but omits padding characters. +var RawURLEncoding = URLEncoding.WithPadding(NoPadding) + +// NewEncoding returns a new padded Encoding defined by the given alphabet, +// which must be a 64-byte string that does not contain the padding character +// or CR / LF ('\r', '\n'). Unlike the standard library, the encoding alphabet +// cannot be abitrary, and it must follow one of the know standard encoding +// variants. 
+// +// Required alphabet values: +// * [0,26): characters 'A'..'Z' +// * [26,52): characters 'a'..'z' +// * [52,62): characters '0'..'9' +// Flexible alphabet value options: +// * RFC 4648, RFC 1421, RFC 2045, RFC 2152, RFC 4880: '+' and '/' +// * RFC 4648 URI: '-' and '_' +// * RFC 3501: '+' and ',' +// +// The resulting Encoding uses the default padding character ('='), which may +// be changed or disabled via WithPadding. The padding characters is urestricted, +// but it must be a character outside of the encoder alphabet. +func NewEncoding(encoder string) *Encoding { + if len(encoder) != 64 { + panic("encoding alphabet is not 64-bytes long") + } + + if _, ok := allowedEncoding[encoder]; !ok { + panic("non-standard encoding alphabets are not supported") + } + + return newEncoding(encoder) +} + +var allowedEncoding = map[string]struct{}{ + encodeStd: {}, + encodeURL: {}, + encodeIMAP: {}, +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_amd64.go b/vendor/github.com/segmentio/asm/base64/base64_amd64.go new file mode 100644 index 0000000000..4136098eaa --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_amd64.go @@ -0,0 +1,78 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package base64 + +import ( + "encoding/base64" + + "github.com/segmentio/asm/cpu" + "github.com/segmentio/asm/cpu/x86" +) + +const ( + encLutSize = 32 + decLutSize = 48 + minEncodeLen = 28 + minDecodeLen = 45 +) + +func newEncoding(encoder string) *Encoding { + e := &Encoding{base: base64.NewEncoding(encoder)} + if cpu.X86.Has(x86.AVX2) { + e.enableEncodeAVX2(encoder) + e.enableDecodeAVX2(encoder) + } + return e +} + +func (e *Encoding) enableEncodeAVX2(encoder string) { + // Translate values 0..63 to the Base64 alphabet. 
There are five sets: + // + // From To Add Index Example + // [0..25] [65..90] +65 0 ABCDEFGHIJKLMNOPQRSTUVWXYZ + // [26..51] [97..122] +71 1 abcdefghijklmnopqrstuvwxyz + // [52..61] [48..57] -4 [2..11] 0123456789 + // [62] [43] -19 12 + + // [63] [47] -16 13 / + tab := [encLutSize]int8{int8(encoder[0]), int8(encoder[letterRange]) - letterRange} + for i, ch := range encoder[2*letterRange:] { + tab[2+i] = int8(ch) - 2*letterRange - int8(i) + } + + e.enc = encodeAVX2 + e.enclut = tab +} + +func (e *Encoding) enableDecodeAVX2(encoder string) { + c62, c63 := int8(encoder[62]), int8(encoder[63]) + url := c63 == '_' + if url { + c63 = '/' + } + + // Translate values from the Base64 alphabet using five sets. Values outside + // of these ranges are considered invalid: + // + // From To Add Index Example + // [47] [63] +16 1 / + // [43] [62] +19 2 + + // [48..57] [52..61] +4 3 0123456789 + // [65..90] [0..25] -65 4,5 ABCDEFGHIJKLMNOPQRSTUVWXYZ + // [97..122] [26..51] -71 6,7 abcdefghijklmnopqrstuvwxyz + tab := [decLutSize]int8{ + 0, 63 - c63, 62 - c62, 4, -65, -65, -71, -71, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x13, 0x1B, 0x1B, 0x1B, 0x1B, 0x1B, + } + tab[(c62&15)+16] = 0x1A + tab[(c63&15)+16] = 0x1A + + if url { + e.dec = decodeAVX2URI + } else { + e.dec = decodeAVX2 + } + e.declut = tab +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_arm64.go b/vendor/github.com/segmentio/asm/base64/base64_arm64.go new file mode 100644 index 0000000000..276f300287 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_arm64.go @@ -0,0 +1,42 @@ +//go:build arm64 && !purego +// +build arm64,!purego + +package base64 + +import ( + "encoding/base64" +) + +const ( + encLutSize = 16 + decLutSize = 2 + minEncodeLen = 16 * 3 + minDecodeLen = 8 * 4 +) + +func newEncoding(encoder string) *Encoding { + e := &Encoding{base: base64.NewEncoding(encoder)} + e.enableEncodeARM64(encoder) + 
e.enableDecodeARM64(encoder) + return e +} + +func (e *Encoding) enableEncodeARM64(encoder string) { + c62, c63 := int8(encoder[62]), int8(encoder[63]) + tab := [encLutSize]int8{ + 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, c62 - 62, c63 - 63, 'A', 0, 0, + } + + e.enc = encodeARM64 + e.enclut = tab +} + +func (e *Encoding) enableDecodeARM64(encoder string) { + if encoder == encodeStd { + e.dec = decodeStdARM64 + } else { + e.dec = decodeARM64 + } + e.declut = [decLutSize]int8{int8(encoder[62]), int8(encoder[63])} +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_asm.go b/vendor/github.com/segmentio/asm/base64/base64_asm.go new file mode 100644 index 0000000000..f9afadd7f2 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_asm.go @@ -0,0 +1,94 @@ +//go:build (amd64 || arm64) && !purego +// +build amd64 arm64 +// +build !purego + +package base64 + +import ( + "encoding/base64" + + "github.com/segmentio/asm/internal/unsafebytes" +) + +// An Encoding is a radix 64 encoding/decoding scheme, defined by a +// 64-character alphabet. +type Encoding struct { + enc func(dst []byte, src []byte, lut *int8) (int, int) + enclut [encLutSize]int8 + + dec func(dst []byte, src []byte, lut *int8) (int, int) + declut [decLutSize]int8 + + base *base64.Encoding +} + +// WithPadding creates a duplicate Encoding updated with a specified padding +// character, or NoPadding to disable padding. The padding character must not +// be contained in the encoding alphabet, must not be '\r' or '\n', and must +// be no greater than '\xFF'. +func (enc Encoding) WithPadding(padding rune) *Encoding { + enc.base = enc.base.WithPadding(padding) + return &enc +} + +// Strict creates a duplicate encoding updated with strict decoding enabled. +// This requires that trailing padding bits are zero. 
+func (enc Encoding) Strict() *Encoding { + enc.base = enc.base.Strict() + return &enc +} + +// Encode encodes src using the defined encoding alphabet. +// This will write EncodedLen(len(src)) bytes to dst. +func (enc *Encoding) Encode(dst, src []byte) { + if len(src) >= minEncodeLen && enc.enc != nil { + d, s := enc.enc(dst, src, &enc.enclut[0]) + dst = dst[d:] + src = src[s:] + } + enc.base.Encode(dst, src) +} + +// Encode encodes src using the encoding enc, writing +// EncodedLen(len(src)) bytes to dst. +func (enc *Encoding) EncodeToString(src []byte) string { + buf := make([]byte, enc.base.EncodedLen(len(src))) + enc.Encode(buf, src) + return string(buf) +} + +// EncodedLen calculates the base64-encoded byte length for a message +// of length n. +func (enc *Encoding) EncodedLen(n int) int { + return enc.base.EncodedLen(n) +} + +// Decode decodes src using the defined encoding alphabet. +// This will write DecodedLen(len(src)) bytes to dst and return the number of +// bytes written. +func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { + var d, s int + if len(src) >= minDecodeLen && enc.dec != nil { + d, s = enc.dec(dst, src, &enc.declut[0]) + dst = dst[d:] + src = src[s:] + } + n, err = enc.base.Decode(dst, src) + n += d + return +} + +// DecodeString decodes the base64 encoded string s, returns the decoded +// value as bytes. +func (enc *Encoding) DecodeString(s string) ([]byte, error) { + src := unsafebytes.BytesOf(s) + dst := make([]byte, enc.base.DecodedLen(len(s))) + n, err := enc.Decode(dst, src) + return dst[:n], err +} + +// DecodedLen calculates the decoded byte length for a base64-encoded message +// of length n. 
+func (enc *Encoding) DecodedLen(n int) int { + return enc.base.DecodedLen(n) +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_default.go b/vendor/github.com/segmentio/asm/base64/base64_default.go new file mode 100644 index 0000000000..1720da5ca7 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_default.go @@ -0,0 +1,14 @@ +//go:build purego || !(amd64 || arm64) +// +build purego !amd64,!arm64 + +package base64 + +import "encoding/base64" + +// An Encoding is a radix 64 encoding/decoding scheme, defined by a +// 64-character alphabet. +type Encoding = base64.Encoding + +func newEncoding(encoder string) *Encoding { + return base64.NewEncoding(encoder) +} diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.go b/vendor/github.com/segmentio/asm/base64/decode_amd64.go new file mode 100644 index 0000000000..e85bf6a925 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.go @@ -0,0 +1,9 @@ +// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT. + +//go:build !purego + +package base64 + +func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int) + +func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.s b/vendor/github.com/segmentio/asm/base64/decode_amd64.s new file mode 100644 index 0000000000..ade5442c3b --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.s @@ -0,0 +1,143 @@ +// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT. 
+ +//go:build !purego + +#include "textflag.h" + +DATA b64_dec_lut_hi<>+0(SB)/8, $0x0804080402011010 +DATA b64_dec_lut_hi<>+8(SB)/8, $0x1010101010101010 +DATA b64_dec_lut_hi<>+16(SB)/8, $0x0804080402011010 +DATA b64_dec_lut_hi<>+24(SB)/8, $0x1010101010101010 +GLOBL b64_dec_lut_hi<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_madd1<>+0(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+8(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+16(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+24(SB)/8, $0x0140014001400140 +GLOBL b64_dec_madd1<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_madd2<>+0(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+8(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+16(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+24(SB)/8, $0x0001100000011000 +GLOBL b64_dec_madd2<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_shuf_lo<>+0(SB)/8, $0x0000000000000000 +DATA b64_dec_shuf_lo<>+8(SB)/8, $0x0600010200000000 +GLOBL b64_dec_shuf_lo<>(SB), RODATA|NOPTR, $16 + +DATA b64_dec_shuf<>+0(SB)/8, $0x090a040506000102 +DATA b64_dec_shuf<>+8(SB)/8, $0x000000000c0d0e08 +DATA b64_dec_shuf<>+16(SB)/8, $0x0c0d0e08090a0405 +DATA b64_dec_shuf<>+24(SB)/8, $0x0000000000000000 +GLOBL b64_dec_shuf<>(SB), RODATA|NOPTR, $32 + +// func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·decodeAVX2(SB), NOSPLIT, $0-72 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x2f, CL + PINSRB $0x00, CX, X8 + VPBROADCASTB X8, Y8 + XORQ CX, CX + XORQ BX, BX + VPXOR Y7, Y7, Y7 + VPERMQ $0x44, (SI), Y6 + VPERMQ $0x44, 16(SI), Y4 + VMOVDQA b64_dec_lut_hi<>+0(SB), Y5 + +loop: + VMOVDQU (DX)(BX*1), Y0 + VPSRLD $0x04, Y0, Y2 + VPAND Y8, Y0, Y3 + VPSHUFB Y3, Y4, Y3 + VPAND Y8, Y2, Y2 + VPSHUFB Y2, Y5, Y9 + VPTEST Y9, Y3 + JNE done + VPCMPEQB Y8, Y0, Y3 + VPADDB Y3, Y2, Y2 + VPSHUFB Y2, Y6, Y2 + VPADDB Y0, Y2, Y0 + VPMADDUBSW b64_dec_madd1<>+0(SB), Y0, Y0 + VPMADDWD b64_dec_madd2<>+0(SB), Y0, Y0 + 
VEXTRACTI128 $0x01, Y0, X1 + VPSHUFB b64_dec_shuf_lo<>+0(SB), X1, X1 + VPSHUFB b64_dec_shuf<>+0(SB), Y0, Y0 + VPBLENDD $0x08, Y1, Y0, Y1 + VPBLENDD $0xc0, Y7, Y1, Y1 + VMOVDQU Y1, (AX)(CX*1) + ADDQ $0x18, CX + ADDQ $0x20, BX + SUBQ $0x20, DI + CMPQ DI, $0x2d + JB done + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET + +// func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·decodeAVX2URI(SB), NOSPLIT, $0-72 + MOVB $0x2f, AL + PINSRB $0x00, AX, X0 + VPBROADCASTB X0, Y0 + MOVB $0x5f, AL + PINSRB $0x00, AX, X1 + VPBROADCASTB X1, Y1 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x2f, CL + PINSRB $0x00, CX, X10 + VPBROADCASTB X10, Y10 + XORQ CX, CX + XORQ BX, BX + VPXOR Y9, Y9, Y9 + VPERMQ $0x44, (SI), Y8 + VPERMQ $0x44, 16(SI), Y6 + VMOVDQA b64_dec_lut_hi<>+0(SB), Y7 + +loop: + VMOVDQU (DX)(BX*1), Y2 + VPCMPEQB Y2, Y1, Y4 + VPBLENDVB Y4, Y0, Y2, Y2 + VPSRLD $0x04, Y2, Y4 + VPAND Y10, Y2, Y5 + VPSHUFB Y5, Y6, Y5 + VPAND Y10, Y4, Y4 + VPSHUFB Y4, Y7, Y11 + VPTEST Y11, Y5 + JNE done + VPCMPEQB Y10, Y2, Y5 + VPADDB Y5, Y4, Y4 + VPSHUFB Y4, Y8, Y4 + VPADDB Y2, Y4, Y2 + VPMADDUBSW b64_dec_madd1<>+0(SB), Y2, Y2 + VPMADDWD b64_dec_madd2<>+0(SB), Y2, Y2 + VEXTRACTI128 $0x01, Y2, X3 + VPSHUFB b64_dec_shuf_lo<>+0(SB), X3, X3 + VPSHUFB b64_dec_shuf<>+0(SB), Y2, Y2 + VPBLENDD $0x08, Y3, Y2, Y3 + VPBLENDD $0xc0, Y9, Y3, Y3 + VMOVDQU Y3, (AX)(CX*1) + ADDQ $0x18, CX + ADDQ $0x20, BX + SUBQ $0x20, DI + CMPQ DI, $0x2d + JB done + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/segmentio/asm/base64/decode_arm64.go b/vendor/github.com/segmentio/asm/base64/decode_arm64.go new file mode 100644 index 0000000000..d44baa1dc5 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_arm64.go @@ -0,0 +1,7 @@ +//go:build !purego +// +build !purego + +package base64 + +func 
decodeARM64(dst []byte, src []byte, lut *int8) (int, int) +func decodeStdARM64(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/decode_arm64.s b/vendor/github.com/segmentio/asm/base64/decode_arm64.s new file mode 100644 index 0000000000..67d206f8cc --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_arm64.s @@ -0,0 +1,213 @@ +#include "textflag.h" + +#define LOAD_ARGS() \ + MOVD dst_base+0(FP), R0; \ + MOVD R0, R3; \ + MOVD src_base+24(FP), R1; \ + MOVD R1, R4; \ + MOVD src_len+32(FP), R2; \ + BIC $31, R2, R2; \ + ADD R1, R2, R2 + +#define LOAD_ARG_LUT() \ + MOVD lut+48(FP), R5; \ + VLD2R (R5), [V0.B16, V1.B16] + +#define LOAD_CONST_LUT() \ + MOVD $·mask_lut(SB), R6; \ + MOVD $·bpos_lut(SB), R7; \ + MOVD $·shft_lut(SB), R8; \ + VLD1 (R6), [V2.B16]; \ + VLD1 (R7), [V3.B16]; \ + VLD1 (R8), [V4.B16]; \ + VMOVI $43, V5.B8; \ + VMOVI $47, V6.B8; \ + VMOVI $15, V7.B8; \ + VMOVI $16, V8.B8; \ + +#define LOAD_INPUT() \ + VLD4 (R4), [V10.B8, V11.B8, V12.B8, V13.B8] + +#define COMPARE_INPUT(v) \ + VCMEQ V10.B8, v.B8, V14.B8; \ + VCMEQ V11.B8, v.B8, V15.B8; \ + VCMEQ V12.B8, v.B8, V16.B8; \ + VCMEQ V13.B8, v.B8, V17.B8 + +#define UPDATE_INPUT(v) \ + VBIT V14.B8, v.B8, V10.B8; \ + VBIT V15.B8, v.B8, V11.B8; \ + VBIT V16.B8, v.B8, V12.B8; \ + VBIT V17.B8, v.B8, V13.B8 + +#define DECODE_INPUT(goto_err) \ + /* Create hi/lo nibles */ \ + VUSHR $4, V10.B8, V18.B8; \ + VUSHR $4, V11.B8, V19.B8; \ + VUSHR $4, V12.B8, V20.B8; \ + VUSHR $4, V13.B8, V21.B8; \ + VAND V7.B8, V10.B8, V22.B8; \ + VAND V7.B8, V11.B8, V23.B8; \ + VAND V7.B8, V12.B8, V24.B8; \ + VAND V7.B8, V13.B8, V25.B8; \ + /* Detect invalid input characters */ \ + VTBL V22.B8, [V2.B8], V22.B8; \ + VTBL V23.B8, [V2.B8], V23.B8; \ + VTBL V24.B8, [V2.B8], V24.B8; \ + VTBL V25.B8, [V2.B8], V25.B8; \ + VTBL V18.B8, [V3.B8], V26.B8; \ + VTBL V19.B8, [V3.B8], V27.B8; \ + VTBL V20.B8, [V3.B8], V28.B8; \ + VTBL V21.B8, [V3.B8], V29.B8; \ + VAND V22.B8, V26.B8, V26.B8; \ 
+ VAND V23.B8, V27.B8, V27.B8; \ + VAND V24.B8, V28.B8, V28.B8; \ + VAND V25.B8, V29.B8, V29.B8; \ + WORD $0x0e209b5a /* VCMEQ $0, V26.B8, V26.B8 */; \ + WORD $0x0e209b7b /* VCMEQ $0, V27.B8, V27.B8 */; \ + WORD $0x0e209b9c /* VCMEQ $0, V28.B8, V28.B8 */; \ + WORD $0x0e209bbd /* VCMEQ $0, V29.B8, V29.B8 */; \ + VORR V26.B8, V27.B8, V26.B8; \ + VORR V28.B8, V29.B8, V28.B8; \ + VORR V26.B8, V28.B8, V26.B8; \ + VMOV V26.D[0], R5; \ + VMOV V26.D[1], R6; \ + ORR R6, R5; \ + CBNZ R5, goto_err; \ + /* Shift hi nibles */ \ + VTBL V18.B8, [V4.B8], V18.B8; \ + VTBL V19.B8, [V4.B8], V19.B8; \ + VTBL V20.B8, [V4.B8], V20.B8; \ + VTBL V21.B8, [V4.B8], V21.B8; \ + VBIT V14.B8, V8.B8, V18.B8; \ + VBIT V15.B8, V8.B8, V19.B8; \ + VBIT V16.B8, V8.B8, V20.B8; \ + VBIT V17.B8, V8.B8, V21.B8; \ + /* Combine results */ \ + VADD V18.B8, V10.B8, V10.B8; \ + VADD V19.B8, V11.B8, V11.B8; \ + VADD V20.B8, V12.B8, V12.B8; \ + VADD V21.B8, V13.B8, V13.B8; \ + VUSHR $4, V11.B8, V14.B8; \ + VUSHR $2, V12.B8, V15.B8; \ + VSHL $2, V10.B8, V10.B8; \ + VSHL $4, V11.B8, V11.B8; \ + VSHL $6, V12.B8, V12.B8; \ + VORR V10.B8, V14.B8, V16.B8; \ + VORR V11.B8, V15.B8, V17.B8; \ + VORR V12.B8, V13.B8, V18.B8 + +#define ADVANCE_LOOP(goto_loop) \ + VST3.P [V16.B8, V17.B8, V18.B8], 24(R3); \ + ADD $32, R4; \ + CMP R4, R2; \ + BGT goto_loop + +#define RETURN() \ + SUB R0, R3; \ + SUB R1, R4; \ + MOVD R3, ret+56(FP); \ + MOVD R4, ret1+64(FP); \ + RET + + +// func decodeARM64(dst []byte, src []byte, lut *int8) (int, int) +TEXT ·decodeARM64(SB),NOSPLIT,$0-72 + LOAD_ARGS() + LOAD_ARG_LUT() + LOAD_CONST_LUT() + +loop: + LOAD_INPUT() + + // Compare and normalize the 63rd and 64th characters + COMPARE_INPUT(V0) + UPDATE_INPUT(V5) + COMPARE_INPUT(V1) + UPDATE_INPUT(V6) + + DECODE_INPUT(done) // Detect invalid input characters + ADVANCE_LOOP(loop) // Store results and continue + +done: + // RETURN() replacing the macro to please go vet. 
+ SUB R0, R3; + SUB R1, R4; + MOVD R3, ret+56(FP); + MOVD R4, ret1+64(FP); + RET + + +// func decodeStdARM64(dst []byte, src []byte, lut *int8) (int, int) +TEXT ·decodeStdARM64(SB),NOSPLIT,$0-72 + LOAD_ARGS() + LOAD_CONST_LUT() + +loop: + LOAD_INPUT() + COMPARE_INPUT(V6) // Compare to '+' + DECODE_INPUT(done) // Detect invalid input characters + ADVANCE_LOOP(loop) // Store results and continue + +done: + // RETURN() replacing the macro to please go vet. + SUB R0, R3; + SUB R1, R4; + MOVD R3, ret+56(FP); + MOVD R4, ret1+64(FP); + RET + + +DATA ·mask_lut+0x00(SB)/1, $0xa8 +DATA ·mask_lut+0x01(SB)/1, $0xf8 +DATA ·mask_lut+0x02(SB)/1, $0xf8 +DATA ·mask_lut+0x03(SB)/1, $0xf8 +DATA ·mask_lut+0x04(SB)/1, $0xf8 +DATA ·mask_lut+0x05(SB)/1, $0xf8 +DATA ·mask_lut+0x06(SB)/1, $0xf8 +DATA ·mask_lut+0x07(SB)/1, $0xf8 +DATA ·mask_lut+0x08(SB)/1, $0xf8 +DATA ·mask_lut+0x09(SB)/1, $0xf8 +DATA ·mask_lut+0x0a(SB)/1, $0xf0 +DATA ·mask_lut+0x0b(SB)/1, $0x54 +DATA ·mask_lut+0x0c(SB)/1, $0x50 +DATA ·mask_lut+0x0d(SB)/1, $0x50 +DATA ·mask_lut+0x0e(SB)/1, $0x50 +DATA ·mask_lut+0x0f(SB)/1, $0x54 +GLOBL ·mask_lut(SB), NOPTR|RODATA, $16 + +DATA ·bpos_lut+0x00(SB)/1, $0x01 +DATA ·bpos_lut+0x01(SB)/1, $0x02 +DATA ·bpos_lut+0x02(SB)/1, $0x04 +DATA ·bpos_lut+0x03(SB)/1, $0x08 +DATA ·bpos_lut+0x04(SB)/1, $0x10 +DATA ·bpos_lut+0x05(SB)/1, $0x20 +DATA ·bpos_lut+0x06(SB)/1, $0x40 +DATA ·bpos_lut+0x07(SB)/1, $0x80 +DATA ·bpos_lut+0x08(SB)/1, $0x00 +DATA ·bpos_lut+0x09(SB)/1, $0x00 +DATA ·bpos_lut+0x0a(SB)/1, $0x00 +DATA ·bpos_lut+0x0b(SB)/1, $0x00 +DATA ·bpos_lut+0x0c(SB)/1, $0x00 +DATA ·bpos_lut+0x0d(SB)/1, $0x00 +DATA ·bpos_lut+0x0e(SB)/1, $0x00 +DATA ·bpos_lut+0x0f(SB)/1, $0x00 +GLOBL ·bpos_lut(SB), NOPTR|RODATA, $16 + +DATA ·shft_lut+0x00(SB)/1, $0x00 +DATA ·shft_lut+0x01(SB)/1, $0x00 +DATA ·shft_lut+0x02(SB)/1, $0x13 +DATA ·shft_lut+0x03(SB)/1, $0x04 +DATA ·shft_lut+0x04(SB)/1, $0xbf +DATA ·shft_lut+0x05(SB)/1, $0xbf +DATA ·shft_lut+0x06(SB)/1, $0xb9 +DATA ·shft_lut+0x07(SB)/1, $0xb9 +DATA 
·shft_lut+0x08(SB)/1, $0x00 +DATA ·shft_lut+0x09(SB)/1, $0x00 +DATA ·shft_lut+0x0a(SB)/1, $0x00 +DATA ·shft_lut+0x0b(SB)/1, $0x00 +DATA ·shft_lut+0x0c(SB)/1, $0x00 +DATA ·shft_lut+0x0d(SB)/1, $0x00 +DATA ·shft_lut+0x0e(SB)/1, $0x00 +DATA ·shft_lut+0x0f(SB)/1, $0x00 +GLOBL ·shft_lut(SB), NOPTR|RODATA, $16 diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.go b/vendor/github.com/segmentio/asm/base64/encode_amd64.go new file mode 100644 index 0000000000..a83c81f157 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.go @@ -0,0 +1,7 @@ +// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT. + +//go:build !purego + +package base64 + +func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.s b/vendor/github.com/segmentio/asm/base64/encode_amd64.s new file mode 100644 index 0000000000..6797c977e8 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.s @@ -0,0 +1,87 @@ +// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT. 
+ +//go:build !purego + +#include "textflag.h" + +// func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·encodeAVX2(SB), NOSPLIT, $0-72 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x33, CL + PINSRB $0x00, CX, X4 + VPBROADCASTB X4, Y4 + MOVB $0x19, CL + PINSRB $0x00, CX, X5 + VPBROADCASTB X5, Y5 + XORQ CX, CX + XORQ BX, BX + + // Load the 16-byte LUT into both lanes of the register + VPERMQ $0x44, (SI), Y3 + + // Load the first block using a mask to avoid potential fault + VMOVDQU b64_enc_load<>+0(SB), Y0 + VPMASKMOVD -4(DX)(BX*1), Y0, Y0 + +loop: + VPSHUFB b64_enc_shuf<>+0(SB), Y0, Y0 + VPAND b64_enc_mask1<>+0(SB), Y0, Y1 + VPSLLW $0x08, Y1, Y2 + VPSLLW $0x04, Y1, Y1 + VPBLENDW $0xaa, Y2, Y1, Y2 + VPAND b64_enc_mask2<>+0(SB), Y0, Y1 + VPMULHUW b64_enc_mult<>+0(SB), Y1, Y0 + VPOR Y0, Y2, Y0 + VPSUBUSB Y4, Y0, Y1 + VPCMPGTB Y5, Y0, Y2 + VPSUBB Y2, Y1, Y1 + VPSHUFB Y1, Y3, Y1 + VPADDB Y0, Y1, Y0 + VMOVDQU Y0, (AX)(CX*1) + ADDQ $0x20, CX + ADDQ $0x18, BX + SUBQ $0x18, DI + CMPQ DI, $0x20 + JB done + VMOVDQU -4(DX)(BX*1), Y0 + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET + +DATA b64_enc_load<>+0(SB)/8, $0x8000000000000000 +DATA b64_enc_load<>+8(SB)/8, $0x8000000080000000 +DATA b64_enc_load<>+16(SB)/8, $0x8000000080000000 +DATA b64_enc_load<>+24(SB)/8, $0x8000000080000000 +GLOBL b64_enc_load<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_shuf<>+0(SB)/8, $0x0809070805060405 +DATA b64_enc_shuf<>+8(SB)/8, $0x0e0f0d0e0b0c0a0b +DATA b64_enc_shuf<>+16(SB)/8, $0x0405030401020001 +DATA b64_enc_shuf<>+24(SB)/8, $0x0a0b090a07080607 +GLOBL b64_enc_shuf<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mask1<>+0(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+8(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+16(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+24(SB)/8, $0x003f03f0003f03f0 +GLOBL b64_enc_mask1<>(SB), RODATA|NOPTR, $32 + +DATA 
b64_enc_mask2<>+0(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+8(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+16(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+24(SB)/8, $0x0fc0fc000fc0fc00 +GLOBL b64_enc_mask2<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mult<>+0(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+8(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+16(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+24(SB)/8, $0x0400004004000040 +GLOBL b64_enc_mult<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/base64/encode_arm64.go b/vendor/github.com/segmentio/asm/base64/encode_arm64.go new file mode 100644 index 0000000000..b6a3814928 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_arm64.go @@ -0,0 +1,6 @@ +//go:build !purego +// +build !purego + +package base64 + +func encodeARM64(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/encode_arm64.s b/vendor/github.com/segmentio/asm/base64/encode_arm64.s new file mode 100644 index 0000000000..4654313bbd --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_arm64.s @@ -0,0 +1,97 @@ +#include "textflag.h" + +#define Rdst R0 +#define Rsrc R1 +#define Rlen R2 +#define Rwr R3 +#define Rrem R4 +#define Rtmp R5 + +#define Vlut V0 +#define Vfld0 V6 +#define Vfld1 V7 +#define Vfld2 V8 +#define Vfld3 V9 +#define Vsrc0 V10 +#define Vsrc1 V11 +#define Vsrc2 V12 +#define Vr0a V13 +#define Vr1a V14 +#define Vr2a V15 +#define Vr3a V16 +#define Vr0b V17 +#define Vr1b V18 +#define Vr2b V19 +#define Vr3b V20 + +// func encodeARM64(dst []byte, src []byte, lut *int8) (int, int) +TEXT ·encodeARM64(SB),NOSPLIT,$0-72 + // Load dst/src info + MOVD dst_base+0(FP), Rdst + MOVD src_base+24(FP), Rsrc + MOVD src_len+32(FP), Rlen + MOVD lut+48(FP), Rtmp + VLD1 (Rtmp), [Vlut.B16] + + MOVD Rlen, Rrem + MOVD Rdst, Rwr + + VMOVI $51, V1.B16 + VMOVI $26, V2.B16 + VMOVI $63, V3.B16 + VMOVI $13, V4.B16 + +loop: + VLD3.P 48(Rsrc), [Vsrc0.B16, Vsrc1.B16, 
Vsrc2.B16] + + // Split 3 source blocks into 4 lookup inputs + VUSHR $2, Vsrc0.B16, Vfld0.B16 + VUSHR $4, Vsrc1.B16, Vfld1.B16 + VUSHR $6, Vsrc2.B16, Vfld2.B16 + VSHL $4, Vsrc0.B16, Vsrc0.B16 + VSHL $2, Vsrc1.B16, Vsrc1.B16 + VORR Vsrc0.B16, Vfld1.B16, Vfld1.B16 + VORR Vsrc1.B16, Vfld2.B16, Vfld2.B16 + VAND V3.B16, Vfld1.B16, Vfld1.B16 + VAND V3.B16, Vfld2.B16, Vfld2.B16 + VAND V3.B16, Vsrc2.B16, Vfld3.B16 + + WORD $0x6e212ccd // VUQSUB V1.B16, Vfld0.B16, Vr0a.B16 + WORD $0x4e263451 // VCMGT V2.B16, Vfld0.B16, Vr0b.B16 + VAND V4.B16, Vr0b.B16, Vr0b.B16 + VORR Vr0b.B16, Vr0a.B16, Vr0a.B16 + WORD $0x6e212cee // VUQSUB V1.B16, Vfld1.B16, Vr1a.B16 + WORD $0x4e273452 // VCMGT V2.B16, Vfld1.B16, Vr1b.B16 + VAND V4.B16, Vr1b.B16, Vr1b.B16 + VORR Vr1b.B16, Vr1a.B16, Vr1a.B16 + WORD $0x6e212d0f // VUQSUB V1.B16, Vfld2.B16, Vr2a.B16 + WORD $0x4e283453 // VCMGT V2.B16, Vfld2.B16, Vr2b.B16 + VAND V4.B16, Vr2b.B16, Vr2b.B16 + VORR Vr2b.B16, Vr2a.B16, Vr2a.B16 + WORD $0x6e212d30 // VUQSUB V1.B16, Vfld3.B16, Vr3a.B16 + WORD $0x4e293454 // VCMGT V2.B16, Vfld3.B16, Vr3b.B16 + VAND V4.B16, Vr3b.B16, Vr3b.B16 + VORR Vr3b.B16, Vr3a.B16, Vr3a.B16 + + // Add result of lookup table to each field + VTBL Vr0a.B16, [Vlut.B16], Vr0a.B16 + VADD Vr0a.B16, Vfld0.B16, Vfld0.B16 + VTBL Vr1a.B16, [Vlut.B16], Vr1a.B16 + VADD Vr1a.B16, Vfld1.B16, Vfld1.B16 + VTBL Vr2a.B16, [Vlut.B16], Vr2a.B16 + VADD Vr2a.B16, Vfld2.B16, Vfld2.B16 + VTBL Vr3a.B16, [Vlut.B16], Vr3a.B16 + VADD Vr3a.B16, Vfld3.B16, Vfld3.B16 + + VST4.P [Vfld0.B16, Vfld1.B16, Vfld2.B16, Vfld3.B16], 64(Rwr) + SUB $48, Rrem + CMP $48, Rrem + BGE loop + +done: + SUB Rdst, Rwr + SUB Rrem, Rlen + MOVD Rwr, ret+56(FP) + MOVD Rlen, ret1+64(FP) + RET + diff --git a/vendor/github.com/segmentio/asm/cpu/arm/arm.go b/vendor/github.com/segmentio/asm/cpu/arm/arm.go new file mode 100644 index 0000000000..47c695a075 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm/arm.go @@ -0,0 +1,80 @@ +package arm + +import ( + 
"github.com/segmentio/asm/cpu/cpuid" + . "golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + SWP Feature = 1 << iota // SWP instruction support + HALF // Half-word load and store support + THUMB // ARM Thumb instruction set + BIT26 // Address space limited to 26-bits + FASTMUL // 32-bit operand, 64-bit result multiplication support + FPA // Floating point arithmetic support + VFP // Vector floating point support + EDSP // DSP Extensions support + JAVA // Java instruction set + IWMMXT // Intel Wireless MMX technology support + CRUNCH // MaverickCrunch context switching and handling + THUMBEE // Thumb EE instruction set + NEON // NEON instruction set + VFPv3 // Vector floating point version 3 support + VFPv3D16 // Vector floating point version 3 D8-D15 + TLS // Thread local storage support + VFPv4 // Vector floating point version 4 support + IDIVA // Integer divide instruction support in ARM mode + IDIVT // Integer divide instruction support in Thumb mode + VFPD32 // Vector floating point version 3 D15-D31 + LPAE // Large Physical Address Extensions + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(SWP, ARM.HasSWP) + cpu.set(HALF, ARM.HasHALF) + cpu.set(THUMB, ARM.HasTHUMB) + cpu.set(BIT26, ARM.Has26BIT) + cpu.set(FASTMUL, ARM.HasFASTMUL) + cpu.set(FPA, ARM.HasFPA) + cpu.set(VFP, ARM.HasVFP) + cpu.set(EDSP, ARM.HasEDSP) + cpu.set(JAVA, ARM.HasJAVA) + cpu.set(IWMMXT, ARM.HasIWMMXT) + cpu.set(CRUNCH, ARM.HasCRUNCH) + cpu.set(THUMBEE, ARM.HasTHUMBEE) + cpu.set(NEON, ARM.HasNEON) + 
cpu.set(VFPv3, ARM.HasVFPv3) + cpu.set(VFPv3D16, ARM.HasVFPv3D16) + cpu.set(TLS, ARM.HasTLS) + cpu.set(VFPv4, ARM.HasVFPv4) + cpu.set(IDIVA, ARM.HasIDIVA) + cpu.set(IDIVT, ARM.HasIDIVT) + cpu.set(VFPD32, ARM.HasVFPD32) + cpu.set(LPAE, ARM.HasLPAE) + cpu.set(EVTSTRM, ARM.HasEVTSTRM) + cpu.set(AES, ARM.HasAES) + cpu.set(PMULL, ARM.HasPMULL) + cpu.set(SHA1, ARM.HasSHA1) + cpu.set(SHA2, ARM.HasSHA2) + cpu.set(CRC32, ARM.HasCRC32) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go new file mode 100644 index 0000000000..0c5134c76e --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go @@ -0,0 +1,74 @@ +package arm64 + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . "golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + FP Feature = 1 << iota // Floating-point instruction set (always available) + ASIMD // Advanced SIMD (always available) + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation + ATOMICS // Atomic memory operation instruction set + FPHP // Half precision floating-point instruction set + ASIMDHP // Advanced SIMD half precision instruction set + CPUID // CPUID identification scheme registers + ASIMDRDM // Rounding double multiply add/subtract instruction set + JSCVT // Javascript conversion from floating-point to integer + FCMA // Floating-point multiplication and addition of complex numbers + LRCPC // Release Consistent processor consistent support + DCPOP // Persistent memory support + SHA3 // SHA3 hardware implementation + 
SM3 // SM3 hardware implementation + SM4 // SM4 hardware implementation + ASIMDDP // Advanced SIMD double precision instruction set + SHA512 // SHA512 hardware implementation + SVE // Scalable Vector Extensions + ASIMDFHM // Advanced SIMD multiplication FP16 to FP32 +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(FP, ARM64.HasFP) + cpu.set(ASIMD, ARM64.HasASIMD) + cpu.set(EVTSTRM, ARM64.HasEVTSTRM) + cpu.set(AES, ARM64.HasAES) + cpu.set(PMULL, ARM64.HasPMULL) + cpu.set(SHA1, ARM64.HasSHA1) + cpu.set(SHA2, ARM64.HasSHA2) + cpu.set(CRC32, ARM64.HasCRC32) + cpu.set(ATOMICS, ARM64.HasATOMICS) + cpu.set(FPHP, ARM64.HasFPHP) + cpu.set(ASIMDHP, ARM64.HasASIMDHP) + cpu.set(CPUID, ARM64.HasCPUID) + cpu.set(ASIMDRDM, ARM64.HasASIMDRDM) + cpu.set(JSCVT, ARM64.HasJSCVT) + cpu.set(FCMA, ARM64.HasFCMA) + cpu.set(LRCPC, ARM64.HasLRCPC) + cpu.set(DCPOP, ARM64.HasDCPOP) + cpu.set(SHA3, ARM64.HasSHA3) + cpu.set(SM3, ARM64.HasSM3) + cpu.set(SM4, ARM64.HasSM4) + cpu.set(ASIMDDP, ARM64.HasASIMDDP) + cpu.set(SHA512, ARM64.HasSHA512) + cpu.set(SVE, ARM64.HasSVE) + cpu.set(ASIMDFHM, ARM64.HasASIMDFHM) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/cpu.go b/vendor/github.com/segmentio/asm/cpu/cpu.go new file mode 100644 index 0000000000..6ddf4973f5 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpu.go @@ -0,0 +1,22 @@ +// Pakage cpu provides APIs to detect CPU features available at runtime. +package cpu + +import ( + "github.com/segmentio/asm/cpu/arm" + "github.com/segmentio/asm/cpu/arm64" + "github.com/segmentio/asm/cpu/x86" +) + +var ( + // X86 is the bitset representing the set of the x86 instruction sets are + // supported by the CPU. + X86 = x86.ABI() + + // ARM is the bitset representing which parts of the arm instruction sets + // are supported by the CPU. + ARM = arm.ABI() + + // ARM64 is the bitset representing which parts of the arm64 instruction + // sets are supported by the CPU. 
+ ARM64 = arm64.ABI() +) diff --git a/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go new file mode 100644 index 0000000000..0949d3d584 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go @@ -0,0 +1,32 @@ +// Package cpuid provides generic types used to represent CPU features supported +// by the architecture. +package cpuid + +// CPU is a bitset of feature flags representing the capabilities of various CPU +// architeectures that this package provides optimized assembly routines for. +// +// The intent is to provide a stable ABI between the Go code that generate the +// assembly, and the program that uses the library functions. +type CPU uint64 + +// Feature represents a single CPU feature. +type Feature uint64 + +const ( + // None is a Feature value that has no CPU features enabled. + None Feature = 0 + // All is a Feature value that has all CPU features enabled. + All Feature = 0xFFFFFFFFFFFFFFFF +) + +func (cpu CPU) Has(feature Feature) bool { + return (Feature(cpu) & feature) == feature +} + +func (cpu *CPU) Set(feature Feature, enabled bool) { + if enabled { + *cpu |= CPU(feature) + } else { + *cpu &= ^CPU(feature) + } +} diff --git a/vendor/github.com/segmentio/asm/cpu/x86/x86.go b/vendor/github.com/segmentio/asm/cpu/x86/x86.go new file mode 100644 index 0000000000..9e93537583 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/x86/x86.go @@ -0,0 +1,76 @@ +package x86 + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . 
"golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + SSE Feature = 1 << iota // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE41 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + AVX // AVX functions + AVX2 // AVX2 functions + AVX512BF16 // AVX-512 BFLOAT16 Instructions + AVX512BITALG // AVX-512 Bit Algorithms + AVX512BW // AVX-512 Byte and Word Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512F // AVX-512 Foundation + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 + AVX512VL // AVX-512 Vector Length Extensions + AVX512VNNI // AVX-512 Vector Neural Network Instructions + AVX512VP2INTERSECT // AVX-512 Intersect for D/Q + AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + CMOV // Conditional move +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SEE? + cpu.set(SSE2, X86.HasSSE2) + cpu.set(SSE3, X86.HasSSE3) + cpu.set(SSE41, X86.HasSSE41) + cpu.set(SSE42, X86.HasSSE42) + cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu? 
+ cpu.set(SSSE3, X86.HasSSSE3) + cpu.set(AVX, X86.HasAVX) + cpu.set(AVX2, X86.HasAVX2) + cpu.set(AVX512BF16, X86.HasAVX512BF16) + cpu.set(AVX512BITALG, X86.HasAVX512BITALG) + cpu.set(AVX512BW, X86.HasAVX512BW) + cpu.set(AVX512CD, X86.HasAVX512CD) + cpu.set(AVX512DQ, X86.HasAVX512DQ) + cpu.set(AVX512ER, X86.HasAVX512ER) + cpu.set(AVX512F, X86.HasAVX512F) + cpu.set(AVX512IFMA, X86.HasAVX512IFMA) + cpu.set(AVX512PF, X86.HasAVX512PF) + cpu.set(AVX512VBMI, X86.HasAVX512VBMI) + cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2) + cpu.set(AVX512VL, X86.HasAVX512VL) + cpu.set(AVX512VNNI, X86.HasAVX512VNNI) + cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu? + cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ) + cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV? + return cpu +} diff --git a/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go new file mode 100644 index 0000000000..913c9cc68b --- /dev/null +++ b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go @@ -0,0 +1,20 @@ +package unsafebytes + +import "unsafe" + +func Pointer(b []byte) *byte { + return *(**byte)(unsafe.Pointer(&b)) +} + +func String(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func BytesOf(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{str: s, cap: len(s)})) +} + +type sliceHeader struct { + str string + cap int +} diff --git a/vendor/github.com/valyala/fastjson/.gitignore b/vendor/github.com/valyala/fastjson/.gitignore new file mode 100644 index 0000000000..6e92f57d46 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/.gitignore @@ -0,0 +1 @@ +tags diff --git a/vendor/github.com/valyala/fastjson/.travis.yml b/vendor/github.com/valyala/fastjson/.travis.yml new file mode 100644 index 0000000000..472a82190c --- /dev/null +++ b/vendor/github.com/valyala/fastjson/.travis.yml @@ -0,0 +1,19 @@ +language: go + 
+go: + - 1.10.x + +script: + # build test for supported platforms + - GOOS=linux go build + - GOOS=darwin go build + - GOOS=freebsd go build + - GOOS=windows go build + + # run tests on a standard platform + - go test -v ./... -coverprofile=coverage.txt -covermode=atomic + - go test -v ./... -race + +after_success: + # Upload coverage results to codecov.io + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/valyala/fastjson/LICENSE b/vendor/github.com/valyala/fastjson/LICENSE new file mode 100644 index 0000000000..6f665f3e29 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2018 Aliaksandr Valialkin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/github.com/valyala/fastjson/README.md b/vendor/github.com/valyala/fastjson/README.md new file mode 100644 index 0000000000..f32c693937 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/README.md @@ -0,0 +1,227 @@ +[![Build Status](https://travis-ci.org/valyala/fastjson.svg)](https://travis-ci.org/valyala/fastjson) +[![GoDoc](https://godoc.org/github.com/valyala/fastjson?status.svg)](http://godoc.org/github.com/valyala/fastjson) +[![Go Report](https://goreportcard.com/badge/github.com/valyala/fastjson)](https://goreportcard.com/report/github.com/valyala/fastjson) +[![codecov](https://codecov.io/gh/valyala/fastjson/branch/master/graph/badge.svg)](https://codecov.io/gh/valyala/fastjson) + +# fastjson - fast JSON parser and validator for Go + + +## Features + + * Fast. As usual, up to 15x faster than the standard [encoding/json](https://golang.org/pkg/encoding/json/). + See [benchmarks](#benchmarks). + * Parses arbitrary JSON without schema, reflection, struct magic and code generation + contrary to [easyjson](https://github.com/mailru/easyjson). + * Provides simple [API](http://godoc.org/github.com/valyala/fastjson). + * Outperforms [jsonparser](https://github.com/buger/jsonparser) and [gjson](https://github.com/tidwall/gjson) + when accessing multiple unrelated fields, since `fastjson` parses the input JSON only once. + * Validates the parsed JSON unlike [jsonparser](https://github.com/buger/jsonparser) + and [gjson](https://github.com/tidwall/gjson). + * May quickly extract a part of the original JSON with `Value.Get(...).MarshalTo` and modify it + with [Del](https://godoc.org/github.com/valyala/fastjson#Value.Del) + and [Set](https://godoc.org/github.com/valyala/fastjson#Value.Set) functions. + * May parse array containing values with distinct types (aka non-homogenous types). + For instance, `fastjson` easily parses the following JSON array `[123, "foo", [456], {"k": "v"}, null]`. 
+ * `fastjson` preserves the original order of object items when calling + [Object.Visit](https://godoc.org/github.com/valyala/fastjson#Object.Visit). + + +## Known limitations + + * Requies extra care to work with - references to certain objects recursively + returned by [Parser](https://godoc.org/github.com/valyala/fastjson#Parser) + must be released before the next call to [Parse](https://godoc.org/github.com/valyala/fastjson#Parser.Parse). + Otherwise the program may work improperly. The same applies to objects returned by [Arena](https://godoc.org/github.com/valyala/fastjson#Arena). + Adhere recommendations from [docs](https://godoc.org/github.com/valyala/fastjson). + * Cannot parse JSON from `io.Reader`. There is [Scanner](https://godoc.org/github.com/valyala/fastjson#Scanner) + for parsing stream of JSON values from a string. + + +## Usage + +One-liner accessing a single field: +```go + s := []byte(`{"foo": [123, "bar"]}`) + fmt.Printf("foo.0=%d\n", fastjson.GetInt(s, "foo", "0")) + + // Output: + // foo.0=123 +``` + +Accessing multiple fields with error handling: +```go + var p fastjson.Parser + v, err := p.Parse(`{ + "str": "bar", + "int": 123, + "float": 1.23, + "bool": true, + "arr": [1, "foo", {}] + }`) + if err != nil { + log.Fatal(err) + } + fmt.Printf("foo=%s\n", v.GetStringBytes("str")) + fmt.Printf("int=%d\n", v.GetInt("int")) + fmt.Printf("float=%f\n", v.GetFloat64("float")) + fmt.Printf("bool=%v\n", v.GetBool("bool")) + fmt.Printf("arr.1=%s\n", v.GetStringBytes("arr", "1")) + + // Output: + // foo=bar + // int=123 + // float=1.230000 + // bool=true + // arr.1=foo +``` + +See also [examples](https://godoc.org/github.com/valyala/fastjson#pkg-examples). + + +## Security + + * `fastjson` shouldn't crash or panic when parsing input strings specially crafted + by an attacker. It must return error on invalid input JSON. + * `fastjson` requires up to `sizeof(Value) * len(inputJSON)` bytes of memory + for parsing `inputJSON` string. 
Limit the maximum size of the `inputJSON` + before parsing it in order to limit the maximum memory usage. + + +## Performance optimization tips + + * Re-use [Parser](https://godoc.org/github.com/valyala/fastjson#Parser) and [Scanner](https://godoc.org/github.com/valyala/fastjson#Scanner) + for parsing many JSONs. This reduces memory allocations overhead. + [ParserPool](https://godoc.org/github.com/valyala/fastjson#ParserPool) may be useful in this case. + * Prefer calling `Value.Get*` on the value returned from [Parser](https://godoc.org/github.com/valyala/fastjson#Parser) + instead of calling `Get*` one-liners when multiple fields + must be obtained from JSON, since each `Get*` one-liner re-parses + the input JSON again. + * Prefer calling once [Value.Get](https://godoc.org/github.com/valyala/fastjson#Value.Get) + for common prefix paths and then calling `Value.Get*` on the returned value + for distinct suffix paths. + * Prefer iterating over array returned from [Value.GetArray](https://godoc.org/github.com/valyala/fastjson#Object.Visit) + with a range loop instead of calling `Value.Get*` for each array item. + +## Fuzzing +Install [go-fuzz](https://github.com/dvyukov/go-fuzz) & optionally the go-fuzz-corpus. + +```bash +go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build +``` + +Build using `go-fuzz-build` and run `go-fuzz` with an optional corpus. + +```bash +mkdir -p workdir/corpus +cp $GOPATH/src/github.com/dvyukov/go-fuzz-corpus/json/corpus/* workdir/corpus +go-fuzz-build github.com/valyala/fastjson +go-fuzz -bin=fastjson-fuzz.zip -workdir=workdir +``` + +## Benchmarks + +Go 1.12 has been used for benchmarking. + +Legend: + + * `small` - parse [small.json](testdata/small.json) (190 bytes). + * `medium` - parse [medium.json](testdata/medium.json) (2.3KB). + * `large` - parse [large.json](testdata/large.json) (28KB). + * `canada` - parse [canada.json](testdata/canada.json) (2.2MB). 
+ * `citm` - parse [citm_catalog.json](testdata/citm_catalog.json) (1.7MB). + * `twitter` - parse [twitter.json](testdata/twitter.json) (617KB). + + * `stdjson-map` - parse into a `map[string]interface{}` using `encoding/json`. + * `stdjson-struct` - parse into a struct containing + a subset of fields of the parsed JSON, using `encoding/json`. + * `stdjson-empty-struct` - parse into an empty struct using `encoding/json`. + This is the fastest possible solution for `encoding/json`, may be used + for json validation. See also benchmark results for json validation. + * `fastjson` - parse using `fastjson` without fields access. + * `fastjson-get` - parse using `fastjson` with fields access similar to `stdjson-struct`. + +``` +$ GOMAXPROCS=1 go test github.com/valyala/fastjson -bench='Parse$' +goos: linux +goarch: amd64 +pkg: github.com/valyala/fastjson +BenchmarkParse/small/stdjson-map 200000 7305 ns/op 26.01 MB/s 960 B/op 51 allocs/op +BenchmarkParse/small/stdjson-struct 500000 3431 ns/op 55.37 MB/s 224 B/op 4 allocs/op +BenchmarkParse/small/stdjson-empty-struct 500000 2273 ns/op 83.58 MB/s 168 B/op 2 allocs/op +BenchmarkParse/small/fastjson 5000000 347 ns/op 547.53 MB/s 0 B/op 0 allocs/op +BenchmarkParse/small/fastjson-get 2000000 620 ns/op 306.39 MB/s 0 B/op 0 allocs/op +BenchmarkParse/medium/stdjson-map 30000 40672 ns/op 57.26 MB/s 10196 B/op 208 allocs/op +BenchmarkParse/medium/stdjson-struct 30000 47792 ns/op 48.73 MB/s 9174 B/op 258 allocs/op +BenchmarkParse/medium/stdjson-empty-struct 100000 22096 ns/op 105.40 MB/s 280 B/op 5 allocs/op +BenchmarkParse/medium/fastjson 500000 3025 ns/op 769.90 MB/s 0 B/op 0 allocs/op +BenchmarkParse/medium/fastjson-get 500000 3211 ns/op 725.20 MB/s 0 B/op 0 allocs/op +BenchmarkParse/large/stdjson-map 2000 614079 ns/op 45.79 MB/s 210734 B/op 2785 allocs/op +BenchmarkParse/large/stdjson-struct 5000 298554 ns/op 94.18 MB/s 15616 B/op 353 allocs/op +BenchmarkParse/large/stdjson-empty-struct 5000 268577 ns/op 104.69 MB/s 280 B/op 5 
allocs/op +BenchmarkParse/large/fastjson 50000 35210 ns/op 798.56 MB/s 5 B/op 0 allocs/op +BenchmarkParse/large/fastjson-get 50000 35171 ns/op 799.46 MB/s 5 B/op 0 allocs/op +BenchmarkParse/canada/stdjson-map 20 68147307 ns/op 33.03 MB/s 12260502 B/op 392539 allocs/op +BenchmarkParse/canada/stdjson-struct 20 68044518 ns/op 33.08 MB/s 12260123 B/op 392534 allocs/op +BenchmarkParse/canada/stdjson-empty-struct 100 17709250 ns/op 127.11 MB/s 280 B/op 5 allocs/op +BenchmarkParse/canada/fastjson 300 4182404 ns/op 538.22 MB/s 254902 B/op 381 allocs/op +BenchmarkParse/canada/fastjson-get 300 4274744 ns/op 526.60 MB/s 254902 B/op 381 allocs/op +BenchmarkParse/citm/stdjson-map 50 27772612 ns/op 62.19 MB/s 5214163 B/op 95402 allocs/op +BenchmarkParse/citm/stdjson-struct 100 14936191 ns/op 115.64 MB/s 1989 B/op 75 allocs/op +BenchmarkParse/citm/stdjson-empty-struct 100 14946034 ns/op 115.56 MB/s 280 B/op 5 allocs/op +BenchmarkParse/citm/fastjson 1000 1879714 ns/op 918.87 MB/s 17628 B/op 30 allocs/op +BenchmarkParse/citm/fastjson-get 1000 1881598 ns/op 917.94 MB/s 17628 B/op 30 allocs/op +BenchmarkParse/twitter/stdjson-map 100 11289146 ns/op 55.94 MB/s 2187878 B/op 31266 allocs/op +BenchmarkParse/twitter/stdjson-struct 300 5779442 ns/op 109.27 MB/s 408 B/op 6 allocs/op +BenchmarkParse/twitter/stdjson-empty-struct 300 5738504 ns/op 110.05 MB/s 408 B/op 6 allocs/op +BenchmarkParse/twitter/fastjson 2000 774042 ns/op 815.86 MB/s 2541 B/op 2 allocs/op +BenchmarkParse/twitter/fastjson-get 2000 777833 ns/op 811.89 MB/s 2541 B/op 2 allocs/op +``` + +Benchmark results for json validation: + +``` +$ GOMAXPROCS=1 go test github.com/valyala/fastjson -bench='Validate$' +goos: linux +goarch: amd64 +pkg: github.com/valyala/fastjson +BenchmarkValidate/small/stdjson 2000000 955 ns/op 198.83 MB/s 72 B/op 2 allocs/op +BenchmarkValidate/small/fastjson 5000000 384 ns/op 493.60 MB/s 0 B/op 0 allocs/op +BenchmarkValidate/medium/stdjson 200000 10799 ns/op 215.66 MB/s 184 B/op 5 allocs/op 
+BenchmarkValidate/medium/fastjson 300000 3809 ns/op 611.30 MB/s 0 B/op 0 allocs/op +BenchmarkValidate/large/stdjson 10000 133064 ns/op 211.31 MB/s 184 B/op 5 allocs/op +BenchmarkValidate/large/fastjson 30000 45268 ns/op 621.14 MB/s 0 B/op 0 allocs/op +BenchmarkValidate/canada/stdjson 200 8470904 ns/op 265.74 MB/s 184 B/op 5 allocs/op +BenchmarkValidate/canada/fastjson 500 2973377 ns/op 757.07 MB/s 0 B/op 0 allocs/op +BenchmarkValidate/citm/stdjson 200 7273172 ns/op 237.48 MB/s 184 B/op 5 allocs/op +BenchmarkValidate/citm/fastjson 1000 1684430 ns/op 1025.39 MB/s 0 B/op 0 allocs/op +BenchmarkValidate/twitter/stdjson 500 2849439 ns/op 221.63 MB/s 312 B/op 6 allocs/op +BenchmarkValidate/twitter/fastjson 2000 1036796 ns/op 609.10 MB/s 0 B/op 0 allocs/op +``` + +## FAQ + + * Q: _There are a ton of other high-perf packages for JSON parsing in Go. Why creating yet another package?_ + A: Because other packages require either rigid JSON schema via struct magic + and code generation or perform poorly when multiple unrelated fields + must be obtained from the parsed JSON. + Additionally, `fastjson` provides nicer [API](http://godoc.org/github.com/valyala/fastjson). + + * Q: _What is the main purpose for `fastjson`?_ + A: High-perf JSON parsing for [RTB](https://www.iab.com/wp-content/uploads/2015/05/OpenRTB_API_Specification_Version_2_3_1.pdf) + and other [JSON-RPC](https://en.wikipedia.org/wiki/JSON-RPC) services. + + * Q: _Why fastjson doesn't provide fast marshaling (serialization)?_ + A: Actually it provides some sort of marshaling - see [Value.MarshalTo](https://godoc.org/github.com/valyala/fastjson#Value.MarshalTo). + But I'd recommend using [quicktemplate](https://github.com/valyala/quicktemplate#use-cases) + for high-performance JSON marshaling :) + + * Q: _`fastjson` crashes my program!_ + A: There is high probability of improper use. 
+ * Make sure you don't hold references to objects recursively returned by `Parser` / `Scanner` + beyond the next `Parser.Parse` / `Scanner.Next` call + if such restriction is mentioned in [docs](https://github.com/valyala/fastjson/issues/new). + * Make sure you don't access `fastjson` objects from concurrently running goroutines + if such restriction is mentioned in [docs](https://github.com/valyala/fastjson/issues/new). + * Build and run your program with [-race](https://golang.org/doc/articles/race_detector.html) flag. + Make sure the race detector detects zero races. + * If your program continue crashing after fixing issues mentioned above, [file a bug](https://github.com/valyala/fastjson/issues/new). diff --git a/vendor/github.com/valyala/fastjson/arena.go b/vendor/github.com/valyala/fastjson/arena.go new file mode 100644 index 0000000000..1a512d5f35 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/arena.go @@ -0,0 +1,126 @@ +package fastjson + +import ( + "strconv" +) + +// Arena may be used for fast creation and re-use of Values. +// +// Typical Arena lifecycle: +// +// 1. Construct Values via the Arena and Value.Set* calls. +// 2. Marshal the constructed Values with Value.MarshalTo call. +// 3. Reset all the constructed Values at once by Arena.Reset call. +// 4. Go to 1 and re-use the Arena. +// +// It is unsafe calling Arena methods from concurrent goroutines. +// Use per-goroutine Arenas or ArenaPool instead. +type Arena struct { + b []byte + c cache +} + +// Reset resets all the Values allocated by a. +// +// Values previously allocated by a cannot be used after the Reset call. +func (a *Arena) Reset() { + a.b = a.b[:0] + a.c.reset() +} + +// NewObject returns new empty object value. +// +// New entries may be added to the returned object via Set call. +// +// The returned object is valid until Reset is called on a. 
+func (a *Arena) NewObject() *Value { + v := a.c.getValue() + v.t = TypeObject + v.o.reset() + return v +} + +// NewArray returns new empty array value. +// +// New entries may be added to the returned array via Set* calls. +// +// The returned array is valid until Reset is called on a. +func (a *Arena) NewArray() *Value { + v := a.c.getValue() + v.t = TypeArray + v.a = v.a[:0] + return v +} + +// NewString returns new string value containing s. +// +// The returned string is valid until Reset is called on a. +func (a *Arena) NewString(s string) *Value { + v := a.c.getValue() + v.t = typeRawString + bLen := len(a.b) + a.b = escapeString(a.b, s) + v.s = b2s(a.b[bLen+1 : len(a.b)-1]) + return v +} + +// NewStringBytes returns new string value containing b. +// +// The returned string is valid until Reset is called on a. +func (a *Arena) NewStringBytes(b []byte) *Value { + v := a.c.getValue() + v.t = typeRawString + bLen := len(a.b) + a.b = escapeString(a.b, b2s(b)) + v.s = b2s(a.b[bLen+1 : len(a.b)-1]) + return v +} + +// NewNumberFloat64 returns new number value containing f. +// +// The returned number is valid until Reset is called on a. +func (a *Arena) NewNumberFloat64(f float64) *Value { + v := a.c.getValue() + v.t = TypeNumber + bLen := len(a.b) + a.b = strconv.AppendFloat(a.b, f, 'g', -1, 64) + v.s = b2s(a.b[bLen:]) + return v +} + +// NewNumberInt returns new number value containing n. +// +// The returned number is valid until Reset is called on a. +func (a *Arena) NewNumberInt(n int) *Value { + v := a.c.getValue() + v.t = TypeNumber + bLen := len(a.b) + a.b = strconv.AppendInt(a.b, int64(n), 10) + v.s = b2s(a.b[bLen:]) + return v +} + +// NewNumberString returns new number value containing s. +// +// The returned number is valid until Reset is called on a. +func (a *Arena) NewNumberString(s string) *Value { + v := a.c.getValue() + v.t = TypeNumber + v.s = s + return v +} + +// NewNull returns null value. 
+func (a *Arena) NewNull() *Value { + return valueNull +} + +// NewTrue returns true value. +func (a *Arena) NewTrue() *Value { + return valueTrue +} + +// NewFalse return false value. +func (a *Arena) NewFalse() *Value { + return valueFalse +} diff --git a/vendor/github.com/valyala/fastjson/doc.go b/vendor/github.com/valyala/fastjson/doc.go new file mode 100644 index 0000000000..3dbff364d1 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/doc.go @@ -0,0 +1,8 @@ +/* +Package fastjson provides fast JSON parsing. + +Arbitrary JSON may be parsed by fastjson without the need for creating structs +or for generating go code. Just parse JSON and get the required fields with +Get* functions. +*/ +package fastjson diff --git a/vendor/github.com/valyala/fastjson/fastfloat/parse.go b/vendor/github.com/valyala/fastjson/fastfloat/parse.go new file mode 100644 index 0000000000..b37838da62 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/fastfloat/parse.go @@ -0,0 +1,515 @@ +package fastfloat + +import ( + "fmt" + "math" + "strconv" + "strings" +) + +// ParseUint64BestEffort parses uint64 number s. +// +// It is equivalent to strconv.ParseUint(s, 10, 64), but is faster. +// +// 0 is returned if the number cannot be parsed. +// See also ParseUint64, which returns parse error if the number cannot be parsed. +func ParseUint64BestEffort(s string) uint64 { + if len(s) == 0 { + return 0 + } + i := uint(0) + d := uint64(0) + j := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + d = d*10 + uint64(s[i]-'0') + i++ + if i > 18 { + // The integer part may be out of range for uint64. + // Fall back to slow parsing. + dd, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return 0 + } + return dd + } + continue + } + break + } + if i <= j { + return 0 + } + if i < uint(len(s)) { + // Unparsed tail left. + return 0 + } + return d +} + +// ParseUint64 parses uint64 from s. +// +// It is equivalent to strconv.ParseUint(s, 10, 64), but is faster. 
+// +// See also ParseUint64BestEffort. +func ParseUint64(s string) (uint64, error) { + if len(s) == 0 { + return 0, fmt.Errorf("cannot parse uint64 from empty string") + } + i := uint(0) + d := uint64(0) + j := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + d = d*10 + uint64(s[i]-'0') + i++ + if i > 18 { + // The integer part may be out of range for uint64. + // Fall back to slow parsing. + dd, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, err + } + return dd, nil + } + continue + } + break + } + if i <= j { + return 0, fmt.Errorf("cannot parse uint64 from %q", s) + } + if i < uint(len(s)) { + // Unparsed tail left. + return 0, fmt.Errorf("unparsed tail left after parsing uint64 from %q: %q", s, s[i:]) + } + return d, nil +} + +// ParseInt64BestEffort parses int64 number s. +// +// It is equivalent to strconv.ParseInt(s, 10, 64), but is faster. +// +// 0 is returned if the number cannot be parsed. +// See also ParseInt64, which returns parse error if the number cannot be parsed. +func ParseInt64BestEffort(s string) int64 { + if len(s) == 0 { + return 0 + } + i := uint(0) + minus := s[0] == '-' + if minus { + i++ + if i >= uint(len(s)) { + return 0 + } + } + + d := int64(0) + j := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + d = d*10 + int64(s[i]-'0') + i++ + if i > 18 { + // The integer part may be out of range for int64. + // Fall back to slow parsing. + dd, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0 + } + return dd + } + continue + } + break + } + if i <= j { + return 0 + } + if i < uint(len(s)) { + // Unparsed tail left. + return 0 + } + if minus { + d = -d + } + return d +} + +// ParseInt64 parses int64 number s. +// +// It is equivalent to strconv.ParseInt(s, 10, 64), but is faster. +// +// See also ParseInt64BestEffort. 
+func ParseInt64(s string) (int64, error) { + if len(s) == 0 { + return 0, fmt.Errorf("cannot parse int64 from empty string") + } + i := uint(0) + minus := s[0] == '-' + if minus { + i++ + if i >= uint(len(s)) { + return 0, fmt.Errorf("cannot parse int64 from %q", s) + } + } + + d := int64(0) + j := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + d = d*10 + int64(s[i]-'0') + i++ + if i > 18 { + // The integer part may be out of range for int64. + // Fall back to slow parsing. + dd, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, err + } + return dd, nil + } + continue + } + break + } + if i <= j { + return 0, fmt.Errorf("cannot parse int64 from %q", s) + } + if i < uint(len(s)) { + // Unparsed tail left. + return 0, fmt.Errorf("unparsed tail left after parsing int64 form %q: %q", s, s[i:]) + } + if minus { + d = -d + } + return d, nil +} + +// Exact powers of 10. +// +// This works faster than math.Pow10, since it avoids additional multiplication. +var float64pow10 = [...]float64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, +} + +// ParseBestEffort parses floating-point number s. +// +// It is equivalent to strconv.ParseFloat(s, 64), but is faster. +// +// 0 is returned if the number cannot be parsed. +// See also Parse, which returns parse error if the number cannot be parsed. +func ParseBestEffort(s string) float64 { + if len(s) == 0 { + return 0 + } + i := uint(0) + minus := s[0] == '-' + if minus { + i++ + if i >= uint(len(s)) { + return 0 + } + } + + // the integer part might be elided to remain compliant + // with https://go.dev/ref/spec#Floating-point_literals + if s[i] == '.' && (i+1 >= uint(len(s)) || s[i+1] < '0' || s[i+1] > '9') { + return 0 + } + + d := uint64(0) + j := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + d = d*10 + uint64(s[i]-'0') + i++ + if i > 18 { + // The integer part may be out of range for uint64. + // Fall back to slow parsing. 
+ f, err := strconv.ParseFloat(s, 64) + if err != nil && !math.IsInf(f, 0) { + return 0 + } + return f + } + continue + } + break + } + if i <= j && s[i] != '.' { + s = s[i:] + if strings.HasPrefix(s, "+") { + s = s[1:] + } + // "infinity" is needed for OpenMetrics support. + // See https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md + if strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") { + if minus { + return -inf + } + return inf + } + if strings.EqualFold(s, "nan") { + return nan + } + return 0 + } + f := float64(d) + if i >= uint(len(s)) { + // Fast path - just integer. + if minus { + f = -f + } + return f + } + + if s[i] == '.' { + // Parse fractional part. + i++ + if i >= uint(len(s)) { + // the fractional part may be elided to remain compliant + // with https://go.dev/ref/spec#Floating-point_literals + return f + } + k := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + d = d*10 + uint64(s[i]-'0') + i++ + if i-j >= uint(len(float64pow10)) { + // The mantissa is out of range. Fall back to standard parsing. + f, err := strconv.ParseFloat(s, 64) + if err != nil && !math.IsInf(f, 0) { + return 0 + } + return f + } + continue + } + break + } + if i < k { + return 0 + } + // Convert the entire mantissa to a float at once to avoid rounding errors. + f = float64(d) / float64pow10[i-k] + if i >= uint(len(s)) { + // Fast path - parsed fractional number. + if minus { + f = -f + } + return f + } + } + if s[i] == 'e' || s[i] == 'E' { + // Parse exponent part. + i++ + if i >= uint(len(s)) { + return 0 + } + expMinus := false + if s[i] == '+' || s[i] == '-' { + expMinus = s[i] == '-' + i++ + if i >= uint(len(s)) { + return 0 + } + } + exp := int16(0) + j := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + exp = exp*10 + int16(s[i]-'0') + i++ + if exp > 300 { + // The exponent may be too big for float64. + // Fall back to standard parsing. 
+ f, err := strconv.ParseFloat(s, 64) + if err != nil && !math.IsInf(f, 0) { + return 0 + } + return f + } + continue + } + break + } + if i <= j { + return 0 + } + if expMinus { + exp = -exp + } + f *= math.Pow10(int(exp)) + if i >= uint(len(s)) { + if minus { + f = -f + } + return f + } + } + return 0 +} + +// Parse parses floating-point number s. +// +// It is equivalent to strconv.ParseFloat(s, 64), but is faster. +// +// See also ParseBestEffort. +func Parse(s string) (float64, error) { + if len(s) == 0 { + return 0, fmt.Errorf("cannot parse float64 from empty string") + } + i := uint(0) + minus := s[0] == '-' + if minus { + i++ + if i >= uint(len(s)) { + return 0, fmt.Errorf("cannot parse float64 from %q", s) + } + } + + // the integer part might be elided to remain compliant + // with https://go.dev/ref/spec#Floating-point_literals + if s[i] == '.' && (i+1 >= uint(len(s)) || s[i+1] < '0' || s[i+1] > '9') { + return 0, fmt.Errorf("missing integer and fractional part in %q", s) + } + + d := uint64(0) + j := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + d = d*10 + uint64(s[i]-'0') + i++ + if i > 18 { + // The integer part may be out of range for uint64. + // Fall back to slow parsing. + f, err := strconv.ParseFloat(s, 64) + if err != nil && !math.IsInf(f, 0) { + return 0, err + } + return f, nil + } + continue + } + break + } + if i <= j && s[i] != '.' { + ss := s[i:] + if strings.HasPrefix(ss, "+") { + ss = ss[1:] + } + // "infinity" is needed for OpenMetrics support. + // See https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md + if strings.EqualFold(ss, "inf") || strings.EqualFold(ss, "infinity") { + if minus { + return -inf, nil + } + return inf, nil + } + if strings.EqualFold(ss, "nan") { + return nan, nil + } + return 0, fmt.Errorf("unparsed tail left after parsing float64 from %q: %q", s, ss) + } + f := float64(d) + if i >= uint(len(s)) { + // Fast path - just integer. 
+ if minus { + f = -f + } + return f, nil + } + + if s[i] == '.' { + // Parse fractional part. + i++ + if i >= uint(len(s)) { + // the fractional part might be elided to remain compliant + // with https://go.dev/ref/spec#Floating-point_literals + return f, nil + } + k := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + d = d*10 + uint64(s[i]-'0') + i++ + if i-j >= uint(len(float64pow10)) { + // The mantissa is out of range. Fall back to standard parsing. + f, err := strconv.ParseFloat(s, 64) + if err != nil && !math.IsInf(f, 0) { + return 0, fmt.Errorf("cannot parse mantissa in %q: %s", s, err) + } + return f, nil + } + continue + } + break + } + if i < k { + return 0, fmt.Errorf("cannot find mantissa in %q", s) + } + // Convert the entire mantissa to a float at once to avoid rounding errors. + f = float64(d) / float64pow10[i-k] + if i >= uint(len(s)) { + // Fast path - parsed fractional number. + if minus { + f = -f + } + return f, nil + } + } + if s[i] == 'e' || s[i] == 'E' { + // Parse exponent part. + i++ + if i >= uint(len(s)) { + return 0, fmt.Errorf("cannot parse exponent in %q", s) + } + expMinus := false + if s[i] == '+' || s[i] == '-' { + expMinus = s[i] == '-' + i++ + if i >= uint(len(s)) { + return 0, fmt.Errorf("cannot parse exponent in %q", s) + } + } + exp := int16(0) + j := i + for i < uint(len(s)) { + if s[i] >= '0' && s[i] <= '9' { + exp = exp*10 + int16(s[i]-'0') + i++ + if exp > 300 { + // The exponent may be too big for float64. + // Fall back to standard parsing. 
+ f, err := strconv.ParseFloat(s, 64) + if err != nil && !math.IsInf(f, 0) { + return 0, fmt.Errorf("cannot parse exponent in %q: %s", s, err) + } + return f, nil + } + continue + } + break + } + if i <= j { + return 0, fmt.Errorf("cannot parse exponent in %q", s) + } + if expMinus { + exp = -exp + } + f *= math.Pow10(int(exp)) + if i >= uint(len(s)) { + if minus { + f = -f + } + return f, nil + } + } + return 0, fmt.Errorf("cannot parse float64 from %q", s) +} + +var inf = math.Inf(1) +var nan = math.NaN() diff --git a/vendor/github.com/valyala/fastjson/fuzz.go b/vendor/github.com/valyala/fastjson/fuzz.go new file mode 100644 index 0000000000..d9da1f1abc --- /dev/null +++ b/vendor/github.com/valyala/fastjson/fuzz.go @@ -0,0 +1,23 @@ +//go:build gofuzz +// +build gofuzz + +package fastjson + +func Fuzz(data []byte) int { + err := ValidateBytes(data) + if err != nil { + return 0 + } + + v := MustParseBytes(data) + + dst := make([]byte, 0) + dst = v.MarshalTo(dst) + + err = ValidateBytes(dst) + if err != nil { + panic(err) + } + + return 1 +} diff --git a/vendor/github.com/valyala/fastjson/handy.go b/vendor/github.com/valyala/fastjson/handy.go new file mode 100644 index 0000000000..a5d5618f09 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/handy.go @@ -0,0 +1,170 @@ +package fastjson + +var handyPool ParserPool + +// GetString returns string value for the field identified by keys path +// in JSON data. +// +// Array indexes may be represented as decimal numbers in keys. +// +// An empty string is returned on error. Use Parser for proper error handling. +// +// Parser is faster for obtaining multiple fields from JSON. +func GetString(data []byte, keys ...string) string { + p := handyPool.Get() + v, err := p.ParseBytes(data) + if err != nil { + handyPool.Put(p) + return "" + } + sb := v.GetStringBytes(keys...) + str := string(sb) + handyPool.Put(p) + return str +} + +// GetBytes returns string value for the field identified by keys path +// in JSON data. 
+// +// Array indexes may be represented as decimal numbers in keys. +// +// nil is returned on error. Use Parser for proper error handling. +// +// Parser is faster for obtaining multiple fields from JSON. +func GetBytes(data []byte, keys ...string) []byte { + p := handyPool.Get() + v, err := p.ParseBytes(data) + if err != nil { + handyPool.Put(p) + return nil + } + sb := v.GetStringBytes(keys...) + + // Make a copy of sb, since sb belongs to p. + var b []byte + if sb != nil { + b = append(b, sb...) + } + + handyPool.Put(p) + return b +} + +// GetInt returns int value for the field identified by keys path +// in JSON data. +// +// Array indexes may be represented as decimal numbers in keys. +// +// 0 is returned on error. Use Parser for proper error handling. +// +// Parser is faster for obtaining multiple fields from JSON. +func GetInt(data []byte, keys ...string) int { + p := handyPool.Get() + v, err := p.ParseBytes(data) + if err != nil { + handyPool.Put(p) + return 0 + } + n := v.GetInt(keys...) + handyPool.Put(p) + return n +} + +// GetFloat64 returns float64 value for the field identified by keys path +// in JSON data. +// +// Array indexes may be represented as decimal numbers in keys. +// +// 0 is returned on error. Use Parser for proper error handling. +// +// Parser is faster for obtaining multiple fields from JSON. +func GetFloat64(data []byte, keys ...string) float64 { + p := handyPool.Get() + v, err := p.ParseBytes(data) + if err != nil { + handyPool.Put(p) + return 0 + } + f := v.GetFloat64(keys...) + handyPool.Put(p) + return f +} + +// GetBool returns boolean value for the field identified by keys path +// in JSON data. +// +// Array indexes may be represented as decimal numbers in keys. +// +// False is returned on error. Use Parser for proper error handling. +// +// Parser is faster for obtaining multiple fields from JSON. 
+func GetBool(data []byte, keys ...string) bool { + p := handyPool.Get() + v, err := p.ParseBytes(data) + if err != nil { + handyPool.Put(p) + return false + } + b := v.GetBool(keys...) + handyPool.Put(p) + return b +} + +// Exists returns true if the field identified by keys path exists in JSON data. +// +// Array indexes may be represented as decimal numbers in keys. +// +// False is returned on error. Use Parser for proper error handling. +// +// Parser is faster when multiple fields must be checked in the JSON. +func Exists(data []byte, keys ...string) bool { + p := handyPool.Get() + v, err := p.ParseBytes(data) + if err != nil { + handyPool.Put(p) + return false + } + ok := v.Exists(keys...) + handyPool.Put(p) + return ok +} + +// Parse parses json string s. +// +// The function is slower than the Parser.Parse for re-used Parser. +func Parse(s string) (*Value, error) { + var p Parser + return p.Parse(s) +} + +// MustParse parses json string s. +// +// The function panics if s cannot be parsed. +// The function is slower than the Parser.Parse for re-used Parser. +func MustParse(s string) *Value { + v, err := Parse(s) + if err != nil { + panic(err) + } + return v +} + +// ParseBytes parses b containing json. +// +// The function is slower than the Parser.ParseBytes for re-used Parser. +func ParseBytes(b []byte) (*Value, error) { + var p Parser + return p.ParseBytes(b) +} + +// MustParseBytes parses b containing json. +// +// The function panics if b cannot be parsed. +// The function is slower than the Parser.ParseBytes for re-used Parser. 
+func MustParseBytes(b []byte) *Value { + v, err := ParseBytes(b) + if err != nil { + panic(err) + } + return v +} diff --git a/vendor/github.com/valyala/fastjson/parser.go b/vendor/github.com/valyala/fastjson/parser.go new file mode 100644 index 0000000000..885e1841ef --- /dev/null +++ b/vendor/github.com/valyala/fastjson/parser.go @@ -0,0 +1,976 @@ +package fastjson + +import ( + "fmt" + "github.com/valyala/fastjson/fastfloat" + "strconv" + "strings" + "unicode/utf16" +) + +// Parser parses JSON. +// +// Parser may be re-used for subsequent parsing. +// +// Parser cannot be used from concurrent goroutines. +// Use per-goroutine parsers or ParserPool instead. +type Parser struct { + // b contains working copy of the string to be parsed. + b []byte + + // c is a cache for json values. + c cache +} + +// Parse parses s containing JSON. +// +// The returned value is valid until the next call to Parse*. +// +// Use Scanner if a stream of JSON values must be parsed. +func (p *Parser) Parse(s string) (*Value, error) { + s = skipWS(s) + p.b = append(p.b[:0], s...) + p.c.reset() + + v, tail, err := parseValue(b2s(p.b), &p.c, 0) + if err != nil { + return nil, fmt.Errorf("cannot parse JSON: %s; unparsed tail: %q", err, startEndString(tail)) + } + tail = skipWS(tail) + if len(tail) > 0 { + return nil, fmt.Errorf("unexpected tail: %q", startEndString(tail)) + } + return v, nil +} + +// ParseBytes parses b containing JSON. +// +// The returned Value is valid until the next call to Parse*. +// +// Use Scanner if a stream of JSON values must be parsed. +func (p *Parser) ParseBytes(b []byte) (*Value, error) { + return p.Parse(b2s(b)) +} + +type cache struct { + vs []Value +} + +func (c *cache) reset() { + c.vs = c.vs[:0] +} + +func (c *cache) getValue() *Value { + if cap(c.vs) > len(c.vs) { + c.vs = c.vs[:len(c.vs)+1] + } else { + c.vs = append(c.vs, Value{}) + } + // Do not reset the value, since the caller must properly init it. 
+ return &c.vs[len(c.vs)-1] +} + +func skipWS(s string) string { + if len(s) == 0 || s[0] > 0x20 { + // Fast path. + return s + } + return skipWSSlow(s) +} + +func skipWSSlow(s string) string { + if len(s) == 0 || s[0] != 0x20 && s[0] != 0x0A && s[0] != 0x09 && s[0] != 0x0D { + return s + } + for i := 1; i < len(s); i++ { + if s[i] != 0x20 && s[i] != 0x0A && s[i] != 0x09 && s[i] != 0x0D { + return s[i:] + } + } + return "" +} + +type kv struct { + k string + v *Value +} + +// MaxDepth is the maximum depth for nested JSON. +const MaxDepth = 300 + +func parseValue(s string, c *cache, depth int) (*Value, string, error) { + if len(s) == 0 { + return nil, s, fmt.Errorf("cannot parse empty string") + } + depth++ + if depth > MaxDepth { + return nil, s, fmt.Errorf("too big depth for the nested JSON; it exceeds %d", MaxDepth) + } + + if s[0] == '{' { + v, tail, err := parseObject(s[1:], c, depth) + if err != nil { + return nil, tail, fmt.Errorf("cannot parse object: %s", err) + } + return v, tail, nil + } + if s[0] == '[' { + v, tail, err := parseArray(s[1:], c, depth) + if err != nil { + return nil, tail, fmt.Errorf("cannot parse array: %s", err) + } + return v, tail, nil + } + if s[0] == '"' { + ss, tail, err := parseRawString(s[1:]) + if err != nil { + return nil, tail, fmt.Errorf("cannot parse string: %s", err) + } + v := c.getValue() + v.t = typeRawString + v.s = ss + return v, tail, nil + } + if s[0] == 't' { + if len(s) < len("true") || s[:len("true")] != "true" { + return nil, s, fmt.Errorf("unexpected value found: %q", s) + } + return valueTrue, s[len("true"):], nil + } + if s[0] == 'f' { + if len(s) < len("false") || s[:len("false")] != "false" { + return nil, s, fmt.Errorf("unexpected value found: %q", s) + } + return valueFalse, s[len("false"):], nil + } + if s[0] == 'n' { + if len(s) < len("null") || s[:len("null")] != "null" { + // Try parsing NaN + if len(s) >= 3 && strings.EqualFold(s[:3], "nan") { + v := c.getValue() + v.t = TypeNumber + v.s = s[:3] + 
return v, s[3:], nil + } + return nil, s, fmt.Errorf("unexpected value found: %q", s) + } + return valueNull, s[len("null"):], nil + } + + ns, tail, err := parseRawNumber(s) + if err != nil { + return nil, tail, fmt.Errorf("cannot parse number: %s", err) + } + v := c.getValue() + v.t = TypeNumber + v.s = ns + return v, tail, nil +} + +func parseArray(s string, c *cache, depth int) (*Value, string, error) { + s = skipWS(s) + if len(s) == 0 { + return nil, s, fmt.Errorf("missing ']'") + } + + if s[0] == ']' { + v := c.getValue() + v.t = TypeArray + v.a = v.a[:0] + return v, s[1:], nil + } + + a := c.getValue() + a.t = TypeArray + a.a = a.a[:0] + for { + var v *Value + var err error + + s = skipWS(s) + v, s, err = parseValue(s, c, depth) + if err != nil { + return nil, s, fmt.Errorf("cannot parse array value: %s", err) + } + a.a = append(a.a, v) + + s = skipWS(s) + if len(s) == 0 { + return nil, s, fmt.Errorf("unexpected end of array") + } + if s[0] == ',' { + s = s[1:] + continue + } + if s[0] == ']' { + s = s[1:] + return a, s, nil + } + return nil, s, fmt.Errorf("missing ',' after array value") + } +} + +func parseObject(s string, c *cache, depth int) (*Value, string, error) { + s = skipWS(s) + if len(s) == 0 { + return nil, s, fmt.Errorf("missing '}'") + } + + if s[0] == '}' { + v := c.getValue() + v.t = TypeObject + v.o.reset() + return v, s[1:], nil + } + + o := c.getValue() + o.t = TypeObject + o.o.reset() + for { + var err error + kv := o.o.getKV() + + // Parse key. 
+ s = skipWS(s) + if len(s) == 0 || s[0] != '"' { + return nil, s, fmt.Errorf(`cannot find opening '"" for object key`) + } + kv.k, s, err = parseRawKey(s[1:]) + if err != nil { + return nil, s, fmt.Errorf("cannot parse object key: %s", err) + } + s = skipWS(s) + if len(s) == 0 || s[0] != ':' { + return nil, s, fmt.Errorf("missing ':' after object key") + } + s = s[1:] + + // Parse value + s = skipWS(s) + kv.v, s, err = parseValue(s, c, depth) + if err != nil { + return nil, s, fmt.Errorf("cannot parse object value: %s", err) + } + s = skipWS(s) + if len(s) == 0 { + return nil, s, fmt.Errorf("unexpected end of object") + } + if s[0] == ',' { + s = s[1:] + continue + } + if s[0] == '}' { + return o, s[1:], nil + } + return nil, s, fmt.Errorf("missing ',' after object value") + } +} + +func escapeString(dst []byte, s string) []byte { + if !hasSpecialChars(s) { + // Fast path - nothing to escape. + dst = append(dst, '"') + dst = append(dst, s...) + dst = append(dst, '"') + return dst + } + + // Slow path. + return strconv.AppendQuote(dst, s) +} + +func hasSpecialChars(s string) bool { + if strings.IndexByte(s, '"') >= 0 || strings.IndexByte(s, '\\') >= 0 { + return true + } + for i := 0; i < len(s); i++ { + if s[i] < 0x20 { + return true + } + } + return false +} + +func unescapeStringBestEffort(s string) string { + n := strings.IndexByte(s, '\\') + if n < 0 { + // Fast path - nothing to unescape. + return s + } + + // Slow path - unescape string. + b := s2b(s) // It is safe to do, since s points to a byte slice in Parser.b. + b = b[:n] + s = s[n+1:] + for len(s) > 0 { + ch := s[0] + s = s[1:] + switch ch { + case '"': + b = append(b, '"') + case '\\': + b = append(b, '\\') + case '/': + b = append(b, '/') + case 'b': + b = append(b, '\b') + case 'f': + b = append(b, '\f') + case 'n': + b = append(b, '\n') + case 'r': + b = append(b, '\r') + case 't': + b = append(b, '\t') + case 'u': + if len(s) < 4 { + // Too short escape sequence. Just store it unchanged. 
+ b = append(b, "\\u"...) + break + } + xs := s[:4] + x, err := strconv.ParseUint(xs, 16, 16) + if err != nil { + // Invalid escape sequence. Just store it unchanged. + b = append(b, "\\u"...) + break + } + s = s[4:] + if !utf16.IsSurrogate(rune(x)) { + b = append(b, string(rune(x))...) + break + } + + // Surrogate. + // See https://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + b = append(b, "\\u"...) + b = append(b, xs...) + break + } + x1, err := strconv.ParseUint(s[2:6], 16, 16) + if err != nil { + b = append(b, "\\u"...) + b = append(b, xs...) + break + } + r := utf16.DecodeRune(rune(x), rune(x1)) + b = append(b, string(r)...) + s = s[6:] + default: + // Unknown escape sequence. Just store it unchanged. + b = append(b, '\\', ch) + } + n = strings.IndexByte(s, '\\') + if n < 0 { + b = append(b, s...) + break + } + b = append(b, s[:n]...) + s = s[n+1:] + } + return b2s(b) +} + +// parseRawKey is similar to parseRawString, but is optimized +// for small-sized keys without escape sequences. +func parseRawKey(s string) (string, string, error) { + for i := 0; i < len(s); i++ { + if s[i] == '"' { + // Fast path. + return s[:i], s[i+1:], nil + } + if s[i] == '\\' { + // Slow path. + return parseRawString(s) + } + } + return s, "", fmt.Errorf(`missing closing '"'`) +} + +func parseRawString(s string) (string, string, error) { + n := strings.IndexByte(s, '"') + if n < 0 { + return s, "", fmt.Errorf(`missing closing '"'`) + } + if n == 0 || s[n-1] != '\\' { + // Fast path. No escaped ". + return s[:n], s[n+1:], nil + } + + // Slow path - possible escaped " found. 
+ ss := s + for { + i := n - 1 + for i > 0 && s[i-1] == '\\' { + i-- + } + if uint(n-i)%2 == 0 { + return ss[:len(ss)-len(s)+n], s[n+1:], nil + } + s = s[n+1:] + + n = strings.IndexByte(s, '"') + if n < 0 { + return ss, "", fmt.Errorf(`missing closing '"'`) + } + if n == 0 || s[n-1] != '\\' { + return ss[:len(ss)-len(s)+n], s[n+1:], nil + } + } +} + +func parseRawNumber(s string) (string, string, error) { + // The caller must ensure len(s) > 0 + + // Find the end of the number. + for i := 0; i < len(s); i++ { + ch := s[i] + if (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == 'e' || ch == 'E' || ch == '+' { + continue + } + if i == 0 || i == 1 && (s[0] == '-' || s[0] == '+') { + if len(s[i:]) >= 3 { + xs := s[i : i+3] + if strings.EqualFold(xs, "inf") || strings.EqualFold(xs, "nan") { + return s[:i+3], s[i+3:], nil + } + } + return "", s, fmt.Errorf("unexpected char: %q", s[:1]) + } + ns := s[:i] + s = s[i:] + return ns, s, nil + } + return s, "", nil +} + +// Object represents JSON object. +// +// Object cannot be used from concurrent goroutines. +// Use per-goroutine parsers or ParserPool instead. +type Object struct { + kvs []kv + keysUnescaped bool +} + +func (o *Object) reset() { + o.kvs = o.kvs[:0] + o.keysUnescaped = false +} + +// MarshalTo appends marshaled o to dst and returns the result. +func (o *Object) MarshalTo(dst []byte) []byte { + dst = append(dst, '{') + for i, kv := range o.kvs { + if o.keysUnescaped { + dst = escapeString(dst, kv.k) + } else { + dst = append(dst, '"') + dst = append(dst, kv.k...) + dst = append(dst, '"') + } + dst = append(dst, ':') + dst = kv.v.MarshalTo(dst) + if i != len(o.kvs)-1 { + dst = append(dst, ',') + } + } + dst = append(dst, '}') + return dst +} + +// String returns string representation for the o. +// +// This function is for debugging purposes only. It isn't optimized for speed. +// See MarshalTo instead. 
+func (o *Object) String() string { + b := o.MarshalTo(nil) + // It is safe converting b to string without allocation, since b is no longer + // reachable after this line. + return b2s(b) +} + +func (o *Object) getKV() *kv { + if cap(o.kvs) > len(o.kvs) { + o.kvs = o.kvs[:len(o.kvs)+1] + } else { + o.kvs = append(o.kvs, kv{}) + } + return &o.kvs[len(o.kvs)-1] +} + +func (o *Object) unescapeKeys() { + if o.keysUnescaped { + return + } + kvs := o.kvs + for i := range kvs { + kv := &kvs[i] + kv.k = unescapeStringBestEffort(kv.k) + } + o.keysUnescaped = true +} + +// Len returns the number of items in the o. +func (o *Object) Len() int { + return len(o.kvs) +} + +// Get returns the value for the given key in the o. +// +// Returns nil if the value for the given key isn't found. +// +// The returned value is valid until Parse is called on the Parser returned o. +func (o *Object) Get(key string) *Value { + if !o.keysUnescaped && strings.IndexByte(key, '\\') < 0 { + // Fast path - try searching for the key without object keys unescaping. + for _, kv := range o.kvs { + if kv.k == key { + return kv.v + } + } + } + + // Slow path - unescape object keys. + o.unescapeKeys() + + for _, kv := range o.kvs { + if kv.k == key { + return kv.v + } + } + return nil +} + +// Visit calls f for each item in the o in the original order +// of the parsed JSON. +// +// f cannot hold key and/or v after returning. +func (o *Object) Visit(f func(key []byte, v *Value)) { + if o == nil { + return + } + + o.unescapeKeys() + + for _, kv := range o.kvs { + f(s2b(kv.k), kv.v) + } +} + +// Value represents any JSON value. +// +// Call Type in order to determine the actual type of the JSON value. +// +// Value cannot be used from concurrent goroutines. +// Use per-goroutine parsers or ParserPool instead. +type Value struct { + o Object + a []*Value + s string + t Type +} + +// MarshalTo appends marshaled v to dst and returns the result. 
+func (v *Value) MarshalTo(dst []byte) []byte { + switch v.t { + case typeRawString: + dst = append(dst, '"') + dst = append(dst, v.s...) + dst = append(dst, '"') + return dst + case TypeObject: + return v.o.MarshalTo(dst) + case TypeArray: + dst = append(dst, '[') + for i, vv := range v.a { + dst = vv.MarshalTo(dst) + if i != len(v.a)-1 { + dst = append(dst, ',') + } + } + dst = append(dst, ']') + return dst + case TypeString: + return escapeString(dst, v.s) + case TypeNumber: + return append(dst, v.s...) + case TypeTrue: + return append(dst, "true"...) + case TypeFalse: + return append(dst, "false"...) + case TypeNull: + return append(dst, "null"...) + default: + panic(fmt.Errorf("BUG: unexpected Value type: %d", v.t)) + } +} + +// String returns string representation of the v. +// +// The function is for debugging purposes only. It isn't optimized for speed. +// See MarshalTo instead. +// +// Don't confuse this function with StringBytes, which must be called +// for obtaining the underlying JSON string for the v. +func (v *Value) String() string { + b := v.MarshalTo(nil) + // It is safe converting b to string without allocation, since b is no longer + // reachable after this line. + return b2s(b) +} + +// Type represents JSON type. +type Type int + +const ( + // TypeNull is JSON null. + TypeNull Type = 0 + + // TypeObject is JSON object type. + TypeObject Type = 1 + + // TypeArray is JSON array type. + TypeArray Type = 2 + + // TypeString is JSON string type. + TypeString Type = 3 + + // TypeNumber is JSON number type. + TypeNumber Type = 4 + + // TypeTrue is JSON true. + TypeTrue Type = 5 + + // TypeFalse is JSON false. + TypeFalse Type = 6 + + typeRawString Type = 7 +) + +// String returns string representation of t. 
+func (t Type) String() string { + switch t { + case TypeObject: + return "object" + case TypeArray: + return "array" + case TypeString: + return "string" + case TypeNumber: + return "number" + case TypeTrue: + return "true" + case TypeFalse: + return "false" + case TypeNull: + return "null" + + // typeRawString is skipped intentionally, + // since it shouldn't be visible to user. + default: + panic(fmt.Errorf("BUG: unknown Value type: %d", t)) + } +} + +// Type returns the type of the v. +func (v *Value) Type() Type { + if v.t == typeRawString { + v.s = unescapeStringBestEffort(v.s) + v.t = TypeString + } + return v.t +} + +// Exists returns true if the field exists for the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +func (v *Value) Exists(keys ...string) bool { + v = v.Get(keys...) + return v != nil +} + +// Get returns value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// nil is returned for non-existing keys path. +// +// The returned value is valid until Parse is called on the Parser returned v. +func (v *Value) Get(keys ...string) *Value { + if v == nil { + return nil + } + for _, key := range keys { + if v.t == TypeObject { + v = v.o.Get(key) + if v == nil { + return nil + } + } else if v.t == TypeArray { + n, err := strconv.Atoi(key) + if err != nil || n < 0 || n >= len(v.a) { + return nil + } + v = v.a[n] + } else { + return nil + } + } + return v +} + +// GetObject returns object value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// nil is returned for non-existing keys path or for invalid value type. +// +// The returned object is valid until Parse is called on the Parser returned v. +func (v *Value) GetObject(keys ...string) *Object { + v = v.Get(keys...) + if v == nil || v.t != TypeObject { + return nil + } + return &v.o +} + +// GetArray returns array value by the given keys path. 
+// +// Array indexes may be represented as decimal numbers in keys. +// +// nil is returned for non-existing keys path or for invalid value type. +// +// The returned array is valid until Parse is called on the Parser returned v. +func (v *Value) GetArray(keys ...string) []*Value { + v = v.Get(keys...) + if v == nil || v.t != TypeArray { + return nil + } + return v.a +} + +// GetFloat64 returns float64 value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// 0 is returned for non-existing keys path or for invalid value type. +func (v *Value) GetFloat64(keys ...string) float64 { + v = v.Get(keys...) + if v == nil || v.Type() != TypeNumber { + return 0 + } + return fastfloat.ParseBestEffort(v.s) +} + +// GetInt returns int value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// 0 is returned for non-existing keys path or for invalid value type. +func (v *Value) GetInt(keys ...string) int { + v = v.Get(keys...) + if v == nil || v.Type() != TypeNumber { + return 0 + } + n := fastfloat.ParseInt64BestEffort(v.s) + nn := int(n) + if int64(nn) != n { + return 0 + } + return nn +} + +// GetUint returns uint value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// 0 is returned for non-existing keys path or for invalid value type. +func (v *Value) GetUint(keys ...string) uint { + v = v.Get(keys...) + if v == nil || v.Type() != TypeNumber { + return 0 + } + n := fastfloat.ParseUint64BestEffort(v.s) + nn := uint(n) + if uint64(nn) != n { + return 0 + } + return nn +} + +// GetInt64 returns int64 value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// 0 is returned for non-existing keys path or for invalid value type. +func (v *Value) GetInt64(keys ...string) int64 { + v = v.Get(keys...) 
+ if v == nil || v.Type() != TypeNumber { + return 0 + } + return fastfloat.ParseInt64BestEffort(v.s) +} + +// GetUint64 returns uint64 value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// 0 is returned for non-existing keys path or for invalid value type. +func (v *Value) GetUint64(keys ...string) uint64 { + v = v.Get(keys...) + if v == nil || v.Type() != TypeNumber { + return 0 + } + return fastfloat.ParseUint64BestEffort(v.s) +} + +// GetStringBytes returns string value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// nil is returned for non-existing keys path or for invalid value type. +// +// The returned string is valid until Parse is called on the Parser returned v. +func (v *Value) GetStringBytes(keys ...string) []byte { + v = v.Get(keys...) + if v == nil || v.Type() != TypeString { + return nil + } + return s2b(v.s) +} + +// GetBool returns bool value by the given keys path. +// +// Array indexes may be represented as decimal numbers in keys. +// +// false is returned for non-existing keys path or for invalid value type. +func (v *Value) GetBool(keys ...string) bool { + v = v.Get(keys...) + if v != nil && v.t == TypeTrue { + return true + } + return false +} + +// Object returns the underlying JSON object for the v. +// +// The returned object is valid until Parse is called on the Parser returned v. +// +// Use GetObject if you don't need error handling. +func (v *Value) Object() (*Object, error) { + if v.t != TypeObject { + return nil, fmt.Errorf("value doesn't contain object; it contains %s", v.Type()) + } + return &v.o, nil +} + +// Array returns the underlying JSON array for the v. +// +// The returned array is valid until Parse is called on the Parser returned v. +// +// Use GetArray if you don't need error handling. 
+func (v *Value) Array() ([]*Value, error) { + if v.t != TypeArray { + return nil, fmt.Errorf("value doesn't contain array; it contains %s", v.Type()) + } + return v.a, nil +} + +// StringBytes returns the underlying JSON string for the v. +// +// The returned string is valid until Parse is called on the Parser returned v. +// +// Use GetStringBytes if you don't need error handling. +func (v *Value) StringBytes() ([]byte, error) { + if v.Type() != TypeString { + return nil, fmt.Errorf("value doesn't contain string; it contains %s", v.Type()) + } + return s2b(v.s), nil +} + +// Float64 returns the underlying JSON number for the v. +// +// Use GetFloat64 if you don't need error handling. +func (v *Value) Float64() (float64, error) { + if v.Type() != TypeNumber { + return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type()) + } + return fastfloat.Parse(v.s) +} + +// Int returns the underlying JSON int for the v. +// +// Use GetInt if you don't need error handling. +func (v *Value) Int() (int, error) { + if v.Type() != TypeNumber { + return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type()) + } + n, err := fastfloat.ParseInt64(v.s) + if err != nil { + return 0, err + } + nn := int(n) + if int64(nn) != n { + return 0, fmt.Errorf("number %q doesn't fit int", v.s) + } + return nn, nil +} + +// Uint returns the underlying JSON uint for the v. +// +// Use GetInt if you don't need error handling. +func (v *Value) Uint() (uint, error) { + if v.Type() != TypeNumber { + return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type()) + } + n, err := fastfloat.ParseUint64(v.s) + if err != nil { + return 0, err + } + nn := uint(n) + if uint64(nn) != n { + return 0, fmt.Errorf("number %q doesn't fit uint", v.s) + } + return nn, nil +} + +// Int64 returns the underlying JSON int64 for the v. +// +// Use GetInt64 if you don't need error handling. 
+func (v *Value) Int64() (int64, error) { + if v.Type() != TypeNumber { + return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type()) + } + return fastfloat.ParseInt64(v.s) +} + +// Uint64 returns the underlying JSON uint64 for the v. +// +// Use GetInt64 if you don't need error handling. +func (v *Value) Uint64() (uint64, error) { + if v.Type() != TypeNumber { + return 0, fmt.Errorf("value doesn't contain number; it contains %s", v.Type()) + } + return fastfloat.ParseUint64(v.s) +} + +// Bool returns the underlying JSON bool for the v. +// +// Use GetBool if you don't need error handling. +func (v *Value) Bool() (bool, error) { + if v.t == TypeTrue { + return true, nil + } + if v.t == TypeFalse { + return false, nil + } + return false, fmt.Errorf("value doesn't contain bool; it contains %s", v.Type()) +} + +var ( + valueTrue = &Value{t: TypeTrue} + valueFalse = &Value{t: TypeFalse} + valueNull = &Value{t: TypeNull} +) diff --git a/vendor/github.com/valyala/fastjson/pool.go b/vendor/github.com/valyala/fastjson/pool.go new file mode 100644 index 0000000000..3f40fb4e21 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/pool.go @@ -0,0 +1,53 @@ +package fastjson + +import ( + "sync" +) + +// ParserPool may be used for pooling Parsers for similarly typed JSONs. +type ParserPool struct { + pool sync.Pool +} + +// Get returns a Parser from pp. +// +// The Parser must be Put to pp after use. +func (pp *ParserPool) Get() *Parser { + v := pp.pool.Get() + if v == nil { + return &Parser{} + } + return v.(*Parser) +} + +// Put returns p to pp. +// +// p and objects recursively returned from p cannot be used after p +// is put into pp. +func (pp *ParserPool) Put(p *Parser) { + pp.pool.Put(p) +} + +// ArenaPool may be used for pooling Arenas for similarly typed JSONs. +type ArenaPool struct { + pool sync.Pool +} + +// Get returns an Arena from ap. +// +// The Arena must be Put to ap after use. 
+func (ap *ArenaPool) Get() *Arena { + v := ap.pool.Get() + if v == nil { + return &Arena{} + } + return v.(*Arena) +} + +// Put returns a to ap. +// +// a and objects created by a cannot be used after a is put into ap. +func (ap *ArenaPool) Put(a *Arena) { + a.Reset() + ap.pool.Put(a) +} diff --git a/vendor/github.com/valyala/fastjson/scanner.go b/vendor/github.com/valyala/fastjson/scanner.go new file mode 100644 index 0000000000..89b38816f0 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/scanner.go @@ -0,0 +1,94 @@ +package fastjson + +import ( + "errors" +) + +// Scanner scans a series of JSON values. Values may be delimited by whitespace. +// +// Scanner may parse JSON lines ( http://jsonlines.org/ ). +// +// Scanner may be re-used for subsequent parsing. +// +// Scanner cannot be used from concurrent goroutines. +// +// Use Parser for parsing only a single JSON value. +type Scanner struct { + // b contains a working copy of json value passed to Init. + b []byte + + // s points to the next JSON value to parse. + s string + + // err contains the last error. + err error + + // v contains the last parsed JSON value. + v *Value + + // c is used for caching JSON values. + c cache +} + +// Init initializes sc with the given s. +// +// s may contain multiple JSON values, which may be delimited by whitespace. +func (sc *Scanner) Init(s string) { + sc.b = append(sc.b[:0], s...) + sc.s = b2s(sc.b) + sc.err = nil + sc.v = nil +} + +// InitBytes initializes sc with the given b. +// +// b may contain multiple JSON values, which may be delimited by whitespace. +func (sc *Scanner) InitBytes(b []byte) { + sc.Init(b2s(b)) +} + +// Next parses the next JSON value from s passed to Init. +// +// Returns true on success. The parsed value is available via Value call. +// +// Returns false either on error or on the end of s. +// Call Error in order to determine the cause of the returned false. 
+func (sc *Scanner) Next() bool { + if sc.err != nil { + return false + } + + sc.s = skipWS(sc.s) + if len(sc.s) == 0 { + sc.err = errEOF + return false + } + + sc.c.reset() + v, tail, err := parseValue(sc.s, &sc.c, 0) + if err != nil { + sc.err = err + return false + } + + sc.s = tail + sc.v = v + return true +} + +// Error returns the last error. +func (sc *Scanner) Error() error { + if sc.err == errEOF { + return nil + } + return sc.err +} + +// Value returns the last parsed value. +// +// The value is valid until the Next call. +func (sc *Scanner) Value() *Value { + return sc.v +} + +var errEOF = errors.New("end of s") diff --git a/vendor/github.com/valyala/fastjson/update.go b/vendor/github.com/valyala/fastjson/update.go new file mode 100644 index 0000000000..795b7fbd6d --- /dev/null +++ b/vendor/github.com/valyala/fastjson/update.go @@ -0,0 +1,113 @@ +package fastjson + +import ( + "strconv" + "strings" +) + +// Del deletes the entry with the given key from o. +func (o *Object) Del(key string) { + if o == nil { + return + } + if !o.keysUnescaped && strings.IndexByte(key, '\\') < 0 { + // Fast path - try searching for the key without object keys unescaping. + for i, kv := range o.kvs { + if kv.k == key { + o.kvs = append(o.kvs[:i], o.kvs[i+1:]...) + return + } + } + } + + // Slow path - unescape object keys before item search. + o.unescapeKeys() + + for i, kv := range o.kvs { + if kv.k == key { + o.kvs = append(o.kvs[:i], o.kvs[i+1:]...) + return + } + } +} + +// Del deletes the entry with the given key from array or object v. +func (v *Value) Del(key string) { + if v == nil { + return + } + if v.t == TypeObject { + v.o.Del(key) + return + } + if v.t == TypeArray { + n, err := strconv.Atoi(key) + if err != nil || n < 0 || n >= len(v.a) { + return + } + v.a = append(v.a[:n], v.a[n+1:]...) + } +} + +// Set sets (key, value) entry in the o. +// +// The value must be unchanged during o lifetime. 
+func (o *Object) Set(key string, value *Value) { + if o == nil { + return + } + if value == nil { + value = valueNull + } + o.unescapeKeys() + + // Try substituting already existing entry with the given key. + for i := range o.kvs { + kv := &o.kvs[i] + if kv.k == key { + kv.v = value + return + } + } + + // Add new entry. + kv := o.getKV() + kv.k = key + kv.v = value +} + +// Set sets (key, value) entry in the array or object v. +// +// The value must be unchanged during v lifetime. +func (v *Value) Set(key string, value *Value) { + if v == nil { + return + } + if v.t == TypeObject { + v.o.Set(key, value) + return + } + if v.t == TypeArray { + idx, err := strconv.Atoi(key) + if err != nil || idx < 0 { + return + } + v.SetArrayItem(idx, value) + } +} + +// SetArrayItem sets the value in the array v at idx position. +// +// The value must be unchanged during v lifetime. +func (v *Value) SetArrayItem(idx int, value *Value) { + if v == nil || v.t != TypeArray { + return + } + for idx >= len(v.a) { + v.a = append(v.a, valueNull) + } + if value == nil { + value = valueNull + } + v.a[idx] = value +} diff --git a/vendor/github.com/valyala/fastjson/util.go b/vendor/github.com/valyala/fastjson/util.go new file mode 100644 index 0000000000..03a53965a2 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/util.go @@ -0,0 +1,30 @@ +package fastjson + +import ( + "reflect" + "unsafe" +) + +func b2s(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func s2b(s string) (b []byte) { + strh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh.Data = strh.Data + sh.Len = strh.Len + sh.Cap = strh.Len + return b +} + +const maxStartEndStringLen = 80 + +func startEndString(s string) string { + if len(s) <= maxStartEndStringLen { + return s + } + start := s[:40] + end := s[len(s)-40:] + return start + "..." 
+ end +} diff --git a/vendor/github.com/valyala/fastjson/validate.go b/vendor/github.com/valyala/fastjson/validate.go new file mode 100644 index 0000000000..196f1c3dc6 --- /dev/null +++ b/vendor/github.com/valyala/fastjson/validate.go @@ -0,0 +1,308 @@ +package fastjson + +import ( + "fmt" + "strconv" + "strings" +) + +// Validate validates JSON s. +func Validate(s string) error { + s = skipWS(s) + + tail, err := validateValue(s) + if err != nil { + return fmt.Errorf("cannot parse JSON: %s; unparsed tail: %q", err, startEndString(tail)) + } + tail = skipWS(tail) + if len(tail) > 0 { + return fmt.Errorf("unexpected tail: %q", startEndString(tail)) + } + return nil +} + +// ValidateBytes validates JSON b. +func ValidateBytes(b []byte) error { + return Validate(b2s(b)) +} + +func validateValue(s string) (string, error) { + if len(s) == 0 { + return s, fmt.Errorf("cannot parse empty string") + } + + if s[0] == '{' { + tail, err := validateObject(s[1:]) + if err != nil { + return tail, fmt.Errorf("cannot parse object: %s", err) + } + return tail, nil + } + if s[0] == '[' { + tail, err := validateArray(s[1:]) + if err != nil { + return tail, fmt.Errorf("cannot parse array: %s", err) + } + return tail, nil + } + if s[0] == '"' { + sv, tail, err := validateString(s[1:]) + if err != nil { + return tail, fmt.Errorf("cannot parse string: %s", err) + } + // Scan the string for control chars. 
+ for i := 0; i < len(sv); i++ { + if sv[i] < 0x20 { + return tail, fmt.Errorf("string cannot contain control char 0x%02X", sv[i]) + } + } + return tail, nil + } + if s[0] == 't' { + if len(s) < len("true") || s[:len("true")] != "true" { + return s, fmt.Errorf("unexpected value found: %q", s) + } + return s[len("true"):], nil + } + if s[0] == 'f' { + if len(s) < len("false") || s[:len("false")] != "false" { + return s, fmt.Errorf("unexpected value found: %q", s) + } + return s[len("false"):], nil + } + if s[0] == 'n' { + if len(s) < len("null") || s[:len("null")] != "null" { + return s, fmt.Errorf("unexpected value found: %q", s) + } + return s[len("null"):], nil + } + + tail, err := validateNumber(s) + if err != nil { + return tail, fmt.Errorf("cannot parse number: %s", err) + } + return tail, nil +} + +func validateArray(s string) (string, error) { + s = skipWS(s) + if len(s) == 0 { + return s, fmt.Errorf("missing ']'") + } + if s[0] == ']' { + return s[1:], nil + } + + for { + var err error + + s = skipWS(s) + s, err = validateValue(s) + if err != nil { + return s, fmt.Errorf("cannot parse array value: %s", err) + } + + s = skipWS(s) + if len(s) == 0 { + return s, fmt.Errorf("unexpected end of array") + } + if s[0] == ',' { + s = s[1:] + continue + } + if s[0] == ']' { + s = s[1:] + return s, nil + } + return s, fmt.Errorf("missing ',' after array value") + } +} + +func validateObject(s string) (string, error) { + s = skipWS(s) + if len(s) == 0 { + return s, fmt.Errorf("missing '}'") + } + if s[0] == '}' { + return s[1:], nil + } + + for { + var err error + + // Parse key. + s = skipWS(s) + if len(s) == 0 || s[0] != '"' { + return s, fmt.Errorf(`cannot find opening '"" for object key`) + } + + var key string + key, s, err = validateKey(s[1:]) + if err != nil { + return s, fmt.Errorf("cannot parse object key: %s", err) + } + // Scan the key for control chars. 
+ for i := 0; i < len(key); i++ { + if key[i] < 0x20 { + return s, fmt.Errorf("object key cannot contain control char 0x%02X", key[i]) + } + } + s = skipWS(s) + if len(s) == 0 || s[0] != ':' { + return s, fmt.Errorf("missing ':' after object key") + } + s = s[1:] + + // Parse value + s = skipWS(s) + s, err = validateValue(s) + if err != nil { + return s, fmt.Errorf("cannot parse object value: %s", err) + } + s = skipWS(s) + if len(s) == 0 { + return s, fmt.Errorf("unexpected end of object") + } + if s[0] == ',' { + s = s[1:] + continue + } + if s[0] == '}' { + return s[1:], nil + } + return s, fmt.Errorf("missing ',' after object value") + } +} + +// validateKey is similar to validateString, but is optimized +// for typical object keys, which are quite small and have no escape sequences. +func validateKey(s string) (string, string, error) { + for i := 0; i < len(s); i++ { + if s[i] == '"' { + // Fast path - the key doesn't contain escape sequences. + return s[:i], s[i+1:], nil + } + if s[i] == '\\' { + // Slow path - the key contains escape sequences. + return validateString(s) + } + } + return "", s, fmt.Errorf(`missing closing '"'`) +} + +func validateString(s string) (string, string, error) { + // Try fast path - a string without escape sequences. + if n := strings.IndexByte(s, '"'); n >= 0 && strings.IndexByte(s[:n], '\\') < 0 { + return s[:n], s[n+1:], nil + } + + // Slow path - escape sequences are present. 
+ rs, tail, err := parseRawString(s) + if err != nil { + return rs, tail, err + } + for { + n := strings.IndexByte(rs, '\\') + if n < 0 { + return rs, tail, nil + } + n++ + if n >= len(rs) { + return rs, tail, fmt.Errorf("BUG: parseRawString returned invalid string with trailing backslash: %q", rs) + } + ch := rs[n] + rs = rs[n+1:] + switch ch { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + // Valid escape sequences - see http://json.org/ + break + case 'u': + if len(rs) < 4 { + return rs, tail, fmt.Errorf(`too short escape sequence: \u%s`, rs) + } + xs := rs[:4] + _, err := strconv.ParseUint(xs, 16, 16) + if err != nil { + return rs, tail, fmt.Errorf(`invalid escape sequence \u%s: %s`, xs, err) + } + rs = rs[4:] + default: + return rs, tail, fmt.Errorf(`unknown escape sequence \%c`, ch) + } + } +} + +func validateNumber(s string) (string, error) { + if len(s) == 0 { + return s, fmt.Errorf("zero-length number") + } + if s[0] == '-' { + s = s[1:] + if len(s) == 0 { + return s, fmt.Errorf("missing number after minus") + } + } + i := 0 + for i < len(s) { + if s[i] < '0' || s[i] > '9' { + break + } + i++ + } + if i <= 0 { + return s, fmt.Errorf("expecting 0..9 digit, got %c", s[0]) + } + if s[0] == '0' && i != 1 { + return s, fmt.Errorf("unexpected number starting from 0") + } + if i >= len(s) { + return "", nil + } + if s[i] == '.' 
{ + // Validate fractional part + s = s[i+1:] + if len(s) == 0 { + return s, fmt.Errorf("missing fractional part") + } + i = 0 + for i < len(s) { + if s[i] < '0' || s[i] > '9' { + break + } + i++ + } + if i == 0 { + return s, fmt.Errorf("expecting 0..9 digit in fractional part, got %c", s[0]) + } + if i >= len(s) { + return "", nil + } + } + if s[i] == 'e' || s[i] == 'E' { + // Validate exponent part + s = s[i+1:] + if len(s) == 0 { + return s, fmt.Errorf("missing exponent part") + } + if s[0] == '-' || s[0] == '+' { + s = s[1:] + if len(s) == 0 { + return s, fmt.Errorf("missing exponent part") + } + } + i = 0 + for i < len(s) { + if s[i] < '0' || s[i] > '9' { + break + } + i++ + } + if i == 0 { + return s, fmt.Errorf("expecting 0..9 digit in exponent part, got %c", s[0]) + } + if i >= len(s) { + return "", nil + } + } + return s[i:], nil +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/LICENSE b/vendor/github.com/vektah/gqlparser/v2/LICENSE similarity index 100% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/LICENSE rename to vendor/github.com/vektah/gqlparser/v2/LICENSE diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/argmap.go b/vendor/github.com/vektah/gqlparser/v2/ast/argmap.go similarity index 77% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/argmap.go rename to vendor/github.com/vektah/gqlparser/v2/ast/argmap.go index 43f6a3d6fc..1b65c27693 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/argmap.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/argmap.go @@ -1,11 +1,15 @@ package ast -func arg2map(defs ArgumentDefinitionList, args ArgumentList, vars map[string]interface{}) map[string]interface{} { - result := map[string]interface{}{} +func arg2map( + defs ArgumentDefinitionList, + args ArgumentList, + vars map[string]any, +) map[string]any { + result := map[string]any{} var err error for _, argDef := range defs { - var val 
interface{} + var val any var hasValue bool if argValue := args.ForName(argDef.Name); argValue != nil { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/collections.go b/vendor/github.com/vektah/gqlparser/v2/ast/collections.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/collections.go rename to vendor/github.com/vektah/gqlparser/v2/ast/collections.go diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/comment.go b/vendor/github.com/vektah/gqlparser/v2/ast/comment.go new file mode 100644 index 0000000000..8fcfda5813 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/ast/comment.go @@ -0,0 +1,31 @@ +package ast + +import ( + "strconv" + "strings" +) + +type Comment struct { + Value string + Position *Position +} + +func (c *Comment) Text() string { + return strings.TrimPrefix(c.Value, "#") +} + +type CommentGroup struct { + List []*Comment +} + +func (c *CommentGroup) Dump() string { + if len(c.List) == 0 { + return "" + } + var builder strings.Builder + for _, comment := range c.List { + builder.WriteString(comment.Value) + builder.WriteString("\n") + } + return strconv.Quote(builder.String()) +} diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/decode.go b/vendor/github.com/vektah/gqlparser/v2/ast/decode.go new file mode 100644 index 0000000000..c9966b2440 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/ast/decode.go @@ -0,0 +1,216 @@ +package ast + +import ( + "encoding/json" +) + +func UnmarshalSelectionSet(b []byte) (SelectionSet, error) { + var tmp []json.RawMessage + + if err := json.Unmarshal(b, &tmp); err != nil { + return nil, err + } + + result := make([]Selection, 0) + for _, item := range tmp { + var field Field + if err := json.Unmarshal(item, &field); err == nil { + result = append(result, &field) + continue + } + var fragmentSpread FragmentSpread + if err := json.Unmarshal(item, &fragmentSpread); err == nil { + result = append(result, 
&fragmentSpread) + continue + } + var inlineFragment InlineFragment + if err := json.Unmarshal(item, &inlineFragment); err == nil { + result = append(result, &inlineFragment) + continue + } + } + + return result, nil +} + +func (f *FragmentDefinition) UnmarshalJSON(b []byte) error { + var tmp map[string]json.RawMessage + if err := json.Unmarshal(b, &tmp); err != nil { + return err + } + for k := range tmp { + switch k { + case "Name": + err := json.Unmarshal(tmp[k], &f.Name) + if err != nil { + return err + } + case "VariableDefinition": + err := json.Unmarshal(tmp[k], &f.VariableDefinition) + if err != nil { + return err + } + case "TypeCondition": + err := json.Unmarshal(tmp[k], &f.TypeCondition) + if err != nil { + return err + } + case "Directives": + err := json.Unmarshal(tmp[k], &f.Directives) + if err != nil { + return err + } + case "SelectionSet": + ss, err := UnmarshalSelectionSet(tmp[k]) + if err != nil { + return err + } + f.SelectionSet = ss + case "Definition": + err := json.Unmarshal(tmp[k], &f.Definition) + if err != nil { + return err + } + case "Position": + err := json.Unmarshal(tmp[k], &f.Position) + if err != nil { + return err + } + } + } + return nil +} + +func (f *InlineFragment) UnmarshalJSON(b []byte) error { + var tmp map[string]json.RawMessage + if err := json.Unmarshal(b, &tmp); err != nil { + return err + } + for k := range tmp { + switch k { + case "TypeCondition": + err := json.Unmarshal(tmp[k], &f.TypeCondition) + if err != nil { + return err + } + case "Directives": + err := json.Unmarshal(tmp[k], &f.Directives) + if err != nil { + return err + } + case "SelectionSet": + ss, err := UnmarshalSelectionSet(tmp[k]) + if err != nil { + return err + } + f.SelectionSet = ss + case "ObjectDefinition": + err := json.Unmarshal(tmp[k], &f.ObjectDefinition) + if err != nil { + return err + } + case "Position": + err := json.Unmarshal(tmp[k], &f.Position) + if err != nil { + return err + } + } + } + return nil +} + +func (f 
*OperationDefinition) UnmarshalJSON(b []byte) error { + var tmp map[string]json.RawMessage + if err := json.Unmarshal(b, &tmp); err != nil { + return err + } + for k := range tmp { + switch k { + case "Operation": + err := json.Unmarshal(tmp[k], &f.Operation) + if err != nil { + return err + } + case "Name": + err := json.Unmarshal(tmp[k], &f.Name) + if err != nil { + return err + } + case "VariableDefinitions": + err := json.Unmarshal(tmp[k], &f.VariableDefinitions) + if err != nil { + return err + } + case "Directives": + err := json.Unmarshal(tmp[k], &f.Directives) + if err != nil { + return err + } + case "SelectionSet": + ss, err := UnmarshalSelectionSet(tmp[k]) + if err != nil { + return err + } + f.SelectionSet = ss + case "Position": + err := json.Unmarshal(tmp[k], &f.Position) + if err != nil { + return err + } + } + } + return nil +} + +func (f *Field) UnmarshalJSON(b []byte) error { + var tmp map[string]json.RawMessage + if err := json.Unmarshal(b, &tmp); err != nil { + return err + } + for k := range tmp { + switch k { + case "Alias": + err := json.Unmarshal(tmp[k], &f.Alias) + if err != nil { + return err + } + case "Name": + err := json.Unmarshal(tmp[k], &f.Name) + if err != nil { + return err + } + case "Arguments": + err := json.Unmarshal(tmp[k], &f.Arguments) + if err != nil { + return err + } + case "Directives": + err := json.Unmarshal(tmp[k], &f.Directives) + if err != nil { + return err + } + case "SelectionSet": + ss, err := UnmarshalSelectionSet(tmp[k]) + if err != nil { + return err + } + f.SelectionSet = ss + case "Position": + err := json.Unmarshal(tmp[k], &f.Position) + if err != nil { + return err + } + case "Definition": + err := json.Unmarshal(tmp[k], &f.Definition) + if err != nil { + return err + } + case "ObjectDefinition": + err := json.Unmarshal(tmp[k], &f.ObjectDefinition) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go 
b/vendor/github.com/vektah/gqlparser/v2/ast/definition.go similarity index 75% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go rename to vendor/github.com/vektah/gqlparser/v2/ast/definition.go index d203908168..426db7588c 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/definition.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/definition.go @@ -1,5 +1,7 @@ package ast +import "slices" + type DefinitionKind string const ( @@ -29,8 +31,12 @@ type Definition struct { Types []string // union EnumValues EnumValueList // enum - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` BuiltIn bool `dump:"-"` + + BeforeDescriptionComment *CommentGroup + AfterDescriptionComment *CommentGroup + EndOfDefinitionComment *CommentGroup } func (d *Definition) IsLeafType() bool { @@ -50,12 +56,7 @@ func (d *Definition) IsInputType() bool { } func (d *Definition) OneOf(types ...string) bool { - for _, t := range types { - if d.Name == t { - return true - } - } - return false + return slices.Contains(types, d.Name) } type FieldDefinition struct { @@ -65,7 +66,10 @@ type FieldDefinition struct { DefaultValue *Value // only for input objects Type *Type Directives DirectiveList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + + BeforeDescriptionComment *CommentGroup + AfterDescriptionComment *CommentGroup } type ArgumentDefinition struct { @@ -74,14 +78,20 @@ type ArgumentDefinition struct { DefaultValue *Value Type *Type Directives DirectiveList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + + BeforeDescriptionComment *CommentGroup + AfterDescriptionComment *CommentGroup } type EnumValueDefinition struct { Description string Name string Directives DirectiveList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + + BeforeDescriptionComment *CommentGroup + AfterDescriptionComment *CommentGroup } type DirectiveDefinition struct 
{ @@ -90,5 +100,8 @@ type DirectiveDefinition struct { Arguments ArgumentDefinitionList Locations []DirectiveLocation IsRepeatable bool - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + + BeforeDescriptionComment *CommentGroup + AfterDescriptionComment *CommentGroup } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go b/vendor/github.com/vektah/gqlparser/v2/ast/directive.go similarity index 89% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go rename to vendor/github.com/vektah/gqlparser/v2/ast/directive.go index 5f6e8531f5..de8d984028 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/directive.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/directive.go @@ -3,7 +3,7 @@ package ast type DirectiveLocation string const ( - // Executable + // Executable. LocationQuery DirectiveLocation = `QUERY` LocationMutation DirectiveLocation = `MUTATION` LocationSubscription DirectiveLocation = `SUBSCRIPTION` @@ -12,7 +12,7 @@ const ( LocationFragmentSpread DirectiveLocation = `FRAGMENT_SPREAD` LocationInlineFragment DirectiveLocation = `INLINE_FRAGMENT` - // Type System + // Type System. 
LocationSchema DirectiveLocation = `SCHEMA` LocationScalar DirectiveLocation = `SCALAR` LocationObject DirectiveLocation = `OBJECT` @@ -30,7 +30,7 @@ const ( type Directive struct { Name string Arguments ArgumentList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` // Requires validation ParentDefinition *Definition @@ -38,6 +38,9 @@ type Directive struct { Location DirectiveLocation } -func (d *Directive) ArgumentMap(vars map[string]interface{}) map[string]interface{} { +func (d *Directive) ArgumentMap(vars map[string]any) map[string]any { + if d.Definition == nil { + return nil + } return arg2map(d.Definition.Arguments, d.Arguments, vars) } diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/document.go b/vendor/github.com/vektah/gqlparser/v2/ast/document.go new file mode 100644 index 0000000000..66d55b43b4 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/ast/document.go @@ -0,0 +1,90 @@ +package ast + +type QueryDocument struct { + Operations OperationList + Fragments FragmentDefinitionList + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup +} + +type SchemaDocument struct { + Schema SchemaDefinitionList + SchemaExtension SchemaDefinitionList + Directives DirectiveDefinitionList + Definitions DefinitionList + Extensions DefinitionList + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup +} + +func (d *SchemaDocument) Merge(other *SchemaDocument) { + d.Schema = append(d.Schema, other.Schema...) + d.SchemaExtension = append(d.SchemaExtension, other.SchemaExtension...) + d.Directives = append(d.Directives, other.Directives...) + d.Definitions = append(d.Definitions, other.Definitions...) + d.Extensions = append(d.Extensions, other.Extensions...) 
+} + +type Schema struct { + Query *Definition + Mutation *Definition + Subscription *Definition + SchemaDirectives DirectiveList + + Types map[string]*Definition + Directives map[string]*DirectiveDefinition + + PossibleTypes map[string][]*Definition + Implements map[string][]*Definition + + Description string + + Comment *CommentGroup +} + +// AddTypes is the helper to add types definition to the schema. +func (s *Schema) AddTypes(defs ...*Definition) { + if s.Types == nil { + s.Types = make(map[string]*Definition) + } + for _, def := range defs { + s.Types[def.Name] = def + } +} + +func (s *Schema) AddPossibleType(name string, def *Definition) { + s.PossibleTypes[name] = append(s.PossibleTypes[name], def) +} + +// GetPossibleTypes will enumerate all the definitions for a given interface or union. +func (s *Schema) GetPossibleTypes(def *Definition) []*Definition { + return s.PossibleTypes[def.Name] +} + +func (s *Schema) AddImplements(name string, iface *Definition) { + s.Implements[name] = append(s.Implements[name], iface) +} + +// GetImplements returns all the interface and union definitions that the given definition +// satisfies. 
+func (s *Schema) GetImplements(def *Definition) []*Definition { + return s.Implements[def.Name] +} + +type SchemaDefinition struct { + Description string + Directives DirectiveList + OperationTypes OperationTypeDefinitionList + Position *Position `dump:"-" json:"-"` + + BeforeDescriptionComment *CommentGroup + AfterDescriptionComment *CommentGroup + EndOfDefinitionComment *CommentGroup +} + +type OperationTypeDefinition struct { + Operation Operation + Type string + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go b/vendor/github.com/vektah/gqlparser/v2/ast/dumper.go similarity index 86% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go rename to vendor/github.com/vektah/gqlparser/v2/ast/dumper.go index dbb7a7efaf..26cf693d9c 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/dumper.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/dumper.go @@ -8,8 +8,8 @@ import ( "strings" ) -// Dump turns ast into a stable string format for assertions in tests -func Dump(i interface{}) string { +// Dump turns ast into a stable string format for assertions in tests. 
+func Dump(i any) string { v := reflect.ValueOf(i) d := dumper{Buffer: &bytes.Buffer{}} @@ -40,13 +40,13 @@ func (d *dumper) dump(v reflect.Value) { d.WriteString("false") } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - d.WriteString(fmt.Sprintf("%d", v.Int())) + fmt.Fprintf(d, "%d", v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - d.WriteString(fmt.Sprintf("%d", v.Uint())) + fmt.Fprintf(d, "%d", v.Uint()) case reflect.Float32, reflect.Float64: - d.WriteString(fmt.Sprintf("%.2f", v.Float())) + fmt.Fprintf(d, "%.2f", v.Float()) case reflect.String: if v.Type().Name() != "string" { @@ -70,11 +70,11 @@ func (d *dumper) dump(v reflect.Value) { } func (d *dumper) writeIndent() { - d.Buffer.WriteString(strings.Repeat(" ", d.indent)) + d.WriteString(strings.Repeat(" ", d.indent)) } func (d *dumper) nl() { - d.Buffer.WriteByte('\n') + d.WriteByte('\n') d.writeIndent() } @@ -126,7 +126,6 @@ func isZero(v reflect.Value) bool { return v.IsNil() case reflect.Func, reflect.Map: return v.IsNil() - case reflect.Array, reflect.Slice: if v.IsNil() { return true @@ -144,10 +143,13 @@ func isZero(v reflect.Value) bool { return z case reflect.String: return v.String() == "" + case reflect.Bool: + // Never consider Bool field as zero value. + // Always include them in AST dump. 
+ return false + default: + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) } - - // Compare other types directly: - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type())) } func (d *dumper) dumpPtr(v reflect.Value) { diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go b/vendor/github.com/vektah/gqlparser/v2/ast/fragment.go similarity index 78% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go rename to vendor/github.com/vektah/gqlparser/v2/ast/fragment.go index 57ab56c7c6..05805e1085 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/fragment.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/fragment.go @@ -8,7 +8,8 @@ type FragmentSpread struct { ObjectDefinition *Definition Definition *FragmentDefinition - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup } type InlineFragment struct { @@ -19,7 +20,8 @@ type InlineFragment struct { // Require validation ObjectDefinition *Definition - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup } type FragmentDefinition struct { @@ -34,5 +36,6 @@ type FragmentDefinition struct { // Require validation Definition *Definition - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup } diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go b/vendor/github.com/vektah/gqlparser/v2/ast/operation.go similarity index 78% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go rename to vendor/github.com/vektah/gqlparser/v2/ast/operation.go index 3b37f81bf3..2efed025ba 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/operation.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/operation.go @@ -14,7 +14,8 @@ type OperationDefinition struct { VariableDefinitions VariableDefinitionList 
Directives DirectiveList SelectionSet SelectionSet - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup } type VariableDefinition struct { @@ -22,7 +23,8 @@ type VariableDefinition struct { Type *Type DefaultValue *Value Directives DirectiveList - Position *Position `dump:"-"` + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup // Requires validation Definition *Definition diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/path.go b/vendor/github.com/vektah/gqlparser/v2/ast/path.go new file mode 100644 index 0000000000..63f53a67a2 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/ast/path.go @@ -0,0 +1,72 @@ +package ast + +import ( + "bytes" + "encoding/json" + "fmt" +) + +var _ json.Unmarshaler = (*Path)(nil) + +type Path []PathElement + +type PathElement interface { + isPathElement() +} + +var ( + _ PathElement = PathIndex(0) + _ PathElement = PathName("") +) + +func (path Path) String() string { + if path == nil { + return "" + } + var str bytes.Buffer + for i, v := range path { + switch v := v.(type) { + case PathIndex: + fmt.Fprintf(&str, "[%d]", v) + case PathName: + if i != 0 { + str.WriteByte('.') + } + str.WriteString(string(v)) + default: + panic(fmt.Sprintf("unknown type: %T", v)) + } + } + return str.String() +} + +func (path *Path) UnmarshalJSON(b []byte) error { + var vs []any + err := json.Unmarshal(b, &vs) + if err != nil { + return err + } + + *path = make([]PathElement, 0, len(vs)) + for _, v := range vs { + switch v := v.(type) { + case string: + *path = append(*path, PathName(v)) + case int: + *path = append(*path, PathIndex(v)) + case float64: + *path = append(*path, PathIndex(int(v))) + default: + return fmt.Errorf("unknown path element type: %T", v) + } + } + return nil +} + +type PathIndex int + +func (PathIndex) isPathElement() {} + +type PathName string + +func (PathName) isPathElement() {} diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/selection.go 
b/vendor/github.com/vektah/gqlparser/v2/ast/selection.go new file mode 100644 index 0000000000..b6e6890d22 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/ast/selection.go @@ -0,0 +1,44 @@ +package ast + +type SelectionSet []Selection + +type Selection interface { + isSelection() + GetPosition() *Position +} + +func (*Field) isSelection() {} +func (*FragmentSpread) isSelection() {} +func (*InlineFragment) isSelection() {} + +func (f *Field) GetPosition() *Position { return f.Position } +func (s *FragmentSpread) GetPosition() *Position { return s.Position } +func (f *InlineFragment) GetPosition() *Position { return f.Position } + +type Field struct { + Alias string + Name string + Arguments ArgumentList + Directives DirectiveList + SelectionSet SelectionSet + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup + + // Require validation + Definition *FieldDefinition + ObjectDefinition *Definition +} + +type Argument struct { + Name string + Value *Value + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup +} + +func (f *Field) ArgumentMap(vars map[string]any) map[string]any { + if f.Definition == nil { + return nil + } + return arg2map(f.Definition.Arguments, f.Arguments, vars) +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/source.go b/vendor/github.com/vektah/gqlparser/v2/ast/source.go similarity index 93% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/source.go rename to vendor/github.com/vektah/gqlparser/v2/ast/source.go index 2949f83f7b..213a63a3c5 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/ast/source.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/source.go @@ -1,6 +1,6 @@ package ast -// Source covers a single *.graphql file +// Source covers a single *.graphql file. 
type Source struct { // Name is the filename of the source Name string diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/type.go b/vendor/github.com/vektah/gqlparser/v2/ast/type.go new file mode 100644 index 0000000000..669f1da57e --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/ast/type.go @@ -0,0 +1,68 @@ +package ast + +func NonNullNamedType(named string, pos *Position) *Type { + return &Type{NamedType: named, NonNull: true, Position: pos} +} + +func NamedType(named string, pos *Position) *Type { + return &Type{NamedType: named, NonNull: false, Position: pos} +} + +func NonNullListType(elem *Type, pos *Position) *Type { + return &Type{Elem: elem, NonNull: true, Position: pos} +} + +func ListType(elem *Type, pos *Position) *Type { + return &Type{Elem: elem, NonNull: false, Position: pos} +} + +type Type struct { + NamedType string + Elem *Type + NonNull bool + Position *Position `dump:"-" json:"-"` +} + +func (t *Type) Name() string { + if t.NamedType != "" { + return t.NamedType + } + + return t.Elem.Name() +} + +func (t *Type) String() string { + nn := "" + if t.NonNull { + nn = "!" 
+ } + if t.NamedType != "" { + return t.NamedType + nn + } + + return "[" + t.Elem.String() + "]" + nn +} + +func (t *Type) IsCompatible(other *Type) bool { + if t.NamedType != other.NamedType { + return false + } + + if t.Elem != nil && other.Elem == nil { + return false + } + + if t.Elem != nil && !t.Elem.IsCompatible(other.Elem) { + return false + } + + if other.NonNull { + return t.NonNull + } + + return true +} + +func (t *Type) Dump() string { + return t.String() +} diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/value.go b/vendor/github.com/vektah/gqlparser/v2/ast/value.go new file mode 100644 index 0000000000..83b471633d --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/ast/value.go @@ -0,0 +1,123 @@ +package ast + +import ( + "fmt" + "strconv" + "strings" +) + +type ValueKind int + +const ( + Variable ValueKind = iota + IntValue + FloatValue + StringValue + BlockValue + BooleanValue + NullValue + EnumValue + ListValue + ObjectValue +) + +type Value struct { + Raw string + Children ChildValueList + Kind ValueKind + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup + + // Require validation + Definition *Definition + VariableDefinition *VariableDefinition + ExpectedType *Type + ExpectedTypeHasDefault bool +} + +type ChildValue struct { + Name string + Value *Value + Position *Position `dump:"-" json:"-"` + Comment *CommentGroup +} + +func (v *Value) Value(vars map[string]any) (any, error) { + if v == nil { + return nil, nil + } + switch v.Kind { + case Variable: + if value, ok := vars[v.Raw]; ok { + return value, nil + } + if v.VariableDefinition != nil && v.VariableDefinition.DefaultValue != nil { + return v.VariableDefinition.DefaultValue.Value(vars) + } + return nil, nil + case IntValue: + return strconv.ParseInt(v.Raw, 10, 64) + case FloatValue: + return strconv.ParseFloat(v.Raw, 64) + case StringValue, BlockValue, EnumValue: + return v.Raw, nil + case BooleanValue: + return strconv.ParseBool(v.Raw) + case NullValue: + return 
nil, nil + case ListValue: + var val []any + for _, elem := range v.Children { + elemVal, err := elem.Value.Value(vars) + if err != nil { + return val, err + } + val = append(val, elemVal) + } + return val, nil + case ObjectValue: + val := map[string]any{} + for _, elem := range v.Children { + elemVal, err := elem.Value.Value(vars) + if err != nil { + return val, err + } + val[elem.Name] = elemVal + } + return val, nil + default: + panic(fmt.Errorf("unknown value kind %d", v.Kind)) + } +} + +func (v *Value) String() string { + if v == nil { + return "" + } + switch v.Kind { + case Variable: + return "$" + v.Raw + case IntValue, FloatValue, EnumValue, BooleanValue, NullValue: + return v.Raw + case StringValue, BlockValue: + return strconv.Quote(v.Raw) + case ListValue: + var val []string + for _, elem := range v.Children { + val = append(val, elem.Value.String()) + } + return "[" + strings.Join(val, ",") + "]" + case ObjectValue: + var val []string + for _, elem := range v.Children { + val = append(val, elem.Name+":"+elem.Value.String()) + } + return "{" + strings.Join(val, ",") + "}" + default: + panic(fmt.Errorf("unknown value kind %d", v.Kind)) + } +} + +func (v *Value) Dump() string { + return v.String() +} diff --git a/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go new file mode 100644 index 0000000000..b2ba01bbe5 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/gqlerror/error.go @@ -0,0 +1,198 @@ +package gqlerror + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/vektah/gqlparser/v2/ast" +) + +// Error is the standard graphql error type described in https://spec.graphql.org/draft/#sec-Errors +type Error struct { + Err error `json:"-"` + Message string `json:"message"` + Path ast.Path `json:"path,omitempty"` + Locations []Location `json:"locations,omitempty"` + Extensions map[string]any `json:"extensions,omitempty"` + Rule string `json:"-"` +} + +func (err *Error) 
SetFile(file string) { + if file == "" { + return + } + if err.Extensions == nil { + err.Extensions = map[string]any{} + } + + err.Extensions["file"] = file +} + +type Location struct { + Line int `json:"line,omitempty"` + Column int `json:"column,omitempty"` +} + +type List []*Error + +func (err *Error) Error() string { + var res strings.Builder + if err == nil { + return "" + } + filename, _ := err.Extensions["file"].(string) + if filename == "" { + filename = "input" + } + res.WriteString(filename) + + if len(err.Locations) > 0 { + res.WriteByte(':') + res.WriteString(strconv.Itoa(err.Locations[0].Line)) + res.WriteByte(':') + res.WriteString(strconv.Itoa(err.Locations[0].Column)) + } + + res.WriteString(": ") + if ps := err.pathString(); ps != "" { + res.WriteString(ps) + res.WriteByte(' ') + } + + res.WriteString(err.Message) + + return res.String() +} + +func (err *Error) pathString() string { + return err.Path.String() +} + +func (err *Error) Unwrap() error { + return err.Err +} + +func (err *Error) AsError() error { + if err == nil { + return nil + } + return err +} + +func (errs List) Error() string { + var buf strings.Builder + for _, err := range errs { + buf.WriteString(err.Error()) + buf.WriteByte('\n') + } + return buf.String() +} + +func (errs List) Is(target error) bool { + for _, err := range errs { + if errors.Is(err, target) { + return true + } + } + return false +} + +func (errs List) As(target any) bool { + for _, err := range errs { + if errors.As(err, target) { + return true + } + } + return false +} + +func (errs List) Unwrap() []error { + l := make([]error, len(errs)) + for i, err := range errs { + l[i] = err + } + return l +} + +func WrapPath(path ast.Path, err error) *Error { + if err == nil { + return nil + } + return &Error{ + Err: err, + Message: err.Error(), + Path: path, + } +} + +func Wrap(err error) *Error { + if err == nil { + return nil + } + return &Error{ + Err: err, + Message: err.Error(), + } +} + +func WrapIfUnwrapped(err 
error) *Error { + if err == nil { + return nil + } + gqlErr := &Error{} + if errors.As(err, &gqlErr) { + return gqlErr + } + return &Error{ + Err: err, + Message: err.Error(), + } +} + +func Errorf(message string, args ...any) *Error { + return &Error{ + Message: fmt.Sprintf(message, args...), + } +} + +func ErrorPathf(path ast.Path, message string, args ...any) *Error { + return &Error{ + Message: fmt.Sprintf(message, args...), + Path: path, + } +} + +func ErrorPosf(pos *ast.Position, message string, args ...any) *Error { + if pos == nil { + return ErrorLocf( + "", + -1, + -1, + message, + args..., + ) + } + return ErrorLocf( + pos.Src.Name, + pos.Line, + pos.Column, + message, + args..., + ) +} + +func ErrorLocf(file string, line, col int, message string, args ...any) *Error { + var extensions map[string]any + if file != "" { + extensions = map[string]any{"file": file} + } + return &Error{ + Message: fmt.Sprintf(message, args...), + Extensions: extensions, + Locations: []Location{ + {Line: line, Column: col}, + }, + } +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/blockstring.go b/vendor/github.com/vektah/gqlparser/v2/lexer/blockstring.go similarity index 100% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/blockstring.go rename to vendor/github.com/vektah/gqlparser/v2/lexer/blockstring.go diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer.go b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go similarity index 81% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer.go rename to vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go index f25555e650..7a82a42b33 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer.go +++ b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer.go @@ -2,13 +2,14 @@ package lexer import ( "bytes" + "slices" "unicode/utf8" - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - 
"github.com/open-policy-agent/opa/internal/gqlparser/gqlerror" + "github.com/vektah/gqlparser/v2/ast" + "github.com/vektah/gqlparser/v2/gqlerror" ) -// Lexer turns graphql request and schema strings into tokens +// Lexer turns graphql request and schema strings into tokens. type Lexer struct { *ast.Source // An offset into the string in bytes @@ -32,7 +33,7 @@ func New(src *ast.Source) Lexer { } } -// take one rune from input and advance end +// take one rune from input and advance end. func (s *Lexer) peek() (rune, int) { return utf8.DecodeRuneInString(s.Input[s.end:]) } @@ -55,7 +56,7 @@ func (s *Lexer) makeValueToken(kind Type, value string) (Token, error) { }, nil } -func (s *Lexer) makeError(format string, args ...interface{}) (Token, error) { +func (s *Lexer) makeError(format string, args ...any) (Token, *gqlerror.Error) { column := s.endRunes - s.lineStartRunes + 1 return Token{ Kind: Invalid, @@ -66,7 +67,7 @@ func (s *Lexer) makeError(format string, args ...interface{}) (Token, error) { Column: column, Src: s.Source, }, - }, gqlerror.ErrorLocf(s.Source.Name, s.line, column, format, args...) + }, gqlerror.ErrorLocf(s.Name, s.line, column, format, args...) } // ReadToken gets the next token from the source starting at the given position. @@ -74,8 +75,7 @@ func (s *Lexer) makeError(format string, args ...interface{}) (Token, error) { // This skips over whitespace and comments until it finds the next lexable // token, then lexes punctuators immediately or calls the appropriate helper // function for more complicated tokens. 
-func (s *Lexer) ReadToken() (token Token, err error) { - +func (s *Lexer) ReadToken() (Token, error) { s.ws() s.start = s.end s.startRunes = s.endRunes @@ -121,12 +121,61 @@ func (s *Lexer) ReadToken() (token Token, err error) { case '|': return s.makeValueToken(Pipe, "") case '#': - if comment, err := s.readComment(); err != nil { - return comment, err - } - return s.ReadToken() - - case '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z': + return s.readComment() + + case '_', + 'a', + 'b', + 'c', + 'd', + 'e', + 'f', + 'g', + 'h', + 'i', + 'j', + 'k', + 'l', + 'm', + 'n', + 'o', + 'p', + 'q', + 'r', + 's', + 't', + 'u', + 'v', + 'w', + 'x', + 'y', + 'z', + 'A', + 'B', + 'C', + 'D', + 'E', + 'F', + 'G', + 'H', + 'I', + 'J', + 'K', + 'L', + 'M', + 'N', + 'O', + 'P', + 'Q', + 'R', + 'S', + 'T', + 'U', + 'V', + 'W', + 'X', + 'Y', + 'Z': return s.readName() case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': @@ -148,14 +197,16 @@ func (s *Lexer) ReadToken() (token Token, err error) { } if r == '\'' { - return s.makeError(`Unexpected single quote character ('), did you mean to use a double quote (")?`) + return s.makeError( + `Unexpected single quote character ('), did you mean to use a double quote (")?`, + ) } return s.makeError(`Cannot parse the unexpected character "%s".`, string(r)) } // ws reads from body starting at startPosition until it finds a non-whitespace -// or commented character, and updates the token end to include all whitespace +// or commented character, and updates the token end to include all whitespace. func (s *Lexer) ws() { for s.end < len(s.Input) { switch s.Input[s.end] { @@ -193,7 +244,7 @@ func (s *Lexer) ws() { // readComment from the input // -// #[\u0009\u0020-\uFFFF]* +// #[\u0009\u0020-\uFFFF]*. 
func (s *Lexer) readComment() (Token, error) { for s.end < len(s.Input) { r, w := s.peek() @@ -258,26 +309,23 @@ func (s *Lexer) readNumber() (Token, error) { return s.makeToken(Float) } return s.makeToken(Int) - } -// acceptByte if it matches any of given bytes, returning true if it found anything +// acceptByte if it matches any of given bytes, returning true if it found anything. func (s *Lexer) acceptByte(bytes ...uint8) bool { if s.end >= len(s.Input) { return false } - for _, accepted := range bytes { - if s.Input[s.end] == accepted { - s.end++ - s.endRunes++ - return true - } + if slices.Contains(bytes, s.Input[s.end]) { + s.end++ + s.endRunes++ + return true } return false } -// acceptDigits from the input, returning the number of digits it found +// acceptDigits from the input, returning the number of digits it found. func (s *Lexer) acceptDigits() int { consumed := 0 for s.end < len(s.Input) && s.Input[s.end] >= '0' && s.Input[s.end] <= '9' { @@ -290,7 +338,7 @@ func (s *Lexer) acceptDigits() int { } // describeNext peeks at the input and returns a human readable string. This should will alloc -// and should only be used in errors +// and should only be used in errors. func (s *Lexer) describeNext() string { if s.end < len(s.Input) { return `"` + string(s.Input[s.end]) + `"` @@ -300,7 +348,7 @@ func (s *Lexer) describeNext() string { // readString from the input // -// "([^"\\\u000A\u000D]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*" +// "([^"\\\u000A\u000D]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*". 
func (s *Lexer) readString() (Token, error) { inputLen := len(s.Input) @@ -321,8 +369,8 @@ func (s *Lexer) readString() (Token, error) { } switch r { default: - var char = rune(r) - var w = 1 + char := rune(r) + w := 1 // skip unicode overhead if we are in the ascii range if r >= 127 { @@ -337,7 +385,8 @@ func (s *Lexer) readString() (Token, error) { case '"': t, err := s.makeToken(String) - // the token should not include the quotes in its value, but should cover them in its position + // the token should not include the quotes in its value, but should cover them in its + // position t.Pos.Start-- t.Pos.End++ @@ -375,7 +424,10 @@ func (s *Lexer) readString() (Token, error) { if !ok { s.end++ s.endRunes++ - return s.makeError("Invalid character escape sequence: \\%s.", s.Input[s.end:s.end+5]) + return s.makeError( + "Invalid character escape sequence: \\%s.", + s.Input[s.end:s.end+5], + ) } buf.WriteRune(r) s.end += 6 @@ -410,7 +462,7 @@ func (s *Lexer) readString() (Token, error) { // readBlockString from the input // -// """("?"?(\\"""|\\(?!=""")|[^"\\]))*""" +// """("?"?(\\"""|\\(?!=""")|[^"\\]))*""". 
func (s *Lexer) readBlockString() (Token, error) { inputLen := len(s.Input) @@ -426,17 +478,29 @@ func (s *Lexer) readBlockString() (Token, error) { r := s.Input[s.end] // Closing triple quote (""") - if r == '"' && s.end+3 <= inputLen && s.Input[s.end:s.end+3] == `"""` { - t, err := s.makeValueToken(BlockString, blockStringValue(buf.String())) + if r == '"' { + // Count consecutive quotes + quoteCount := 1 + i := s.end + 1 + for i < inputLen && s.Input[i] == '"' { + quoteCount++ + i++ + } - // the token should not include the quotes in its value, but should cover them in its position - t.Pos.Start -= 3 - t.Pos.End += 3 + // If we have at least 3 quotes, use the last 3 as the closing quote + if quoteCount >= 3 { + // Add any extra quotes to the buffer (except the last 3) + for range quoteCount - 3 { + buf.WriteByte('"') + } - // skip the close quote - s.end += 3 - s.endRunes += 3 - return t, err + t, err := s.makeValueToken(BlockString, blockStringValue(buf.String())) + t.Pos.Start -= 3 + t.Pos.End += 3 + s.end += quoteCount + s.endRunes += quoteCount + return t, err + } } // SourceCharacter @@ -444,11 +508,12 @@ func (s *Lexer) readBlockString() (Token, error) { return s.makeError(`Invalid character within String: "\u%04d".`, r) } - if r == '\\' && s.end+4 <= inputLen && s.Input[s.end:s.end+4] == `\"""` { + switch { + case r == '\\' && s.end+4 <= inputLen && s.Input[s.end:s.end+4] == `\"""`: buf.WriteString(`"""`) s.end += 4 s.endRunes += 4 - } else if r == '\r' { + case r == '\r': if s.end+1 < inputLen && s.Input[s.end+1] == '\n' { s.end++ s.endRunes++ @@ -459,9 +524,9 @@ func (s *Lexer) readBlockString() (Token, error) { s.endRunes++ s.line++ s.lineStartRunes = s.endRunes - } else { - var char = rune(r) - var w = 1 + default: + char := rune(r) + w := 1 // skip unicode overhead if we are in the ascii range if r >= 127 { @@ -500,7 +565,7 @@ func unhex(b string) (v rune, ok bool) { // readName from the input // -// [_A-Za-z][_0-9A-Za-z]* +// [_A-Za-z][_0-9A-Za-z]*. 
func (s *Lexer) readName() (Token, error) { for s.end < len(s.Input) { r, w := s.peek() diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer_test.yml b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml similarity index 91% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer_test.yml rename to vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml index 5c4d5f0ff5..0899f4ca9b 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/lexer_test.yml +++ b/vendor/github.com/vektah/gqlparser/v2/lexer/lexer_test.yml @@ -26,22 +26,38 @@ simple tokens: column: 3 value: 'foo' - - name: skips whitespace - input: "\n\n foo\n\n\n" + - name: records line and column with comments + input: "\n\n\n#foo\n #bar\n foo\n" tokens: + - + kind: COMMENT + start: 3 + end: 7 + line: 4 + column: 0 + value: '#foo' + - + kind: COMMENT + start: 10 + end: 14 + line: 5 + column: 3 + value: '#bar' - kind: NAME - start: 6 - end: 9 + start: 17 + end: 20 + line: 6 + column: 3 value: 'foo' - - name: skips comments - input: "\n #comment\n foo#comment\n" + - name: skips whitespace + input: "\n\n foo\n\n\n" tokens: - kind: NAME - start: 18 - end: 21 + start: 6 + end: 9 value: 'foo' - name: skips commas @@ -78,6 +94,57 @@ simple tokens: end: 1 value: a +lexes comments: + - name: basic + input: '#simple' + tokens: + - + kind: COMMENT + start: 0 + end: 7 + value: '#simple' + + - name: two lines + input: "#first\n#second" + tokens: + - + kind: COMMENT + start: 0 + end: 6 + value: "#first" + - + kind: COMMENT + start: 7 + end: 14 + value: "#second" + + - name: whitespace + input: '# white space ' + tokens: + - + kind: COMMENT + start: 0 + end: 14 + value: '# white space ' + + - name: not escaped + input: '#not escaped \n\r\b\t\f' + tokens: + - + kind: COMMENT + start: 0 + end: 23 + value: '#not escaped \n\r\b\t\f' + + - name: slashes + input: '#slashes \\ \/' + tokens: + - + kind: COMMENT + start: 0 + end: 14 + 
value: '#slashes \\ \/' + lexes strings: - name: basic input: '"simple"' @@ -674,7 +741,6 @@ lex reports useful unknown character error: - name: question mark input: "?" error: - message: 'Cannot parse the unexpected character "?".' message: 'Cannot parse the unexpected character "?".' locations: [{ line: 1, column: 1 }] diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/token.go b/vendor/github.com/vektah/gqlparser/v2/lexer/token.go similarity index 97% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/token.go rename to vendor/github.com/vektah/gqlparser/v2/lexer/token.go index 79eefd0f4e..8985a7efb7 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/lexer/token.go +++ b/vendor/github.com/vektah/gqlparser/v2/lexer/token.go @@ -3,7 +3,7 @@ package lexer import ( "strconv" - "github.com/open-policy-agent/opa/internal/gqlparser/ast" + "github.com/vektah/gqlparser/v2/ast" ) const ( diff --git a/vendor/github.com/vektah/gqlparser/v2/parser/parser.go b/vendor/github.com/vektah/gqlparser/v2/parser/parser.go new file mode 100644 index 0000000000..b6a306406c --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/parser/parser.go @@ -0,0 +1,200 @@ +package parser + +import ( + "strconv" + + "github.com/vektah/gqlparser/v2/ast" + "github.com/vektah/gqlparser/v2/gqlerror" + "github.com/vektah/gqlparser/v2/lexer" +) + +type parser struct { + lexer lexer.Lexer + err error + + peeked bool + peekToken lexer.Token + peekError error + + prev lexer.Token + + comment *ast.CommentGroup + commentConsuming bool + + tokenCount int + maxTokenLimit int +} + +func (p *parser) SetMaxTokenLimit(maxToken int) { + p.maxTokenLimit = maxToken +} + +func (p *parser) consumeComment() (*ast.Comment, bool) { + if p.err != nil { + return nil, false + } + tok := p.peek() + if tok.Kind != lexer.Comment { + return nil, false + } + p.next() + return &ast.Comment{ + Value: tok.Value, + Position: &tok.Pos, + }, true +} + +func (p 
*parser) consumeCommentGroup() { + if p.err != nil { + return + } + if p.commentConsuming { + return + } + p.commentConsuming = true + + var comments []*ast.Comment + for { + comment, ok := p.consumeComment() + if !ok { + break + } + comments = append(comments, comment) + } + + p.comment = &ast.CommentGroup{List: comments} + p.commentConsuming = false +} + +func (p *parser) peekPos() *ast.Position { + if p.err != nil { + return nil + } + + peek := p.peek() + return &peek.Pos +} + +func (p *parser) peek() lexer.Token { + if p.err != nil { + return p.prev + } + + if !p.peeked { + p.peekToken, p.peekError = p.lexer.ReadToken() + p.peeked = true + if p.peekToken.Kind == lexer.Comment { + p.consumeCommentGroup() + } + } + + return p.peekToken +} + +func (p *parser) error(tok lexer.Token, format string, args ...any) { + if p.err != nil { + return + } + p.err = gqlerror.ErrorLocf(tok.Pos.Src.Name, tok.Pos.Line, tok.Pos.Column, format, args...) +} + +func (p *parser) next() lexer.Token { + if p.err != nil { + return p.prev + } + // Increment the token count before reading the next token + p.tokenCount++ + if p.maxTokenLimit != 0 && p.tokenCount > p.maxTokenLimit { + p.err = gqlerror.Errorf("exceeded token limit of %d", p.maxTokenLimit) + return p.prev + } + if p.peeked { + p.peeked = false + p.comment = nil + p.prev, p.err = p.peekToken, p.peekError + } else { + p.prev, p.err = p.lexer.ReadToken() + if p.prev.Kind == lexer.Comment { + p.consumeCommentGroup() + } + } + return p.prev +} + +func (p *parser) expectKeyword(value string) (lexer.Token, *ast.CommentGroup) { + tok := p.peek() + comment := p.comment + if tok.Kind == lexer.Name && tok.Value == value { + return p.next(), comment + } + + p.error(tok, "Expected %s, found %s", strconv.Quote(value), tok.String()) + return tok, comment +} + +func (p *parser) expect(kind lexer.Type) (lexer.Token, *ast.CommentGroup) { + tok := p.peek() + comment := p.comment + if tok.Kind == kind { + return p.next(), comment + } + + 
p.error(tok, "Expected %s, found %s", kind, tok.Kind.String()) + return tok, comment +} + +func (p *parser) skip(kind lexer.Type) bool { + if p.err != nil { + return false + } + + tok := p.peek() + + if tok.Kind != kind { + return false + } + p.next() + return true +} + +func (p *parser) unexpectedError() { + p.unexpectedToken(p.peek()) +} + +func (p *parser) unexpectedToken(tok lexer.Token) { + p.error(tok, "Unexpected %s", tok.String()) +} + +func (p *parser) many(start, end lexer.Type, cb func()) { + hasDef := p.skip(start) + if !hasDef { + return + } + + for p.peek().Kind != end && p.err == nil { + cb() + } + p.next() +} + +func (p *parser) some(start, end lexer.Type, cb func()) *ast.CommentGroup { + hasDef := p.skip(start) + if !hasDef { + return nil + } + + called := false + for p.peek().Kind != end && p.err == nil { + called = true + cb() + } + + if !called { + p.error(p.peek(), "expected at least one definition, found %s", p.peek().Kind.String()) + return nil + } + + comment := p.comment + p.next() + return comment +} diff --git a/vendor/github.com/vektah/gqlparser/v2/parser/query.go b/vendor/github.com/vektah/gqlparser/v2/parser/query.go new file mode 100644 index 0000000000..271a2ffe81 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/parser/query.go @@ -0,0 +1,372 @@ +package parser + +import ( + . 
"github.com/vektah/gqlparser/v2/ast" //nolint:staticcheck // bad, yeah + "github.com/vektah/gqlparser/v2/lexer" +) + +func ParseQuery(source *Source) (*QueryDocument, error) { + p := parser{ + lexer: lexer.New(source), + maxTokenLimit: 0, // 0 means unlimited + } + return p.parseQueryDocument(), p.err +} + +func ParseQueryWithTokenLimit(source *Source, maxTokenLimit int) (*QueryDocument, error) { + p := parser{ + lexer: lexer.New(source), + maxTokenLimit: maxTokenLimit, + } + return p.parseQueryDocument(), p.err +} + +func (p *parser) parseQueryDocument() *QueryDocument { + var doc QueryDocument + for p.peek().Kind != lexer.EOF { + if p.err != nil { + return &doc + } + doc.Position = p.peekPos() + switch p.peek().Kind { + case lexer.Name: + switch p.peek().Value { + case "query", "mutation", "subscription": + doc.Operations = append(doc.Operations, p.parseOperationDefinition()) + case "fragment": + doc.Fragments = append(doc.Fragments, p.parseFragmentDefinition()) + default: + p.unexpectedError() + } + case lexer.BraceL: + doc.Operations = append(doc.Operations, p.parseOperationDefinition()) + default: + p.unexpectedError() + } + } + + return &doc +} + +func (p *parser) parseOperationDefinition() *OperationDefinition { + if p.peek().Kind == lexer.BraceL { + return &OperationDefinition{ + Position: p.peekPos(), + Comment: p.comment, + Operation: Query, + SelectionSet: p.parseRequiredSelectionSet(), + } + } + + var od OperationDefinition + od.Position = p.peekPos() + od.Comment = p.comment + od.Operation = p.parseOperationType() + + if p.peek().Kind == lexer.Name { + od.Name = p.next().Value + } + + od.VariableDefinitions = p.parseVariableDefinitions() + od.Directives = p.parseDirectives(false) + od.SelectionSet = p.parseRequiredSelectionSet() + + return &od +} + +func (p *parser) parseOperationType() Operation { + tok := p.next() + switch tok.Value { + case "query": + return Query + case "mutation": + return Mutation + case "subscription": + return Subscription + } 
+ p.unexpectedToken(tok) + return "" +} + +func (p *parser) parseVariableDefinitions() VariableDefinitionList { + var defs []*VariableDefinition + p.some(lexer.ParenL, lexer.ParenR, func() { + defs = append(defs, p.parseVariableDefinition()) + }) + + return defs +} + +func (p *parser) parseVariableDefinition() *VariableDefinition { + var def VariableDefinition + def.Position = p.peekPos() + def.Comment = p.comment + def.Variable = p.parseVariable() + + p.expect(lexer.Colon) + + def.Type = p.parseTypeReference() + + if p.skip(lexer.Equals) { + def.DefaultValue = p.parseValueLiteral(true) + } + + def.Directives = p.parseDirectives(false) + + return &def +} + +func (p *parser) parseVariable() string { + p.expect(lexer.Dollar) + return p.parseName() +} + +func (p *parser) parseOptionalSelectionSet() SelectionSet { + var selections []Selection + p.some(lexer.BraceL, lexer.BraceR, func() { + selections = append(selections, p.parseSelection()) + }) + + return selections +} + +func (p *parser) parseRequiredSelectionSet() SelectionSet { + if p.peek().Kind != lexer.BraceL { + p.error(p.peek(), "Expected %s, found %s", lexer.BraceL, p.peek().Kind.String()) + return nil + } + + var selections []Selection + p.some(lexer.BraceL, lexer.BraceR, func() { + selections = append(selections, p.parseSelection()) + }) + + return selections +} + +func (p *parser) parseSelection() Selection { + if p.peek().Kind == lexer.Spread { + return p.parseFragment() + } + return p.parseField() +} + +func (p *parser) parseField() *Field { + var field Field + field.Position = p.peekPos() + field.Comment = p.comment + field.Alias = p.parseName() + + if p.skip(lexer.Colon) { + field.Name = p.parseName() + } else { + field.Name = field.Alias + } + + field.Arguments = p.parseArguments(false) + field.Directives = p.parseDirectives(false) + if p.peek().Kind == lexer.BraceL { + field.SelectionSet = p.parseOptionalSelectionSet() + } + + return &field +} + +func (p *parser) parseArguments(isConst bool) 
ArgumentList { + var arguments ArgumentList + p.some(lexer.ParenL, lexer.ParenR, func() { + arguments = append(arguments, p.parseArgument(isConst)) + }) + + return arguments +} + +func (p *parser) parseArgument(isConst bool) *Argument { + arg := Argument{} + arg.Position = p.peekPos() + arg.Comment = p.comment + arg.Name = p.parseName() + p.expect(lexer.Colon) + + arg.Value = p.parseValueLiteral(isConst) + return &arg +} + +func (p *parser) parseFragment() Selection { + _, comment := p.expect(lexer.Spread) + + if peek := p.peek(); peek.Kind == lexer.Name && peek.Value != "on" { + return &FragmentSpread{ + Position: p.peekPos(), + Comment: comment, + Name: p.parseFragmentName(), + Directives: p.parseDirectives(false), + } + } + + var def InlineFragment + def.Position = p.peekPos() + def.Comment = comment + if p.peek().Value == "on" { + p.next() // "on" + + def.TypeCondition = p.parseName() + } + + def.Directives = p.parseDirectives(false) + def.SelectionSet = p.parseRequiredSelectionSet() + return &def +} + +func (p *parser) parseFragmentDefinition() *FragmentDefinition { + var def FragmentDefinition + def.Position = p.peekPos() + def.Comment = p.comment + p.expectKeyword("fragment") + + def.Name = p.parseFragmentName() + def.VariableDefinition = p.parseVariableDefinitions() + + p.expectKeyword("on") + + def.TypeCondition = p.parseName() + def.Directives = p.parseDirectives(false) + def.SelectionSet = p.parseRequiredSelectionSet() + return &def +} + +func (p *parser) parseFragmentName() string { + if p.peek().Value == "on" { + p.unexpectedError() + return "" + } + + return p.parseName() +} + +func (p *parser) parseValueLiteral(isConst bool) *Value { + token := p.peek() + + var kind ValueKind + switch token.Kind { + case lexer.BracketL: + return p.parseList(isConst) + case lexer.BraceL: + return p.parseObject(isConst) + case lexer.Dollar: + if isConst { + p.unexpectedError() + return nil + } + return &Value{ + Position: &token.Pos, + Comment: p.comment, + Raw: 
p.parseVariable(), + Kind: Variable, + } + case lexer.Int: + kind = IntValue + case lexer.Float: + kind = FloatValue + case lexer.String: + kind = StringValue + case lexer.BlockString: + kind = BlockValue + case lexer.Name: + switch token.Value { + case "true", "false": + kind = BooleanValue + case "null": + kind = NullValue + default: + kind = EnumValue + } + default: + p.unexpectedError() + return nil + } + + p.next() + + return &Value{Position: &token.Pos, Comment: p.comment, Raw: token.Value, Kind: kind} +} + +func (p *parser) parseList(isConst bool) *Value { + var values ChildValueList + pos := p.peekPos() + comment := p.comment + p.many(lexer.BracketL, lexer.BracketR, func() { + values = append(values, &ChildValue{Value: p.parseValueLiteral(isConst)}) + }) + + return &Value{Children: values, Kind: ListValue, Position: pos, Comment: comment} +} + +func (p *parser) parseObject(isConst bool) *Value { + var fields ChildValueList + pos := p.peekPos() + comment := p.comment + p.many(lexer.BraceL, lexer.BraceR, func() { + fields = append(fields, p.parseObjectField(isConst)) + }) + + return &Value{Children: fields, Kind: ObjectValue, Position: pos, Comment: comment} +} + +func (p *parser) parseObjectField(isConst bool) *ChildValue { + field := ChildValue{} + field.Position = p.peekPos() + field.Comment = p.comment + field.Name = p.parseName() + + p.expect(lexer.Colon) + + field.Value = p.parseValueLiteral(isConst) + return &field +} + +func (p *parser) parseDirectives(isConst bool) []*Directive { + var directives []*Directive + + for p.peek().Kind == lexer.At { + if p.err != nil { + break + } + directives = append(directives, p.parseDirective(isConst)) + } + return directives +} + +func (p *parser) parseDirective(isConst bool) *Directive { + p.expect(lexer.At) + + return &Directive{ + Position: p.peekPos(), + Name: p.parseName(), + Arguments: p.parseArguments(isConst), + } +} + +func (p *parser) parseTypeReference() *Type { + var typ Type + + if 
p.skip(lexer.BracketL) { + typ.Position = p.peekPos() + typ.Elem = p.parseTypeReference() + p.expect(lexer.BracketR) + } else { + typ.Position = p.peekPos() + typ.NamedType = p.parseName() + } + + if p.skip(lexer.Bang) { + typ.NonNull = true + } + return &typ +} + +func (p *parser) parseName() string { + token, _ := p.expect(lexer.Name) + + return token.Value +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query_test.yml b/vendor/github.com/vektah/gqlparser/v2/parser/query_test.yml similarity index 98% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query_test.yml rename to vendor/github.com/vektah/gqlparser/v2/parser/query_test.yml index a46a01e718..ec0580f5fa 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/parser/query_test.yml +++ b/vendor/github.com/vektah/gqlparser/v2/parser/query_test.yml @@ -436,6 +436,7 @@ large queries: - Alias: "id" Name: "id" + Comment: "# Copyright (c) 2015-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n" - Operation: Operation("mutation") Name: "likeStory" diff --git a/vendor/github.com/vektah/gqlparser/v2/parser/schema.go b/vendor/github.com/vektah/gqlparser/v2/parser/schema.go new file mode 100644 index 0000000000..804f02c9f8 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/parser/schema.go @@ -0,0 +1,635 @@ +package parser + +import ( + . 
"github.com/vektah/gqlparser/v2/ast" //nolint:staticcheck // bad, yeah + "github.com/vektah/gqlparser/v2/lexer" +) + +func ParseSchemas(inputs ...*Source) (*SchemaDocument, error) { + sd := &SchemaDocument{} + for _, input := range inputs { + inputAst, err := ParseSchema(input) + if err != nil { + return nil, err + } + sd.Merge(inputAst) + } + return sd, nil +} + +func ParseSchema(source *Source) (*SchemaDocument, error) { + p := parser{ + lexer: lexer.New(source), + maxTokenLimit: 0, // default value is unlimited + } + sd, err := p.parseSchemaDocument(), p.err + if err != nil { + return nil, err + } + + for _, def := range sd.Definitions { + def.BuiltIn = source.BuiltIn + } + for _, def := range sd.Extensions { + def.BuiltIn = source.BuiltIn + } + + return sd, nil +} + +func ParseSchemasWithLimit(maxTokenLimit int, inputs ...*Source) (*SchemaDocument, error) { + sd := &SchemaDocument{} + for _, input := range inputs { + inputAst, err := ParseSchemaWithLimit(input, maxTokenLimit) + if err != nil { + return nil, err + } + sd.Merge(inputAst) + } + return sd, nil +} + +func ParseSchemaWithLimit(source *Source, maxTokenLimit int) (*SchemaDocument, error) { + p := parser{ + lexer: lexer.New(source), + maxTokenLimit: maxTokenLimit, // 0 is unlimited + } + sd, err := p.parseSchemaDocument(), p.err + if err != nil { + return nil, err + } + + for _, def := range sd.Definitions { + def.BuiltIn = source.BuiltIn + } + for _, def := range sd.Extensions { + def.BuiltIn = source.BuiltIn + } + + return sd, nil +} + +func (p *parser) parseSchemaDocument() *SchemaDocument { + var doc SchemaDocument + doc.Position = p.peekPos() + for p.peek().Kind != lexer.EOF { + if p.err != nil { + return nil + } + + var description descriptionWithComment + if p.peek().Kind == lexer.BlockString || p.peek().Kind == lexer.String { + description = p.parseDescription() + } + + if p.peek().Kind != lexer.Name { + p.unexpectedError() + break + } + + switch p.peek().Value { + case "scalar", "type", 
"interface", "union", "enum", "input": + doc.Definitions = append(doc.Definitions, p.parseTypeSystemDefinition(description)) + case "schema": + doc.Schema = append(doc.Schema, p.parseSchemaDefinition(description)) + case "directive": + doc.Directives = append(doc.Directives, p.parseDirectiveDefinition(description)) + case "extend": + if description.text != "" { + p.unexpectedToken(p.prev) + } + p.parseTypeSystemExtension(&doc) + default: + p.unexpectedError() + return nil + } + } + + // treat end of file comments + doc.Comment = p.comment + + return &doc +} + +func (p *parser) parseDescription() descriptionWithComment { + token := p.peek() + + var desc descriptionWithComment + if token.Kind != lexer.BlockString && token.Kind != lexer.String { + return desc + } + + desc.comment = p.comment + desc.text = p.next().Value + return desc +} + +func (p *parser) parseTypeSystemDefinition(description descriptionWithComment) *Definition { + tok := p.peek() + if tok.Kind != lexer.Name { + p.unexpectedError() + return nil + } + + switch tok.Value { + case "scalar": + return p.parseScalarTypeDefinition(description) + case "type": + return p.parseObjectTypeDefinition(description) + case "interface": + return p.parseInterfaceTypeDefinition(description) + case "union": + return p.parseUnionTypeDefinition(description) + case "enum": + return p.parseEnumTypeDefinition(description) + case "input": + return p.parseInputObjectTypeDefinition(description) + default: + p.unexpectedError() + return nil + } +} + +func (p *parser) parseSchemaDefinition(description descriptionWithComment) *SchemaDefinition { + _, comment := p.expectKeyword("schema") + + def := SchemaDefinition{} + def.Position = p.peekPos() + def.BeforeDescriptionComment = description.comment + def.Description = description.text + def.AfterDescriptionComment = comment + def.Directives = p.parseDirectives(true) + + def.EndOfDefinitionComment = p.some(lexer.BraceL, lexer.BraceR, func() { + def.OperationTypes = 
append(def.OperationTypes, p.parseOperationTypeDefinition()) + }) + return &def +} + +func (p *parser) parseOperationTypeDefinition() *OperationTypeDefinition { + var op OperationTypeDefinition + op.Position = p.peekPos() + op.Comment = p.comment + op.Operation = p.parseOperationType() + p.expect(lexer.Colon) + op.Type = p.parseName() + return &op +} + +func (p *parser) parseScalarTypeDefinition(description descriptionWithComment) *Definition { + _, comment := p.expectKeyword("scalar") + + var def Definition + def.Position = p.peekPos() + def.BeforeDescriptionComment = description.comment + def.Description = description.text + def.AfterDescriptionComment = comment + def.Kind = Scalar + def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + return &def +} + +func (p *parser) parseObjectTypeDefinition(description descriptionWithComment) *Definition { + _, comment := p.expectKeyword("type") + + var def Definition + def.Position = p.peekPos() + def.Kind = Object + def.BeforeDescriptionComment = description.comment + def.Description = description.text + def.AfterDescriptionComment = comment + def.Name = p.parseName() + def.Interfaces = p.parseImplementsInterfaces() + def.Directives = p.parseDirectives(true) + def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition() + return &def +} + +func (p *parser) parseImplementsInterfaces() []string { + var types []string + if p.peek().Value == "implements" { + p.next() + // optional leading ampersand + p.skip(lexer.Amp) + + types = append(types, p.parseName()) + for p.skip(lexer.Amp) && p.err == nil { + types = append(types, p.parseName()) + } + } + return types +} + +func (p *parser) parseFieldsDefinition() (FieldList, *CommentGroup) { + var defs FieldList + comment := p.some(lexer.BraceL, lexer.BraceR, func() { + defs = append(defs, p.parseFieldDefinition()) + }) + return defs, comment +} + +func (p *parser) parseFieldDefinition() *FieldDefinition { + var def FieldDefinition + def.Position = p.peekPos() 
+ + desc := p.parseDescription() + if desc.text != "" { + def.BeforeDescriptionComment = desc.comment + def.Description = desc.text + } + + p.peek() // peek to set p.comment + def.AfterDescriptionComment = p.comment + def.Name = p.parseName() + def.Arguments = p.parseArgumentDefs() + p.expect(lexer.Colon) + def.Type = p.parseTypeReference() + def.Directives = p.parseDirectives(true) + + return &def +} + +func (p *parser) parseArgumentDefs() ArgumentDefinitionList { + var args ArgumentDefinitionList + p.some(lexer.ParenL, lexer.ParenR, func() { + args = append(args, p.parseArgumentDef()) + }) + return args +} + +func (p *parser) parseArgumentDef() *ArgumentDefinition { + var def ArgumentDefinition + def.Position = p.peekPos() + + desc := p.parseDescription() + if desc.text != "" { + def.BeforeDescriptionComment = desc.comment + def.Description = desc.text + } + + p.peek() // peek to set p.comment + def.AfterDescriptionComment = p.comment + def.Name = p.parseName() + p.expect(lexer.Colon) + def.Type = p.parseTypeReference() + if p.skip(lexer.Equals) { + def.DefaultValue = p.parseValueLiteral(true) + } + def.Directives = p.parseDirectives(true) + return &def +} + +func (p *parser) parseInputValueDef() *FieldDefinition { + var def FieldDefinition + def.Position = p.peekPos() + + desc := p.parseDescription() + if desc.text != "" { + def.BeforeDescriptionComment = desc.comment + def.Description = desc.text + } + + p.peek() // peek to set p.comment + def.AfterDescriptionComment = p.comment + def.Name = p.parseName() + p.expect(lexer.Colon) + def.Type = p.parseTypeReference() + if p.skip(lexer.Equals) { + def.DefaultValue = p.parseValueLiteral(true) + } + def.Directives = p.parseDirectives(true) + return &def +} + +func (p *parser) parseInterfaceTypeDefinition(description descriptionWithComment) *Definition { + _, comment := p.expectKeyword("interface") + + var def Definition + def.Position = p.peekPos() + def.Kind = Interface + def.BeforeDescriptionComment = 
description.comment + def.Description = description.text + def.AfterDescriptionComment = comment + def.Name = p.parseName() + def.Interfaces = p.parseImplementsInterfaces() + def.Directives = p.parseDirectives(true) + def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition() + return &def +} + +func (p *parser) parseUnionTypeDefinition(description descriptionWithComment) *Definition { + _, comment := p.expectKeyword("union") + + var def Definition + def.Position = p.peekPos() + def.Kind = Union + def.BeforeDescriptionComment = description.comment + def.Description = description.text + def.AfterDescriptionComment = comment + def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + def.Types = p.parseUnionMemberTypes() + return &def +} + +func (p *parser) parseUnionMemberTypes() []string { + var types []string + if p.skip(lexer.Equals) { + // optional leading pipe + p.skip(lexer.Pipe) + + types = append(types, p.parseName()) + for p.skip(lexer.Pipe) && p.err == nil { + types = append(types, p.parseName()) + } + } + return types +} + +func (p *parser) parseEnumTypeDefinition(description descriptionWithComment) *Definition { + _, comment := p.expectKeyword("enum") + + var def Definition + def.Position = p.peekPos() + def.Kind = Enum + def.BeforeDescriptionComment = description.comment + def.Description = description.text + def.AfterDescriptionComment = comment + def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + def.EnumValues, def.EndOfDefinitionComment = p.parseEnumValuesDefinition() + return &def +} + +func (p *parser) parseEnumValuesDefinition() (EnumValueList, *CommentGroup) { + var values EnumValueList + comment := p.some(lexer.BraceL, lexer.BraceR, func() { + values = append(values, p.parseEnumValueDefinition()) + }) + return values, comment +} + +func (p *parser) parseEnumValueDefinition() *EnumValueDefinition { + var def EnumValueDefinition + def.Position = p.peekPos() + desc := p.parseDescription() + if desc.text != "" 
{ + def.BeforeDescriptionComment = desc.comment + def.Description = desc.text + } + + p.peek() // peek to set p.comment + def.AfterDescriptionComment = p.comment + + def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + + return &def +} + +func (p *parser) parseInputObjectTypeDefinition(description descriptionWithComment) *Definition { + _, comment := p.expectKeyword("input") + + var def Definition + def.Position = p.peekPos() + def.Kind = InputObject + def.BeforeDescriptionComment = description.comment + def.Description = description.text + def.AfterDescriptionComment = comment + def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + def.Fields, def.EndOfDefinitionComment = p.parseInputFieldsDefinition() + return &def +} + +func (p *parser) parseInputFieldsDefinition() (FieldList, *CommentGroup) { + var values FieldList + comment := p.some(lexer.BraceL, lexer.BraceR, func() { + values = append(values, p.parseInputValueDef()) + }) + return values, comment +} + +func (p *parser) parseTypeSystemExtension(doc *SchemaDocument) { + _, comment := p.expectKeyword("extend") + + switch p.peek().Value { + case "schema": + doc.SchemaExtension = append(doc.SchemaExtension, p.parseSchemaExtension(comment)) + case "scalar": + doc.Extensions = append(doc.Extensions, p.parseScalarTypeExtension(comment)) + case "type": + doc.Extensions = append(doc.Extensions, p.parseObjectTypeExtension(comment)) + case "interface": + doc.Extensions = append(doc.Extensions, p.parseInterfaceTypeExtension(comment)) + case "union": + doc.Extensions = append(doc.Extensions, p.parseUnionTypeExtension(comment)) + case "enum": + doc.Extensions = append(doc.Extensions, p.parseEnumTypeExtension(comment)) + case "input": + doc.Extensions = append(doc.Extensions, p.parseInputObjectTypeExtension(comment)) + default: + p.unexpectedError() + } +} + +func (p *parser) parseSchemaExtension(comment *CommentGroup) *SchemaDefinition { + p.expectKeyword("schema") + + var def 
SchemaDefinition + def.Position = p.peekPos() + def.AfterDescriptionComment = comment + def.Directives = p.parseDirectives(true) + def.EndOfDefinitionComment = p.some(lexer.BraceL, lexer.BraceR, func() { + def.OperationTypes = append(def.OperationTypes, p.parseOperationTypeDefinition()) + }) + if len(def.Directives) == 0 && len(def.OperationTypes) == 0 { + p.unexpectedError() + } + return &def +} + +func (p *parser) parseScalarTypeExtension(comment *CommentGroup) *Definition { + p.expectKeyword("scalar") + + var def Definition + def.Position = p.peekPos() + def.AfterDescriptionComment = comment + def.Kind = Scalar + def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + if len(def.Directives) == 0 { + p.unexpectedError() + } + return &def +} + +func (p *parser) parseObjectTypeExtension(comment *CommentGroup) *Definition { + p.expectKeyword("type") + + var def Definition + def.Position = p.peekPos() + def.AfterDescriptionComment = comment + def.Kind = Object + def.Name = p.parseName() + def.Interfaces = p.parseImplementsInterfaces() + def.Directives = p.parseDirectives(true) + def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition() + if len(def.Interfaces) == 0 && len(def.Directives) == 0 && len(def.Fields) == 0 { + p.unexpectedError() + } + return &def +} + +func (p *parser) parseInterfaceTypeExtension(comment *CommentGroup) *Definition { + p.expectKeyword("interface") + + var def Definition + def.Position = p.peekPos() + def.AfterDescriptionComment = comment + def.Kind = Interface + def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + def.Fields, def.EndOfDefinitionComment = p.parseFieldsDefinition() + if len(def.Directives) == 0 && len(def.Fields) == 0 { + p.unexpectedError() + } + return &def +} + +func (p *parser) parseUnionTypeExtension(comment *CommentGroup) *Definition { + p.expectKeyword("union") + + var def Definition + def.Position = p.peekPos() + def.AfterDescriptionComment = comment + def.Kind = Union + 
def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + def.Types = p.parseUnionMemberTypes() + + if len(def.Directives) == 0 && len(def.Types) == 0 { + p.unexpectedError() + } + return &def +} + +func (p *parser) parseEnumTypeExtension(comment *CommentGroup) *Definition { + p.expectKeyword("enum") + + var def Definition + def.Position = p.peekPos() + def.AfterDescriptionComment = comment + def.Kind = Enum + def.Name = p.parseName() + def.Directives = p.parseDirectives(true) + def.EnumValues, def.EndOfDefinitionComment = p.parseEnumValuesDefinition() + if len(def.Directives) == 0 && len(def.EnumValues) == 0 { + p.unexpectedError() + } + return &def +} + +func (p *parser) parseInputObjectTypeExtension(comment *CommentGroup) *Definition { + p.expectKeyword("input") + + var def Definition + def.Position = p.peekPos() + def.AfterDescriptionComment = comment + def.Kind = InputObject + def.Name = p.parseName() + def.Directives = p.parseDirectives(false) + def.Fields, def.EndOfDefinitionComment = p.parseInputFieldsDefinition() + if len(def.Directives) == 0 && len(def.Fields) == 0 { + p.unexpectedError() + } + return &def +} + +func (p *parser) parseDirectiveDefinition(description descriptionWithComment) *DirectiveDefinition { + _, comment := p.expectKeyword("directive") + p.expect(lexer.At) + + var def DirectiveDefinition + def.Position = p.peekPos() + def.BeforeDescriptionComment = description.comment + def.Description = description.text + def.AfterDescriptionComment = comment + def.Name = p.parseName() + def.Arguments = p.parseArgumentDefs() + + if peek := p.peek(); peek.Kind == lexer.Name && peek.Value == "repeatable" { + def.IsRepeatable = true + p.skip(lexer.Name) + } + + p.expectKeyword("on") + def.Locations = p.parseDirectiveLocations() + return &def +} + +func (p *parser) parseDirectiveLocations() []DirectiveLocation { + p.skip(lexer.Pipe) + + locations := []DirectiveLocation{p.parseDirectiveLocation()} + + for p.skip(lexer.Pipe) && p.err == nil { + 
locations = append(locations, p.parseDirectiveLocation()) + } + + return locations +} + +func (p *parser) parseDirectiveLocation() DirectiveLocation { + name, _ := p.expect(lexer.Name) + + switch name.Value { + case `QUERY`: + return LocationQuery + case `MUTATION`: + return LocationMutation + case `SUBSCRIPTION`: + return LocationSubscription + case `FIELD`: + return LocationField + case `FRAGMENT_DEFINITION`: + return LocationFragmentDefinition + case `FRAGMENT_SPREAD`: + return LocationFragmentSpread + case `INLINE_FRAGMENT`: + return LocationInlineFragment + case `VARIABLE_DEFINITION`: + return LocationVariableDefinition + case `SCHEMA`: + return LocationSchema + case `SCALAR`: + return LocationScalar + case `OBJECT`: + return LocationObject + case `FIELD_DEFINITION`: + return LocationFieldDefinition + case `ARGUMENT_DEFINITION`: + return LocationArgumentDefinition + case `INTERFACE`: + return LocationInterface + case `UNION`: + return LocationUnion + case `ENUM`: + return LocationEnum + case `ENUM_VALUE`: + return LocationEnumValue + case `INPUT_OBJECT`: + return LocationInputObject + case `INPUT_FIELD_DEFINITION`: + return LocationInputFieldDefinition + } + + p.unexpectedToken(name) + return "" +} + +type descriptionWithComment struct { + text string + comment *CommentGroup +} diff --git a/vendor/github.com/vektah/gqlparser/v2/parser/schema_test.yml b/vendor/github.com/vektah/gqlparser/v2/parser/schema_test.yml new file mode 100644 index 0000000000..705514a995 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/parser/schema_test.yml @@ -0,0 +1,760 @@ +object types: + - name: simple + input: | + type Hello { + world: String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Type: String + + - name: with comments + input: | + # Hello + # Hello another + type Hello { + # World + # World another + world: String + # end of type comments + } + # end of file 
comments + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Type: String + AfterDescriptionComment: "# World\n# World another\n" + AfterDescriptionComment: "# Hello\n# Hello another\n" + EndOfDefinitionComment: "# end of type comments\n" + Comment: "# end of file comments\n" + + - name: with comments and description + input: | + # Hello + # Hello another + "type description" + # Hello after description + # Hello after description another + type Hello { + # World + # World another + "field description" + # World after description + # World after description another + world: String + # end of definition coments + # end of definition comments another + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Description: "type description" + Name: "Hello" + Fields: [FieldDefinition] + - + Description: "field description" + Name: "world" + Type: String + BeforeDescriptionComment: "# World\n# World another\n" + AfterDescriptionComment: "# World after description\n# World after description another\n" + BeforeDescriptionComment: "# Hello\n# Hello another\n" + AfterDescriptionComment: "# Hello after description\n# Hello after description another\n" + EndOfDefinitionComment: "# end of definition coments\n# end of definition comments another\n" + + - name: with description + input: | + "Description" + type Hello { + world: String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Description: "Description" + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Type: String + + - name: with block description + input: | + # Before description comment + """ + Description + """ + # Even with comments between them + type Hello { + world: String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Description: "Description" + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Type: 
String + BeforeDescriptionComment: "# Before description comment\n" + AfterDescriptionComment: "# Even with comments between them\n" + - name: with field arg + input: | + type Hello { + world(flag: Boolean): String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Arguments: [ArgumentDefinition] + - + Name: "flag" + Type: Boolean + Type: String + + - name: with field arg and default value + input: | + type Hello { + world(flag: Boolean = true): String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Arguments: [ArgumentDefinition] + - + Name: "flag" + DefaultValue: true + Type: Boolean + Type: String + + - name: with field list arg + input: | + type Hello { + world(things: [String]): String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Arguments: [ArgumentDefinition] + - + Name: "things" + Type: [String] + Type: String + + - name: with two args + input: | + type Hello { + world(argOne: Boolean, argTwo: Int): String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Arguments: [ArgumentDefinition] + - + Name: "argOne" + Type: Boolean + - + Name: "argTwo" + Type: Int + Type: String + - name: must define one or more fields + input: | + type Hello {} + error: + message: "expected at least one definition, found }" + locations: [{ line: 1, column: 13 }] + +type extensions: + - name: Object extension + input: | + # comment + extend type Hello { + # comment world + world: String + # end of definition comment + } + ast: | + + Extensions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Type: String + AfterDescriptionComment: "# 
comment world\n" + AfterDescriptionComment: "# comment\n" + EndOfDefinitionComment: "# end of definition comment\n" + + - name: without any fields + input: "extend type Hello implements Greeting" + ast: | + + Extensions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Interfaces: [string] + - "Greeting" + + - name: without fields twice + input: | + extend type Hello implements Greeting + extend type Hello implements SecondGreeting + ast: | + + Extensions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Interfaces: [string] + - "Greeting" + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Interfaces: [string] + - "SecondGreeting" + + - name: without anything errors + input: "extend type Hello" + error: + message: "Unexpected " + locations: [{ line: 1, column: 18 }] + + - name: can have descriptions # hmm, this might not be spec compliant... + input: | + "Description" + extend type Hello { + world: String + } + error: + message: 'Unexpected String "Description"' + locations: [{ line: 1, column: 2 }] + + - name: can not have descriptions on types + input: | + extend "Description" type Hello { + world: String + } + error: + message: Unexpected String "Description" + locations: [{ line: 1, column: 9 }] + + - name: all can have directives + input: | + extend scalar Foo @deprecated + extend type Foo @deprecated + extend interface Foo @deprecated + extend union Foo @deprecated + extend enum Foo @deprecated + extend input Foo @deprecated + ast: | + + Extensions: [Definition] + - + Kind: DefinitionKind("SCALAR") + Name: "Foo" + Directives: [Directive] + - + Name: "deprecated" + - + Kind: DefinitionKind("OBJECT") + Name: "Foo" + Directives: [Directive] + - + Name: "deprecated" + - + Kind: DefinitionKind("INTERFACE") + Name: "Foo" + Directives: [Directive] + - + Name: "deprecated" + - + Kind: DefinitionKind("UNION") + Name: "Foo" + Directives: [Directive] + - + Name: "deprecated" + - + Kind: DefinitionKind("ENUM") + Name: "Foo" + 
Directives: [Directive] + - + Name: "deprecated" + - + Kind: DefinitionKind("INPUT_OBJECT") + Name: "Foo" + Directives: [Directive] + - + Name: "deprecated" + +schema definition: + - name: simple + input: | + schema { + query: Query + } + ast: | + + Schema: [SchemaDefinition] + - + OperationTypes: [OperationTypeDefinition] + - + Operation: Operation("query") + Type: "Query" + + - name: with comments and description + input: | + # before description comment + "description" + # after description comment + schema { + # before field comment + query: Query + # after field comment + } + ast: | + + Schema: [SchemaDefinition] + - + Description: "description" + OperationTypes: [OperationTypeDefinition] + - + Operation: Operation("query") + Type: "Query" + Comment: "# before field comment\n" + BeforeDescriptionComment: "# before description comment\n" + AfterDescriptionComment: "# after description comment\n" + EndOfDefinitionComment: "# after field comment\n" + +schema extensions: + - name: simple + input: | + extend schema { + mutation: Mutation + } + ast: | + + SchemaExtension: [SchemaDefinition] + - + OperationTypes: [OperationTypeDefinition] + - + Operation: Operation("mutation") + Type: "Mutation" + + - name: with comment and description + input: | + # before extend comment + extend schema { + # before field comment + mutation: Mutation + # after field comment + } + ast: | + + SchemaExtension: [SchemaDefinition] + - + OperationTypes: [OperationTypeDefinition] + - + Operation: Operation("mutation") + Type: "Mutation" + Comment: "# before field comment\n" + AfterDescriptionComment: "# before extend comment\n" + EndOfDefinitionComment: "# after field comment\n" + + - name: directive only + input: "extend schema @directive" + ast: | + + SchemaExtension: [SchemaDefinition] + - + Directives: [Directive] + - + Name: "directive" + + - name: without anything errors + input: "extend schema" + error: + message: "Unexpected " + locations: [{ line: 1, column: 14}] + +inheritance: + 
- name: single + input: "type Hello implements World { field: String }" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Interfaces: [string] + - "World" + Fields: [FieldDefinition] + - + Name: "field" + Type: String + + - name: multi + input: "type Hello implements Wo & rld { field: String }" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Interfaces: [string] + - "Wo" + - "rld" + Fields: [FieldDefinition] + - + Name: "field" + Type: String + + - name: multi with leading amp + input: "type Hello implements & Wo & rld { field: String }" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("OBJECT") + Name: "Hello" + Interfaces: [string] + - "Wo" + - "rld" + Fields: [FieldDefinition] + - + Name: "field" + Type: String + +enums: + - name: single value + input: "enum Hello { WORLD }" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("ENUM") + Name: "Hello" + EnumValues: [EnumValueDefinition] + - + Name: "WORLD" + + - name: double value + input: "enum Hello { WO, RLD }" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("ENUM") + Name: "Hello" + EnumValues: [EnumValueDefinition] + - + Name: "WO" + - + Name: "RLD" + - name: must define one or more unique enum values + input: | + enum Hello {} + error: + message: "expected at least one definition, found }" + locations: [{ line: 1, column: 13 }] + +interface: + - name: simple + input: | + interface Hello { + world: String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("INTERFACE") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Type: String + - name: must define one or more fields + input: | + interface Hello {} + error: + message: "expected at least one definition, found }" + locations: [{ line: 1, column: 18 }] + + - name: may define intermediate interfaces + input: | + interface IA { + id: ID! + } + + interface IIA implements IA { + id: ID! 
+ } + + type A implements IIA { + id: ID! + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("INTERFACE") + Name: "IA" + Fields: [FieldDefinition] + - + Name: "id" + Type: ID! + - + Kind: DefinitionKind("INTERFACE") + Name: "IIA" + Interfaces: [string] + - "IA" + Fields: [FieldDefinition] + - + Name: "id" + Type: ID! + - + Kind: DefinitionKind("OBJECT") + Name: "A" + Interfaces: [string] + - "IIA" + Fields: [FieldDefinition] + - + Name: "id" + Type: ID! + +unions: + - name: simple + input: "union Hello = World" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("UNION") + Name: "Hello" + Types: [string] + - "World" + + - name: with two types + input: "union Hello = Wo | Rld" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("UNION") + Name: "Hello" + Types: [string] + - "Wo" + - "Rld" + + - name: with leading pipe + input: "union Hello = | Wo | Rld" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("UNION") + Name: "Hello" + Types: [string] + - "Wo" + - "Rld" + + - name: cant be empty + input: "union Hello = || Wo | Rld" + error: + message: "Expected Name, found |" + locations: [{ line: 1, column: 16 }] + + - name: cant double pipe + input: "union Hello = Wo || Rld" + error: + message: "Expected Name, found |" + locations: [{ line: 1, column: 19 }] + + - name: cant have trailing pipe + input: "union Hello = | Wo | Rld |" + error: + message: "Expected Name, found " + locations: [{ line: 1, column: 27 }] + +scalar: + - name: simple + input: "scalar Hello" + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("SCALAR") + Name: "Hello" + +input object: + - name: simple + input: | + input Hello { + world: String + } + ast: | + + Definitions: [Definition] + - + Kind: DefinitionKind("INPUT_OBJECT") + Name: "Hello" + Fields: [FieldDefinition] + - + Name: "world" + Type: String + + - name: can not have args + input: | + input Hello { + world(foo: Int): String + } + error: + message: "Expected :, 
found (" + locations: [{ line: 2, column: 8 }] + - name: must define one or more input fields + input: | + input Hello {} + error: + message: "expected at least one definition, found }" + locations: [{ line: 1, column: 14 }] + +directives: + - name: simple + input: directive @foo on FIELD + ast: | + + Directives: [DirectiveDefinition] + - + Name: "foo" + Locations: [DirectiveLocation] + - DirectiveLocation("FIELD") + IsRepeatable: false + + - name: executable + input: | + directive @onQuery on QUERY + directive @onMutation on MUTATION + directive @onSubscription on SUBSCRIPTION + directive @onField on FIELD + directive @onFragmentDefinition on FRAGMENT_DEFINITION + directive @onFragmentSpread on FRAGMENT_SPREAD + directive @onInlineFragment on INLINE_FRAGMENT + directive @onVariableDefinition on VARIABLE_DEFINITION + ast: | + + Directives: [DirectiveDefinition] + - + Name: "onQuery" + Locations: [DirectiveLocation] + - DirectiveLocation("QUERY") + IsRepeatable: false + - + Name: "onMutation" + Locations: [DirectiveLocation] + - DirectiveLocation("MUTATION") + IsRepeatable: false + - + Name: "onSubscription" + Locations: [DirectiveLocation] + - DirectiveLocation("SUBSCRIPTION") + IsRepeatable: false + - + Name: "onField" + Locations: [DirectiveLocation] + - DirectiveLocation("FIELD") + IsRepeatable: false + - + Name: "onFragmentDefinition" + Locations: [DirectiveLocation] + - DirectiveLocation("FRAGMENT_DEFINITION") + IsRepeatable: false + - + Name: "onFragmentSpread" + Locations: [DirectiveLocation] + - DirectiveLocation("FRAGMENT_SPREAD") + IsRepeatable: false + - + Name: "onInlineFragment" + Locations: [DirectiveLocation] + - DirectiveLocation("INLINE_FRAGMENT") + IsRepeatable: false + - + Name: "onVariableDefinition" + Locations: [DirectiveLocation] + - DirectiveLocation("VARIABLE_DEFINITION") + IsRepeatable: false + + - name: repeatable + input: directive @foo repeatable on FIELD + ast: | + + Directives: [DirectiveDefinition] + - + Name: "foo" + Locations: 
[DirectiveLocation] + - DirectiveLocation("FIELD") + IsRepeatable: true + + - name: invalid location + input: "directive @foo on FIELD | INCORRECT_LOCATION" + error: + message: 'Unexpected Name "INCORRECT_LOCATION"' + locations: [{ line: 1, column: 27 }] + +fuzzer: + - name: 1 + input: "type o{d(g:[" + error: + message: 'Expected Name, found ' + locations: [{ line: 1, column: 13 }] + - name: 2 + input: "\"\"\"\r" + error: + message: 'Unexpected ' + locations: [{ line: 2, column: 1 }] diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/core/core.go b/vendor/github.com/vektah/gqlparser/v2/validator/core/core.go new file mode 100644 index 0000000000..7a6295c2f9 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/core/core.go @@ -0,0 +1,24 @@ +package core + +import ( + "github.com/vektah/gqlparser/v2/gqlerror" +) + +type AddErrFunc func(options ...ErrorOption) + +type RuleFunc func(observers *Events, addError AddErrFunc) + +type Rule struct { + Name string + RuleFunc RuleFunc +} + +// NameSorter sorts Rules by name. +// usage: sort.Sort(core.NameSorter(specifiedRules)) +type NameSorter []Rule + +func (a NameSorter) Len() int { return len(a) } +func (a NameSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a NameSorter) Less(i, j int) bool { return a[i].Name < a[j].Name } + +type ErrorOption func(err *gqlerror.Error) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go b/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go new file mode 100644 index 0000000000..f977d00a81 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/core/helpers.go @@ -0,0 +1,151 @@ +package core + +import ( + "bytes" + "fmt" + "math" + "sort" + "strings" + + "github.com/agnivade/levenshtein" + + "github.com/vektah/gqlparser/v2/ast" + "github.com/vektah/gqlparser/v2/gqlerror" +) + +func Message(msg string, args ...any) ErrorOption { + return func(err *gqlerror.Error) { + err.Message += fmt.Sprintf(msg, args...) 
+ } +} + +func At(position *ast.Position) ErrorOption { + return func(err *gqlerror.Error) { + if position == nil { + return + } + err.Locations = append(err.Locations, gqlerror.Location{ + Line: position.Line, + Column: position.Column, + }) + if position.Src.Name != "" { + err.SetFile(position.Src.Name) + } + } +} + +func SuggestListQuoted(prefix, typed string, suggestions []string) ErrorOption { + suggested := SuggestionList(typed, suggestions) + return func(err *gqlerror.Error) { + if len(suggested) > 0 { + err.Message += " " + prefix + " " + QuotedOrList(suggested...) + "?" + } + } +} + +func SuggestListUnquoted(prefix, typed string, suggestions []string) ErrorOption { + suggested := SuggestionList(typed, suggestions) + return func(err *gqlerror.Error) { + if len(suggested) > 0 { + err.Message += " " + prefix + " " + OrList(suggested...) + "?" + } + } +} + +func Suggestf(suggestion string, args ...any) ErrorOption { + return func(err *gqlerror.Error) { + err.Message += " Did you mean " + fmt.Sprintf(suggestion, args...) + "?" + } +} + +// Given [ A, B, C ] return '"A", "B", or "C"'. +func QuotedOrList(items ...string) string { + itemsQuoted := make([]string, len(items)) + for i, item := range items { + itemsQuoted[i] = `"` + item + `"` + } + return OrList(itemsQuoted...) +} + +// Given [ A, B, C ] return 'A, B, or C'. +func OrList(items ...string) string { + var buf bytes.Buffer + + if len(items) > 5 { + items = items[:5] + } + if len(items) == 2 { + buf.WriteString(items[0]) + buf.WriteString(" or ") + buf.WriteString(items[1]) + return buf.String() + } + + for i, item := range items { + if i != 0 { + if i == len(items)-1 { + buf.WriteString(", or ") + } else { + buf.WriteString(", ") + } + } + buf.WriteString(item) + } + return buf.String() +} + +// Given an invalid input string and a list of valid options, returns a filtered +// list of valid options sorted based on their similarity with the input. 
+func SuggestionList(input string, options []string) []string { + var results []string + optionsByDistance := map[string]int{} + + for _, option := range options { + distance := lexicalDistance(input, option) + threshold := calcThreshold(input) + if distance <= threshold { + results = append(results, option) + optionsByDistance[option] = distance + } + } + + sort.Slice(results, func(i, j int) bool { + return optionsByDistance[results[i]] < optionsByDistance[results[j]] + }) + return results +} + +func calcThreshold(a string) (threshold int) { + // the logic is copied from here + // https://github.com/graphql/graphql-js/blob/47bd8c8897c72d3efc17ecb1599a95cee6bac5e8/src/jsutils/suggestionList.ts#L14 + threshold = max(int(math.Floor(float64(len(a))*0.4)+1), 1) + return threshold +} + +// Computes the lexical distance between strings A and B. +// +// The "distance" between two strings is given by counting the minimum number +// of edits needed to transform string A into string B. An edit can be an +// insertion, deletion, or substitution of a single character, or a swap of two +// adjacent characters. +// +// Includes a custom alteration from Damerau-Levenshtein to treat case changes +// as a single edit which helps identify mis-cased values with an edit distance +// of 1. +// +// This distance can be useful for detecting typos in input or sorting. 
+func lexicalDistance(a, b string) int { + if a == b { + return 0 + } + + a = strings.ToLower(a) + b = strings.ToLower(b) + + // Any case change counts as a single edit + if a == b { + return 1 + } + + return levenshtein.ComputeDistance(a, b) +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go b/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go new file mode 100644 index 0000000000..4e4c9b30f1 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/core/walk.go @@ -0,0 +1,308 @@ +package core + +import ( + "context" + "fmt" + + "github.com/vektah/gqlparser/v2/ast" +) + +type Events struct { + operationVisitor []func(walker *Walker, operation *ast.OperationDefinition) + field []func(walker *Walker, field *ast.Field) + fragment []func(walker *Walker, fragment *ast.FragmentDefinition) + inlineFragment []func(walker *Walker, inlineFragment *ast.InlineFragment) + fragmentSpread []func(walker *Walker, fragmentSpread *ast.FragmentSpread) + directive []func(walker *Walker, directive *ast.Directive) + directiveList []func(walker *Walker, directives []*ast.Directive) + value []func(walker *Walker, value *ast.Value) + variable []func(walker *Walker, variable *ast.VariableDefinition) +} + +func (o *Events) OnOperation(f func(walker *Walker, operation *ast.OperationDefinition)) { + o.operationVisitor = append(o.operationVisitor, f) +} + +func (o *Events) OnField(f func(walker *Walker, field *ast.Field)) { + o.field = append(o.field, f) +} + +func (o *Events) OnFragment(f func(walker *Walker, fragment *ast.FragmentDefinition)) { + o.fragment = append(o.fragment, f) +} + +func (o *Events) OnInlineFragment(f func(walker *Walker, inlineFragment *ast.InlineFragment)) { + o.inlineFragment = append(o.inlineFragment, f) +} + +func (o *Events) OnFragmentSpread(f func(walker *Walker, fragmentSpread *ast.FragmentSpread)) { + o.fragmentSpread = append(o.fragmentSpread, f) +} + +func (o *Events) OnDirective(f func(walker *Walker, directive 
*ast.Directive)) { + o.directive = append(o.directive, f) +} + +func (o *Events) OnDirectiveList(f func(walker *Walker, directives []*ast.Directive)) { + o.directiveList = append(o.directiveList, f) +} + +func (o *Events) OnValue(f func(walker *Walker, value *ast.Value)) { + o.value = append(o.value, f) +} + +func (o *Events) OnVariable(f func(walker *Walker, variable *ast.VariableDefinition)) { + o.variable = append(o.variable, f) +} + +func Walk(schema *ast.Schema, document *ast.QueryDocument, observers *Events) { + w := Walker{ + Observers: observers, + Schema: schema, + Document: document, + } + + w.walk() +} + +type Walker struct { + Context context.Context + Observers *Events + Schema *ast.Schema + Document *ast.QueryDocument + + validatedFragmentSpreads map[string]bool + CurrentOperation *ast.OperationDefinition +} + +func (w *Walker) walk() { + for _, child := range w.Document.Operations { + w.validatedFragmentSpreads = make(map[string]bool) + w.walkOperation(child) + } + for _, child := range w.Document.Fragments { + w.validatedFragmentSpreads = make(map[string]bool) + w.walkFragment(child) + } +} + +func (w *Walker) walkOperation(operation *ast.OperationDefinition) { + w.CurrentOperation = operation + for _, varDef := range operation.VariableDefinitions { + varDef.Definition = w.Schema.Types[varDef.Type.Name()] + for _, v := range w.Observers.variable { + v(w, varDef) + } + if varDef.DefaultValue != nil { + varDef.DefaultValue.ExpectedType = varDef.Type + varDef.DefaultValue.Definition = w.Schema.Types[varDef.Type.Name()] + } + } + + var def *ast.Definition + var loc ast.DirectiveLocation + switch operation.Operation { + case ast.Query, "": + def = w.Schema.Query + loc = ast.LocationQuery + case ast.Mutation: + def = w.Schema.Mutation + loc = ast.LocationMutation + case ast.Subscription: + def = w.Schema.Subscription + loc = ast.LocationSubscription + } + + for _, varDef := range operation.VariableDefinitions { + if varDef.DefaultValue != nil { + 
w.walkValue(varDef.DefaultValue) + } + w.walkDirectives(varDef.Definition, varDef.Directives, ast.LocationVariableDefinition) + } + + w.walkDirectives(def, operation.Directives, loc) + w.walkSelectionSet(def, operation.SelectionSet) + + for _, v := range w.Observers.operationVisitor { + v(w, operation) + } + w.CurrentOperation = nil +} + +func (w *Walker) walkFragment(it *ast.FragmentDefinition) { + def := w.Schema.Types[it.TypeCondition] + + it.Definition = def + + w.walkDirectives(def, it.Directives, ast.LocationFragmentDefinition) + w.walkSelectionSet(def, it.SelectionSet) + + for _, v := range w.Observers.fragment { + v(w, it) + } +} + +func (w *Walker) walkDirectives( + parentDef *ast.Definition, + directives []*ast.Directive, + location ast.DirectiveLocation, +) { + for _, dir := range directives { + def := w.Schema.Directives[dir.Name] + dir.Definition = def + dir.ParentDefinition = parentDef + dir.Location = location + + for _, arg := range dir.Arguments { + var argDef *ast.ArgumentDefinition + if def != nil { + argDef = def.Arguments.ForName(arg.Name) + } + + w.walkArgument(argDef, arg) + } + + for _, v := range w.Observers.directive { + v(w, dir) + } + } + + for _, v := range w.Observers.directiveList { + v(w, directives) + } +} + +func (w *Walker) walkValue(value *ast.Value) { + if value.Kind == ast.Variable && w.CurrentOperation != nil { + value.VariableDefinition = w.CurrentOperation.VariableDefinitions.ForName(value.Raw) + if value.VariableDefinition != nil { + value.VariableDefinition.Used = true + } + } + + if value.Kind == ast.ObjectValue { + for _, child := range value.Children { + if value.Definition != nil { + fieldDef := value.Definition.Fields.ForName(child.Name) + if fieldDef != nil { + child.Value.ExpectedType = fieldDef.Type + child.Value.ExpectedTypeHasDefault = fieldDef.DefaultValue != nil && + fieldDef.DefaultValue.Kind != ast.NullValue + child.Value.Definition = w.Schema.Types[fieldDef.Type.Name()] + } + } + w.walkValue(child.Value) + } 
+ } + + if value.Kind == ast.ListValue { + for _, child := range value.Children { + if value.ExpectedType != nil && value.ExpectedType.Elem != nil { + child.Value.ExpectedType = value.ExpectedType.Elem + child.Value.Definition = value.Definition + } + + w.walkValue(child.Value) + } + } + + for _, v := range w.Observers.value { + v(w, value) + } +} + +func (w *Walker) walkArgument(argDef *ast.ArgumentDefinition, arg *ast.Argument) { + if argDef != nil { + arg.Value.ExpectedType = argDef.Type + arg.Value.ExpectedTypeHasDefault = argDef.DefaultValue != nil && + argDef.DefaultValue.Kind != ast.NullValue + arg.Value.Definition = w.Schema.Types[argDef.Type.Name()] + } + + w.walkValue(arg.Value) +} + +func (w *Walker) walkSelectionSet(parentDef *ast.Definition, it ast.SelectionSet) { + for _, child := range it { + w.walkSelection(parentDef, child) + } +} + +func (w *Walker) walkSelection(parentDef *ast.Definition, it ast.Selection) { + switch it := it.(type) { + case *ast.Field: + var def *ast.FieldDefinition + if it.Name == "__typename" { + def = &ast.FieldDefinition{ + Name: "__typename", + Type: ast.NamedType("String", nil), + } + } else if parentDef != nil { + def = parentDef.Fields.ForName(it.Name) + } + + it.Definition = def + it.ObjectDefinition = parentDef + + var nextParentDef *ast.Definition + if def != nil { + nextParentDef = w.Schema.Types[def.Type.Name()] + } + + for _, arg := range it.Arguments { + var argDef *ast.ArgumentDefinition + if def != nil { + argDef = def.Arguments.ForName(arg.Name) + } + + w.walkArgument(argDef, arg) + } + + w.walkDirectives(nextParentDef, it.Directives, ast.LocationField) + w.walkSelectionSet(nextParentDef, it.SelectionSet) + + for _, v := range w.Observers.field { + v(w, it) + } + + case *ast.InlineFragment: + it.ObjectDefinition = parentDef + + nextParentDef := parentDef + if it.TypeCondition != "" { + nextParentDef = w.Schema.Types[it.TypeCondition] + } + + w.walkDirectives(nextParentDef, it.Directives, 
ast.LocationInlineFragment) + w.walkSelectionSet(nextParentDef, it.SelectionSet) + + for _, v := range w.Observers.inlineFragment { + v(w, it) + } + + case *ast.FragmentSpread: + def := w.Document.Fragments.ForName(it.Name) + it.Definition = def + it.ObjectDefinition = parentDef + + var nextParentDef *ast.Definition + if def != nil { + nextParentDef = w.Schema.Types[def.TypeCondition] + } + + w.walkDirectives(nextParentDef, it.Directives, ast.LocationFragmentSpread) + + if def != nil && !w.validatedFragmentSpreads[def.Name] { + // prevent infinite recursion + w.validatedFragmentSpreads[def.Name] = true + w.walkSelectionSet(nextParentDef, def.SelectionSet) + } + + for _, v := range w.Observers.fragmentSpread { + v(w, it) + } + + default: + panic(fmt.Errorf("unsupported %T", it)) + } +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/imported/prelude.graphql b/vendor/github.com/vektah/gqlparser/v2/validator/imported/prelude.graphql new file mode 100644 index 0000000000..8be3d2f5b6 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/imported/prelude.graphql @@ -0,0 +1,250 @@ +# This file defines all the implicitly declared types that are required by the graphql spec. It is implicitly included by calls to LoadSchema + +"The `Int` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1." +scalar Int + +"The `Float` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point)." +scalar Float + +"The `String`scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text." +scalar String + +"The `Boolean` scalar type represents `true` or `false`." +scalar Boolean + +"""The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. 
The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as "4") or integer (such as 4) input value will be accepted as an ID.""" +scalar ID + +"Directs the executor to defer this fragment when the `if` argument is true or undefined." +directive @defer( + "Deferred when true or undefined." + if: Boolean = true, + "Unique name" + label: String +) on FRAGMENT_SPREAD | INLINE_FRAGMENT + +""" +Directs the executor to include this field or fragment only when the `if` argument is true. +""" +directive @include( + """Included when true.""" + if: Boolean! +) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT + +""" +Directs the executor to skip this field or fragment when the `if` argument is true. +""" +directive @skip( + """Skipped when true.""" + if: Boolean! +) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT + +"""Marks an element of a GraphQL schema as no longer supported.""" +directive @deprecated( + """ + Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/). + """ + reason: String = "No longer supported" +) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE + +"""Exposes a URL that specifies the behavior of this scalar.""" +directive @specifiedBy( + """The URL that specifies the behavior of this scalar.""" + url: String! +) on SCALAR + +""" +Indicates exactly one field must be supplied and this field must not be `null`. +""" +directive @oneOf on INPUT_OBJECT + +""" +A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations. +""" +type __Schema { + description: String + + """A list of all types supported by this server.""" + types: [__Type!]! 
+ + """The type that query operations will be rooted at.""" + queryType: __Type! + + """ + If this server supports mutation, the type that mutation operations will be rooted at. + """ + mutationType: __Type + + """ + If this server support subscription, the type that subscription operations will be rooted at. + """ + subscriptionType: __Type + + """A list of all directives supported by this server.""" + directives: [__Directive!]! +} + +""" +The fundamental unit of any GraphQL Schema is the type. There are many kinds of types in GraphQL as represented by the `__TypeKind` enum. + +Depending on the kind of a type, certain fields describe information about that type. Scalar types provide no information beyond a name, description and optional `specifiedByURL`, while Enum types provide their values. Object and Interface types provide the fields they describe. Abstract types, Union and Interface, provide the Object types possible at runtime. List and NonNull types compose other types. +""" +type __Type { + kind: __TypeKind! + name: String + description: String + specifiedByURL: String + fields(includeDeprecated: Boolean = false): [__Field!] + interfaces: [__Type!] + possibleTypes: [__Type!] + enumValues(includeDeprecated: Boolean = false): [__EnumValue!] + inputFields(includeDeprecated: Boolean = false): [__InputValue!] + ofType: __Type + isOneOf: Boolean +} + +"""An enum describing what kind of type a given `__Type` is.""" +enum __TypeKind { + """Indicates this type is a scalar.""" + SCALAR + + """ + Indicates this type is an object. `fields` and `interfaces` are valid fields. + """ + OBJECT + + """ + Indicates this type is an interface. `fields`, `interfaces`, and `possibleTypes` are valid fields. + """ + INTERFACE + + """Indicates this type is a union. `possibleTypes` is a valid field.""" + UNION + + """Indicates this type is an enum. `enumValues` is a valid field.""" + ENUM + + """ + Indicates this type is an input object. `inputFields` is a valid field. 
+ """ + INPUT_OBJECT + + """Indicates this type is a list. `ofType` is a valid field.""" + LIST + + """Indicates this type is a non-null. `ofType` is a valid field.""" + NON_NULL +} + +""" +Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type. +""" +type __Field { + name: String! + description: String + args(includeDeprecated: Boolean = false): [__InputValue!]! + type: __Type! + isDeprecated: Boolean! + deprecationReason: String +} + +""" +Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value. +""" +type __InputValue { + name: String! + description: String + type: __Type! + + """ + A GraphQL-formatted string representing the default value for this input value. + """ + defaultValue: String + isDeprecated: Boolean! + deprecationReason: String +} + +""" +One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. However an Enum value is returned in a JSON response as a string. +""" +type __EnumValue { + name: String! + description: String + isDeprecated: Boolean! + deprecationReason: String +} + +""" +A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document. + +In some cases, you need to provide options to alter GraphQL's execution behavior in ways field arguments will not suffice, such as conditionally including or skipping a field. Directives provide this by describing additional information to the executor. +""" +type __Directive { + name: String! + description: String + isRepeatable: Boolean! + locations: [__DirectiveLocation!]! + args(includeDeprecated: Boolean = false): [__InputValue!]! +} + +""" +A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies. 
+""" +enum __DirectiveLocation { + """Location adjacent to a query operation.""" + QUERY + + """Location adjacent to a mutation operation.""" + MUTATION + + """Location adjacent to a subscription operation.""" + SUBSCRIPTION + + """Location adjacent to a field.""" + FIELD + + """Location adjacent to a fragment definition.""" + FRAGMENT_DEFINITION + + """Location adjacent to a fragment spread.""" + FRAGMENT_SPREAD + + """Location adjacent to an inline fragment.""" + INLINE_FRAGMENT + + """Location adjacent to a variable definition.""" + VARIABLE_DEFINITION + + """Location adjacent to a schema definition.""" + SCHEMA + + """Location adjacent to a scalar definition.""" + SCALAR + + """Location adjacent to an object type definition.""" + OBJECT + + """Location adjacent to a field definition.""" + FIELD_DEFINITION + + """Location adjacent to an argument definition.""" + ARGUMENT_DEFINITION + + """Location adjacent to an interface definition.""" + INTERFACE + + """Location adjacent to a union definition.""" + UNION + + """Location adjacent to an enum definition.""" + ENUM + + """Location adjacent to an enum value definition.""" + ENUM_VALUE + + """Location adjacent to an input object type definition.""" + INPUT_OBJECT + + """Location adjacent to an input object field definition.""" + INPUT_FIELD_DEFINITION +} \ No newline at end of file diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go b/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go new file mode 100644 index 0000000000..5c88e93b3f --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/prelude.go @@ -0,0 +1,16 @@ +package validator + +import ( + _ "embed" + + "github.com/vektah/gqlparser/v2/ast" +) + +//go:embed imported/prelude.graphql +var preludeGraphql string + +var Prelude = &ast.Source{ + Name: "prelude.graphql", + Input: preludeGraphql, + BuiltIn: true, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go 
b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go new file mode 100644 index 0000000000..1c6f55877e --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go @@ -0,0 +1,131 @@ +package rules + +import ( + "fmt" + "sort" + + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +func ruleFuncFieldsOnCorrectType(observers *Events, addError AddErrFunc, disableSuggestion bool) { + observers.OnField(func(walker *Walker, field *ast.Field) { + if field.ObjectDefinition == nil || field.Definition != nil { + return + } + + message := fmt.Sprintf( + `Cannot query field "%s" on type "%s".`, + field.Name, + field.ObjectDefinition.Name, + ) + + if !disableSuggestion { + if suggestedTypeNames := getSuggestedTypeNames( + walker, + field.ObjectDefinition, + field.Name, + ); suggestedTypeNames != nil { + message += " Did you mean to use an inline fragment on " + QuotedOrList( + suggestedTypeNames...) + "?" + } else if suggestedFieldNames := getSuggestedFieldNames( + field.ObjectDefinition, + field.Name, + ); suggestedFieldNames != nil { + message += " Did you mean " + QuotedOrList(suggestedFieldNames...) + "?" + } + } + + addError( + Message("%s", message), + At(field.Position), + ) + }) +} + +var FieldsOnCorrectTypeRule = Rule{ + Name: "FieldsOnCorrectType", + RuleFunc: func(observers *Events, addError AddErrFunc) { + ruleFuncFieldsOnCorrectType(observers, addError, false) + }, +} + +var FieldsOnCorrectTypeRuleWithoutSuggestions = Rule{ + Name: "FieldsOnCorrectTypeWithoutSuggestions", + RuleFunc: func(observers *Events, addError AddErrFunc) { + ruleFuncFieldsOnCorrectType(observers, addError, true) + }, +} + +// Go through all the implementations of type, as well as the interfaces +// that they implement. 
If any of those types include the provided field, +// suggest them, sorted by how often the type is referenced, starting +// with Interfaces. +func getSuggestedTypeNames(walker *Walker, parent *ast.Definition, name string) []string { + if !parent.IsAbstractType() { + return nil + } + + possibleTypes := walker.Schema.GetPossibleTypes(parent) + suggestedObjectTypes := make([]string, 0, len(possibleTypes)) + var suggestedInterfaceTypes []string + interfaceUsageCount := map[string]int{} + + for _, possibleType := range possibleTypes { + field := possibleType.Fields.ForName(name) + if field == nil { + continue + } + + suggestedObjectTypes = append(suggestedObjectTypes, possibleType.Name) + + for _, possibleInterface := range possibleType.Interfaces { + interfaceField := walker.Schema.Types[possibleInterface] + if interfaceField != nil && interfaceField.Fields.ForName(name) != nil { + if interfaceUsageCount[possibleInterface] == 0 { + suggestedInterfaceTypes = append(suggestedInterfaceTypes, possibleInterface) + } + interfaceUsageCount[possibleInterface]++ + } + } + } + + suggestedTypes := concatSlice(suggestedInterfaceTypes, suggestedObjectTypes) + + sort.SliceStable(suggestedTypes, func(i, j int) bool { + typeA, typeB := suggestedTypes[i], suggestedTypes[j] + diff := interfaceUsageCount[typeB] - interfaceUsageCount[typeA] + if diff != 0 { + return diff < 0 + } + return typeA < typeB + }) + + return suggestedTypes +} + +// By employing a full slice expression (slice[low:high:max]), +// where max is set to the slice’s length, +// we ensure that appending elements results +// in a slice backed by a distinct array. +// This method prevents the shared array issue. +func concatSlice(first, second []string) []string { + n := len(first) + return append(first[:n:n], second...) +} + +// For the field name provided, determine if there are any similar field names +// that may be the result of a typo. 
+func getSuggestedFieldNames(parent *ast.Definition, name string) []string { + if parent.Kind != ast.Object && parent.Kind != ast.Interface { + return nil + } + + possibleFieldNames := make([]string, 0, len(parent.Fields)) + for _, field := range parent.Fields { + possibleFieldNames = append(possibleFieldNames, field.Name) + } + + return SuggestionList(name, possibleFieldNames) +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go new file mode 100644 index 0000000000..bd1f84944b --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go @@ -0,0 +1,49 @@ +package rules + +import ( + "fmt" + + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var FragmentsOnCompositeTypesRule = Rule{ + Name: "FragmentsOnCompositeTypes", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) { + fragmentType := walker.Schema.Types[inlineFragment.TypeCondition] + if fragmentType == nil || fragmentType.IsCompositeType() { + return + } + + message := fmt.Sprintf( + `Fragment cannot condition on non composite type "%s".`, + inlineFragment.TypeCondition, + ) + + addError( + Message("%s", message), + At(inlineFragment.Position), + ) + }) + + observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { + if fragment.Definition == nil || fragment.TypeCondition == "" || + fragment.Definition.IsCompositeType() { + return + } + + message := fmt.Sprintf( + `Fragment "%s" cannot condition on non composite type "%s".`, + fragment.Name, + fragment.TypeCondition, + ) + + addError( + Message("%s", message), + At(fragment.Position), + ) + }) + }, +} diff --git 
a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go new file mode 100644 index 0000000000..af7ca6db17 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go @@ -0,0 +1,93 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +func ruleFuncKnownArgumentNames(observers *Events, addError AddErrFunc, disableSuggestion bool) { + // A GraphQL field is only valid if all supplied arguments are defined by that field. + observers.OnField(func(walker *Walker, field *ast.Field) { + if field.Definition == nil || field.ObjectDefinition == nil { + return + } + for _, arg := range field.Arguments { + def := field.Definition.Arguments.ForName(arg.Name) + if def != nil { + continue + } + + if disableSuggestion { + addError( + Message( + `Unknown argument "%s" on field "%s.%s".`, + arg.Name, + field.ObjectDefinition.Name, + field.Name, + ), + At(field.Position), + ) + } else { + var suggestions []string + for _, argDef := range field.Definition.Arguments { + suggestions = append(suggestions, argDef.Name) + } + addError( + Message( + `Unknown argument "%s" on field "%s.%s".`, + arg.Name, + field.ObjectDefinition.Name, + field.Name, + ), + SuggestListQuoted("Did you mean", arg.Name, suggestions), + At(field.Position), + ) + } + } + }) + + observers.OnDirective(func(walker *Walker, directive *ast.Directive) { + if directive.Definition == nil { + return + } + for _, arg := range directive.Arguments { + def := directive.Definition.Arguments.ForName(arg.Name) + if def != nil { + continue + } + + if disableSuggestion { + addError( + Message(`Unknown argument "%s" on directive "@%s".`, arg.Name, directive.Name), + At(directive.Position), + ) + } else { + var suggestions []string + for _, argDef 
:= range directive.Definition.Arguments { + suggestions = append(suggestions, argDef.Name) + } + + addError( + Message(`Unknown argument "%s" on directive "@%s".`, arg.Name, directive.Name), + SuggestListQuoted("Did you mean", arg.Name, suggestions), + At(directive.Position), + ) + } + } + }) +} + +var KnownArgumentNamesRule = Rule{ + Name: "KnownArgumentNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { + ruleFuncKnownArgumentNames(observers, addError, false) + }, +} + +var KnownArgumentNamesRuleWithoutSuggestions = Rule{ + Name: "KnownArgumentNamesWithoutSuggestions", + RuleFunc: func(observers *Events, addError AddErrFunc) { + ruleFuncKnownArgumentNames(observers, addError, true) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go new file mode 100644 index 0000000000..b81b93ed72 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go @@ -0,0 +1,53 @@ +package rules + +import ( + "slices" + + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var KnownDirectivesRule = Rule{ + Name: "KnownDirectives", + RuleFunc: func(observers *Events, addError AddErrFunc) { + type mayNotBeUsedDirective struct { + Name string + Line int + Column int + } + seen := map[mayNotBeUsedDirective]bool{} + observers.OnDirective(func(walker *Walker, directive *ast.Directive) { + if directive.Definition == nil { + addError( + Message(`Unknown directive "@%s".`, directive.Name), + At(directive.Position), + ) + return + } + + if slices.Contains(directive.Definition.Locations, directive.Location) { + return + } + + // position must be exists if directive.Definition != nil + tmp := mayNotBeUsedDirective{ + Name: directive.Name, + Line: directive.Position.Line, + Column: directive.Position.Column, + } + + if !seen[tmp] { + addError( + Message( + `Directive "@%s" may not be used on %s.`, + directive.Name, + directive.Location, + ), + At(directive.Position), + ) + seen[tmp] = true + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go new file mode 100644 index 0000000000..c55cd658c4 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go @@ -0,0 +1,21 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var KnownFragmentNamesRule = Rule{ + Name: "KnownFragmentNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) { + if fragmentSpread.Definition == nil { + addError( + Message(`Unknown fragment "%s".`, fragmentSpread.Name), + At(fragmentSpread.Position), + ) + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go new file mode 100644 index 0000000000..bc2d2d00b9 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go @@ -0,0 +1,37 @@ +package rules + +import ( + "fmt" + + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var KnownRootTypeRule = Rule{ + Name: "KnownRootType", + RuleFunc: func(observers *Events, addError AddErrFunc) { + // A query's root must be a valid type. Surprisingly, this isn't + // checked anywhere else! + observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + var def *ast.Definition + switch operation.Operation { + case ast.Query, "": + def = walker.Schema.Query + case ast.Mutation: + def = walker.Schema.Mutation + case ast.Subscription: + def = walker.Schema.Subscription + default: + // This shouldn't even parse; if it did we probably need to + // update this switch block to add the new operation type. 
+ panic(fmt.Sprintf(`got unknown operation type "%s"`, operation.Operation)) + } + if def == nil { + addError( + Message(`Schema does not support operation type "%s"`, operation.Operation), + At(operation.Position)) + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go new file mode 100644 index 0000000000..5a85d3bfe7 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go @@ -0,0 +1,79 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +func ruleFuncKnownTypeNames(observers *Events, addError AddErrFunc, disableSuggestion bool) { + observers.OnVariable(func(walker *Walker, variable *ast.VariableDefinition) { + typeName := variable.Type.Name() + typdef := walker.Schema.Types[typeName] + if typdef != nil { + return + } + + addError( + Message(`Unknown type "%s".`, typeName), + At(variable.Position), + ) + }) + + observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) { + typedName := inlineFragment.TypeCondition + if typedName == "" { + return + } + + def := walker.Schema.Types[typedName] + if def != nil { + return + } + + addError( + Message(`Unknown type "%s".`, typedName), + At(inlineFragment.Position), + ) + }) + + observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { + typeName := fragment.TypeCondition + def := walker.Schema.Types[typeName] + if def != nil { + return + } + + if disableSuggestion { + addError( + Message(`Unknown type "%s".`, typeName), + At(fragment.Position), + ) + } else { + var possibleTypes []string + for _, t := range walker.Schema.Types { + possibleTypes = append(possibleTypes, t.Name) + } + + addError( + Message(`Unknown type "%s".`, typeName), + SuggestListQuoted("Did you 
mean", typeName, possibleTypes), + At(fragment.Position), + ) + } + }) +} + +var KnownTypeNamesRule = Rule{ + Name: "KnownTypeNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { + ruleFuncKnownTypeNames(observers, addError, false) + }, +} + +var KnownTypeNamesRuleWithoutSuggestions = Rule{ + Name: "KnownTypeNamesWithoutSuggestions", + RuleFunc: func(observers *Events, addError AddErrFunc) { + ruleFuncKnownTypeNames(observers, addError, true) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go new file mode 100644 index 0000000000..890f71f7ed --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go @@ -0,0 +1,21 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var LoneAnonymousOperationRule = Rule{ + Name: "LoneAnonymousOperation", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + if operation.Name == "" && len(walker.Document.Operations) > 1 { + addError( + Message(`This anonymous operation must be the only defined operation.`), + At(operation.Position), + ) + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go new file mode 100644 index 0000000000..012a47448d --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/max_introspection_depth.go @@ -0,0 +1,93 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +const maxListsDepth = 3 + +var MaxIntrospectionDepth = Rule{ + Name: "MaxIntrospectionDepth", + RuleFunc: func(observers *Events, addError AddErrFunc) { + // Counts the depth of list fields in "__Type" recursively and + // returns `true` if the limit has been reached. + observers.OnField(func(walker *Walker, field *ast.Field) { + if field.Name == "__schema" || field.Name == "__type" { + visitedFragments := make(map[string]bool) + if checkDepthField(field, visitedFragments, 0) { + addError( + Message(`Maximum introspection depth exceeded`), + At(field.Position), + ) + } + return + } + }) + }, +} + +func checkDepthSelectionSet( + selectionSet ast.SelectionSet, + visitedFragments map[string]bool, + depth int, +) bool { + for _, child := range selectionSet { + if field, ok := child.(*ast.Field); ok { + if checkDepthField(field, visitedFragments, depth) { + return true + } + } + if fragmentSpread, ok := child.(*ast.FragmentSpread); ok { + if checkDepthFragmentSpread(fragmentSpread, visitedFragments, depth) { + return true + } + } + if inlineFragment, ok := child.(*ast.InlineFragment); ok { + if checkDepthSelectionSet(inlineFragment.SelectionSet, visitedFragments, depth) { + return true + } + } + } + return false +} + +func checkDepthField(field *ast.Field, visitedFragments map[string]bool, depth int) bool { + if field.Name == "fields" || + field.Name == "interfaces" || + field.Name == "possibleTypes" || + field.Name == "inputFields" { + depth++ + if depth >= maxListsDepth { + return true + } + } + return checkDepthSelectionSet(field.SelectionSet, visitedFragments, depth) +} + +func checkDepthFragmentSpread( + fragmentSpread *ast.FragmentSpread, + visitedFragments map[string]bool, + depth int, +) bool { + fragmentName := fragmentSpread.Name + if visited, ok := visitedFragments[fragmentName]; ok && visited { + // Fragment cycles are handled by `NoFragmentCyclesRule`. 
+ return false + } + fragment := fragmentSpread.Definition + if fragment == nil { + // Missing fragments checks are handled by `KnownFragmentNamesRule`. + return false + } + + // Rather than following an immutable programming pattern which has + // significant memory and garbage collection overhead, we've opted to + // take a mutable approach for efficiency's sake. Importantly visiting a + // fragment twice is fine, so long as you don't do one visit inside the + // other. + visitedFragments[fragmentName] = true + defer delete(visitedFragments, fragmentName) + return checkDepthSelectionSet(fragment.SelectionSet, visitedFragments, depth) +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go similarity index 83% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go index a7de611f17..ead6ae9152 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/no_fragment_cycles.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go @@ -1,17 +1,17 @@ -package validator +package rules import ( "fmt" "strings" - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" ) -func init() { - AddRule("NoFragmentCycles", func(observers *Events, addError AddErrFunc) { +var NoFragmentCyclesRule = Rule{ + Name: "NoFragmentCycles", + RuleFunc: func(observers *Events, addError AddErrFunc) { visitedFrags := make(map[string]bool) observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { @@ -54,7 +54,11 @@ func init() { via = fmt.Sprintf(" via %s", strings.Join(fragmentNames, ", ")) } addError( - Message(`Cannot spread fragment "%s" within itself%s.`, spreadName, via), + Message( + `Cannot spread fragment "%s" within itself%s.`, + spreadName, + via, + ), At(spreadNode.Position), ) } @@ -67,7 +71,7 @@ func init() { recursive(fragment) }) - }) + }, } func getFragmentSpreads(node ast.SelectionSet) []*ast.FragmentSpread { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go new file mode 100644 index 0000000000..76cafb805a --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go @@ -0,0 +1,35 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var NoUndefinedVariablesRule = Rule{ + Name: "NoUndefinedVariables", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnValue(func(walker *Walker, value *ast.Value) { + if walker.CurrentOperation == nil || value.Kind != ast.Variable || + value.VariableDefinition != nil { + return + } + + if walker.CurrentOperation.Name != "" { + addError( + Message( + `Variable "%s" is not defined by operation "%s".`, + value, + walker.CurrentOperation.Name, + ), + At(value.Position), + ) + } else { + addError( + Message(`Variable "%s" is not defined.`, value), + At(value.Position), + ) + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go new file mode 100644 index 0000000000..d28389a42e --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go @@ -0,0 +1,31 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var NoUnusedFragmentsRule = Rule{ + Name: "NoUnusedFragments", + RuleFunc: func(observers *Events, addError AddErrFunc) { + inFragmentDefinition := false + fragmentNameUsed := make(map[string]bool) + + observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) { + if !inFragmentDefinition { + fragmentNameUsed[fragmentSpread.Name] = true + } + }) + + observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { + inFragmentDefinition = true + if !fragmentNameUsed[fragment.Name] { + addError( + Message(`Fragment "%s" is never used.`, fragment.Name), + At(fragment.Position), + ) + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go new file mode 100644 index 0000000000..b393c5b272 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go @@ -0,0 +1,36 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var NoUnusedVariablesRule = Rule{ + Name: "NoUnusedVariables", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + for _, varDef := range operation.VariableDefinitions { + if varDef.Used { + continue + } + + if operation.Name != "" { + addError( + Message( + `Variable "$%s" is never used in operation "%s".`, + varDef.Variable, + operation.Name, + ), + At(varDef.Position), + ) + } else { + addError( + Message(`Variable "$%s" is never used.`, varDef.Variable), + At(varDef.Position), + ) + } + } + }) + }, +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go similarity index 81% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go index 1e207a43e7..77014e3ec7 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/overlapping_fields_can_be_merged.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go @@ -1,19 +1,18 @@ -package validator +package rules import ( "bytes" "fmt" "reflect" - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" ) -func init() { - - AddRule("OverlappingFieldsCanBeMerged", func(observers *Events, addError AddErrFunc) { +var OverlappingFieldsCanBeMergedRule = Rule{ + Name: "OverlappingFieldsCanBeMerged", + RuleFunc: func(observers *Events, addError AddErrFunc) { /** * Algorithm: * @@ -82,7 +81,8 @@ func init() { }) observers.OnField(func(walker *Walker, field *ast.Field) { if walker.CurrentOperation == nil { - // When checking both Operation and Fragment, errors are duplicated when processing FragmentDefinition referenced from Operation + // When checking both Operation and Fragment, errors are duplicated when processing + // FragmentDefinition referenced from Operation return } m.walker = walker @@ -105,14 +105,18 @@ func init() { conflict.addFieldsConflictMessage(addError) } }) - }) + }, } type pairSet struct { data map[string]map[string]bool } -func (pairSet *pairSet) Add(a *ast.FragmentSpread, b *ast.FragmentSpread, areMutuallyExclusive bool) { +func (pairSet *pairSet) Add( + a *ast.FragmentSpread, + b *ast.FragmentSpread, + areMutuallyExclusive bool, +) { add := func(a *ast.FragmentSpread, b *ast.FragmentSpread) { m := pairSet.data[a.Name] if m == nil { @@ -125,7 +129,11 @@ func (pairSet *pairSet) Add(a *ast.FragmentSpread, b *ast.FragmentSpread, areMut add(b, a) } -func (pairSet *pairSet) Has(a *ast.FragmentSpread, b *ast.FragmentSpread, areMutuallyExclusive bool) bool { +func (pairSet *pairSet) Has( + a *ast.FragmentSpread, + b *ast.FragmentSpread, + areMutuallyExclusive bool, +) bool { am, ok := pairSet.data[a.Name] if !ok { return false @@ -224,7 +232,11 @@ func (m *ConflictMessage) addFieldsConflictMessage(addError AddErrFunc) { var buf bytes.Buffer m.String(&buf) addError( - Message(`Fields "%s" conflict because %s. Use different aliases on the fields to fetch both if this was intentional.`, m.ResponseName, buf.String()), + Message( + `Fields "%s" conflict because %s. 
Use different aliases on the fields to fetch both if this was intentional.`, + m.ResponseName, + buf.String(), + ), At(m.Position), ) } @@ -240,7 +252,9 @@ type overlappingFieldsCanBeMergedManager struct { comparedFragments map[string]bool } -func (m *overlappingFieldsCanBeMergedManager) findConflictsWithinSelectionSet(selectionSet ast.SelectionSet) []*ConflictMessage { +func (m *overlappingFieldsCanBeMergedManager) findConflictsWithinSelectionSet( + selectionSet ast.SelectionSet, +) []*ConflictMessage { if len(selectionSet) == 0 { return nil } @@ -271,7 +285,12 @@ func (m *overlappingFieldsCanBeMergedManager) findConflictsWithinSelectionSet(se return conflicts.Conflicts } -func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFieldsAndFragment(conflicts *conflictMessageContainer, areMutuallyExclusive bool, fieldsMap *sequentialFieldsMap, fragmentSpread *ast.FragmentSpread) { +func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFieldsAndFragment( + conflicts *conflictMessageContainer, + areMutuallyExclusive bool, + fieldsMap *sequentialFieldsMap, + fragmentSpread *ast.FragmentSpread, +) { if m.comparedFragments[fragmentSpread.Name] { return } @@ -299,15 +318,23 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFieldsAndFr if fragmentSpread.Name == baseFragmentSpread.Name { continue } - m.collectConflictsBetweenFieldsAndFragment(conflicts, areMutuallyExclusive, fieldsMap, fragmentSpread) + m.collectConflictsBetweenFieldsAndFragment( + conflicts, + areMutuallyExclusive, + fieldsMap, + fragmentSpread, + ) } } -func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments(conflicts *conflictMessageContainer, areMutuallyExclusive bool, fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread) { - +func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments( + conflicts *conflictMessageContainer, + areMutuallyExclusive bool, + fragmentSpreadA *ast.FragmentSpread, + 
fragmentSpreadB *ast.FragmentSpread, +) { var check func(fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread) check = func(fragmentSpreadA *ast.FragmentSpread, fragmentSpreadB *ast.FragmentSpread) { - if fragmentSpreadA.Name == fragmentSpreadB.Name { return } @@ -324,8 +351,12 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments(c return } - fieldsMapA, fragmentSpreadsA := getFieldsAndFragmentNames(fragmentSpreadA.Definition.SelectionSet) - fieldsMapB, fragmentSpreadsB := getFieldsAndFragmentNames(fragmentSpreadB.Definition.SelectionSet) + fieldsMapA, fragmentSpreadsA := getFieldsAndFragmentNames( + fragmentSpreadA.Definition.SelectionSet, + ) + fieldsMapB, fragmentSpreadsB := getFieldsAndFragmentNames( + fragmentSpreadB.Definition.SelectionSet, + ) // (F) First, collect all conflicts between these two collections of fields // (not including any nested fragments). @@ -346,7 +377,11 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetweenFragments(c check(fragmentSpreadA, fragmentSpreadB) } -func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSets(areMutuallyExclusive bool, selectionSetA ast.SelectionSet, selectionSetB ast.SelectionSet) *conflictMessageContainer { +func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSets( + areMutuallyExclusive bool, + selectionSetA ast.SelectionSet, + selectionSetB ast.SelectionSet, +) *conflictMessageContainer { var conflicts conflictMessageContainer fieldsMapA, fragmentSpreadsA := getFieldsAndFragmentNames(selectionSetA) @@ -359,14 +394,24 @@ func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSe // those referenced by each fragment name associated with the second. 
for _, fragmentSpread := range fragmentSpreadsB { m.comparedFragments = make(map[string]bool) - m.collectConflictsBetweenFieldsAndFragment(&conflicts, areMutuallyExclusive, fieldsMapA, fragmentSpread) + m.collectConflictsBetweenFieldsAndFragment( + &conflicts, + areMutuallyExclusive, + fieldsMapA, + fragmentSpread, + ) } // (I) Then collect conflicts between the second collection of fields and // those referenced by each fragment name associated with the first. for _, fragmentSpread := range fragmentSpreadsA { m.comparedFragments = make(map[string]bool) - m.collectConflictsBetweenFieldsAndFragment(&conflicts, areMutuallyExclusive, fieldsMapB, fragmentSpread) + m.collectConflictsBetweenFieldsAndFragment( + &conflicts, + areMutuallyExclusive, + fieldsMapB, + fragmentSpread, + ) } // (J) Also collect conflicts between any fragment names by the first and @@ -374,7 +419,12 @@ func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSe // names to each item in the second set of names. 
for _, fragmentSpreadA := range fragmentSpreadsA { for _, fragmentSpreadB := range fragmentSpreadsB { - m.collectConflictsBetweenFragments(&conflicts, areMutuallyExclusive, fragmentSpreadA, fragmentSpreadB) + m.collectConflictsBetweenFragments( + &conflicts, + areMutuallyExclusive, + fragmentSpreadA, + fragmentSpreadB, + ) } } @@ -385,7 +435,10 @@ func (m *overlappingFieldsCanBeMergedManager) findConflictsBetweenSubSelectionSe return &conflicts } -func (m *overlappingFieldsCanBeMergedManager) collectConflictsWithin(conflicts *conflictMessageContainer, fieldsMap *sequentialFieldsMap) { +func (m *overlappingFieldsCanBeMergedManager) collectConflictsWithin( + conflicts *conflictMessageContainer, + fieldsMap *sequentialFieldsMap, +) { for _, fields := range fieldsMap.Iterator() { for idx, fieldA := range fields { for _, fieldB := range fields[idx+1:] { @@ -398,7 +451,12 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsWithin(conflicts * } } -func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetween(conflicts *conflictMessageContainer, parentFieldsAreMutuallyExclusive bool, fieldsMapA *sequentialFieldsMap, fieldsMapB *sequentialFieldsMap) { +func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetween( + conflicts *conflictMessageContainer, + parentFieldsAreMutuallyExclusive bool, + fieldsMapA *sequentialFieldsMap, + fieldsMapB *sequentialFieldsMap, +) { for _, fieldsEntryA := range fieldsMapA.KeyValueIterator() { fieldsB, ok := fieldsMapB.Get(fieldsEntryA.ResponseName) if !ok { @@ -415,7 +473,11 @@ func (m *overlappingFieldsCanBeMergedManager) collectConflictsBetween(conflicts } } -func (m *overlappingFieldsCanBeMergedManager) findConflict(parentFieldsAreMutuallyExclusive bool, fieldA *ast.Field, fieldB *ast.Field) *ConflictMessage { +func (m *overlappingFieldsCanBeMergedManager) findConflict( + parentFieldsAreMutuallyExclusive bool, + fieldA *ast.Field, + fieldB *ast.Field, +) *ConflictMessage { if fieldA.ObjectDefinition == nil || 
fieldB.ObjectDefinition == nil { return nil } @@ -439,8 +501,12 @@ func (m *overlappingFieldsCanBeMergedManager) findConflict(parentFieldsAreMutual if fieldA.Name != fieldB.Name { return &ConflictMessage{ ResponseName: fieldNameA, - Message: fmt.Sprintf(`"%s" and "%s" are different fields`, fieldA.Name, fieldB.Name), - Position: fieldB.Position, + Message: fmt.Sprintf( + `"%s" and "%s" are different fields`, + fieldA.Name, + fieldB.Name, + ), + Position: fieldB.Position, } } @@ -454,18 +520,27 @@ func (m *overlappingFieldsCanBeMergedManager) findConflict(parentFieldsAreMutual } } - if fieldA.Definition != nil && fieldB.Definition != nil && doTypesConflict(m.walker, fieldA.Definition.Type, fieldB.Definition.Type) { + if fieldA.Definition != nil && fieldB.Definition != nil && + doTypesConflict(m.walker, fieldA.Definition.Type, fieldB.Definition.Type) { return &ConflictMessage{ ResponseName: fieldNameA, - Message: fmt.Sprintf(`they return conflicting types "%s" and "%s"`, fieldA.Definition.Type.String(), fieldB.Definition.Type.String()), - Position: fieldB.Position, + Message: fmt.Sprintf( + `they return conflicting types "%s" and "%s"`, + fieldA.Definition.Type.String(), + fieldB.Definition.Type.String(), + ), + Position: fieldB.Position, } } // Collect and compare sub-fields. Use the same "visited fragment names" list // for both collections so fields in a fragment reference are never // compared to themselves. 
- conflicts := m.findConflictsBetweenSubSelectionSets(areMutuallyExclusive, fieldA.SelectionSet, fieldB.SelectionSet) + conflicts := m.findConflictsBetweenSubSelectionSets( + areMutuallyExclusive, + fieldA.SelectionSet, + fieldB.SelectionSet, + ) if conflicts == nil { return nil } @@ -476,7 +551,7 @@ func (m *overlappingFieldsCanBeMergedManager) findConflict(parentFieldsAreMutual } } -func sameArguments(args1 []*ast.Argument, args2 []*ast.Argument) bool { +func sameArguments(args1, args2 []*ast.Argument) bool { if len(args1) != len(args2) { return false } @@ -495,7 +570,7 @@ func sameArguments(args1 []*ast.Argument, args2 []*ast.Argument) bool { return true } -func sameValue(value1 *ast.Value, value2 *ast.Value) bool { +func sameValue(value1, value2 *ast.Value) bool { if value1.Kind != value2.Kind { return false } @@ -505,7 +580,7 @@ func sameValue(value1 *ast.Value, value2 *ast.Value) bool { return true } -func doTypesConflict(walker *Walker, type1 *ast.Type, type2 *ast.Type) bool { +func doTypesConflict(walker *Walker, type1, type2 *ast.Type) bool { if type1.Elem != nil { if type2.Elem != nil { return doTypesConflict(walker, type1.Elem, type2.Elem) @@ -524,14 +599,17 @@ func doTypesConflict(walker *Walker, type1 *ast.Type, type2 *ast.Type) bool { t1 := walker.Schema.Types[type1.NamedType] t2 := walker.Schema.Types[type2.NamedType] - if (t1.Kind == ast.Scalar || t1.Kind == ast.Enum) && (t2.Kind == ast.Scalar || t2.Kind == ast.Enum) { + if (t1.Kind == ast.Scalar || t1.Kind == ast.Enum) && + (t2.Kind == ast.Scalar || t2.Kind == ast.Enum) { return t1.Name != t2.Name } return false } -func getFieldsAndFragmentNames(selectionSet ast.SelectionSet) (*sequentialFieldsMap, []*ast.FragmentSpread) { +func getFieldsAndFragmentNames( + selectionSet ast.SelectionSet, +) (*sequentialFieldsMap, []*ast.FragmentSpread) { fieldsMap := sequentialFieldsMap{ data: make(map[string][]*ast.Field), } diff --git 
a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go new file mode 100644 index 0000000000..94cc6356c6 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go @@ -0,0 +1,83 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var PossibleFragmentSpreadsRule = Rule{ + Name: "PossibleFragmentSpreads", + RuleFunc: func(observers *Events, addError AddErrFunc) { + validate := func(walker *Walker, parentDef *ast.Definition, fragmentName string, emitError func()) { + if parentDef == nil { + return + } + + var parentDefs []*ast.Definition + switch parentDef.Kind { + case ast.Object: + parentDefs = []*ast.Definition{parentDef} + case ast.Interface, ast.Union: + parentDefs = walker.Schema.GetPossibleTypes(parentDef) + default: + return + } + + fragmentDefType := walker.Schema.Types[fragmentName] + if fragmentDefType == nil { + return + } + if !fragmentDefType.IsCompositeType() { + // checked by FragmentsOnCompositeTypes + return + } + fragmentDefs := walker.Schema.GetPossibleTypes(fragmentDefType) + + for _, fragmentDef := range fragmentDefs { + for _, parentDef := range parentDefs { + if parentDef.Name == fragmentDef.Name { + return + } + } + } + + emitError() + } + + observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) { + validate(walker, inlineFragment.ObjectDefinition, inlineFragment.TypeCondition, func() { + addError( + Message( + `Fragment cannot be spread here as objects of type "%s" can never be of type "%s".`, + inlineFragment.ObjectDefinition.Name, + inlineFragment.TypeCondition, + ), + At(inlineFragment.Position), + ) + }) + }) + + observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) { + if 
fragmentSpread.Definition == nil { + return + } + validate( + walker, + fragmentSpread.ObjectDefinition, + fragmentSpread.Definition.TypeCondition, + func() { + addError( + Message( + `Fragment "%s" cannot be spread here as objects of type "%s" can never be of type "%s".`, + fragmentSpread.Name, + fragmentSpread.ObjectDefinition.Name, + fragmentSpread.Definition.TypeCondition, + ), + At(fragmentSpread.Position), + ) + }, + ) + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go new file mode 100644 index 0000000000..fc1a6a476d --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go @@ -0,0 +1,64 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var ProvidedRequiredArgumentsRule = Rule{ + Name: "ProvidedRequiredArguments", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnField(func(walker *Walker, field *ast.Field) { + if field.Definition == nil { + return + } + + argDef: + for _, argDef := range field.Definition.Arguments { + if !argDef.Type.NonNull { + continue + } + if argDef.DefaultValue != nil { + continue + } + for _, arg := range field.Arguments { + if arg.Name == argDef.Name { + continue argDef + } + } + + addError( + Message(`Field "%s" argument "%s" of type "%s" is required, but it was not provided.`, field.Name, argDef.Name, argDef.Type.String()), + At(field.Position), + ) + } + }) + + observers.OnDirective(func(walker *Walker, directive *ast.Directive) { + if directive.Definition == nil { + return + } + + argDef: + for _, argDef := range directive.Definition.Arguments { + if !argDef.Type.NonNull { + continue + } + if argDef.DefaultValue != nil { + continue + } + for _, arg := range 
directive.Arguments { + if arg.Name == argDef.Name { + continue argDef + } + } + + addError( + Message(`Directive "@%s" argument "%s" of type "%s" is required, but it was not provided.`, directive.Definition.Name, argDef.Name, argDef.Type.String()), + At(directive.Position), + ) + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go new file mode 100644 index 0000000000..e94151b06e --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/rules.go @@ -0,0 +1,126 @@ +package rules + +import ( + "slices" + + "github.com/vektah/gqlparser/v2/validator/core" +) + +// Rules manages GraphQL validation rules. +type Rules struct { + rules map[string]core.RuleFunc + ruleNameKeys []string // for deterministic order +} + +// NewRules creates a Rules instance with the specified rules. +func NewRules(rs ...core.Rule) *Rules { + r := &Rules{ + rules: make(map[string]core.RuleFunc), + } + + for _, rule := range rs { + r.AddRule(rule.Name, rule.RuleFunc) + } + + return r +} + +// NewDefaultRules creates a Rules instance containing the default GraphQL validation rule set. 
+func NewDefaultRules() *Rules { + rules := []core.Rule{ + FieldsOnCorrectTypeRule, + FragmentsOnCompositeTypesRule, + KnownArgumentNamesRule, + KnownDirectivesRule, + KnownFragmentNamesRule, + KnownRootTypeRule, + KnownTypeNamesRule, + LoneAnonymousOperationRule, + MaxIntrospectionDepth, + NoFragmentCyclesRule, + NoUndefinedVariablesRule, + NoUnusedFragmentsRule, + NoUnusedVariablesRule, + OverlappingFieldsCanBeMergedRule, + PossibleFragmentSpreadsRule, + ProvidedRequiredArgumentsRule, + ScalarLeafsRule, + SingleFieldSubscriptionsRule, + UniqueArgumentNamesRule, + UniqueDirectivesPerLocationRule, + UniqueFragmentNamesRule, + UniqueInputFieldNamesRule, + UniqueOperationNamesRule, + UniqueVariableNamesRule, + ValuesOfCorrectTypeRule, + VariablesAreInputTypesRule, + VariablesInAllowedPositionRule, + } + + r := NewRules(rules...) + + return r +} + +// AddRule adds a rule with the specified name and rule function to the rule set. +// If a rule with the same name already exists, it will not be added. +func (r *Rules) AddRule(name string, ruleFunc core.RuleFunc) { + if r.rules == nil { + r.rules = make(map[string]core.RuleFunc) + } + + if _, exists := r.rules[name]; !exists { + r.rules[name] = ruleFunc + r.ruleNameKeys = append(r.ruleNameKeys, name) + } +} + +// GetInner returns the internal rule map. +// If the map is not initialized, it returns an empty map. +// This returns a copy of the rules map, not the original map. +func (r *Rules) GetInner() map[string]core.RuleFunc { + if r == nil { + return nil // impossible nonsense, hopefully + } + if r.rules == nil { + return make(map[string]core.RuleFunc) + } + + rules := make(map[string]core.RuleFunc) + for k, v := range r.rules { + rules[k] = v + } + + return rules +} + +// RemoveRule removes a rule with the specified name from the rule set. +// If no rule with the specified name exists, it does nothing. 
+func (r *Rules) RemoveRule(name string) { + if r == nil { + return // impossible nonsense, hopefully + } + if r.rules != nil { + delete(r.rules, name) + } + + if len(r.ruleNameKeys) > 0 { + r.ruleNameKeys = slices.DeleteFunc(r.ruleNameKeys, func(s string) bool { + return s == name // delete the name rule key + }) + } +} + +// ReplaceRule replaces a rule with the specified name with a new rule function. +// If no rule with the specified name exists, it does nothing. +func (r *Rules) ReplaceRule(name string, ruleFunc core.RuleFunc) { + if r == nil { + return // impossible nonsense, hopefully + } + if r.rules == nil { + r.rules = make(map[string]core.RuleFunc) + } + if _, exists := r.rules[name]; exists { + r.rules[name] = ruleFunc + } +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go new file mode 100644 index 0000000000..95fd2298f4 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go @@ -0,0 +1,46 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var ScalarLeafsRule = Rule{ + Name: "ScalarLeafs", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnField(func(walker *Walker, field *ast.Field) { + if field.Definition == nil { + return + } + + fieldType := walker.Schema.Types[field.Definition.Type.Name()] + if fieldType == nil { + return + } + + if fieldType.IsLeafType() && len(field.SelectionSet) > 0 { + addError( + Message( + `Field "%s" must not have a selection since type "%s" has no subfields.`, + field.Name, + fieldType.Name, + ), + At(field.Position), + ) + } + + if !fieldType.IsLeafType() && len(field.SelectionSet) == 0 { + addError( + Message( + `Field "%s" of type "%s" must have a selection of subfields.`, + field.Name, + field.Definition.Type.String(), + ), + Suggestf(`"%s { ... }"`, field.Name), + At(field.Position), + ) + } + }) + }, +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/single_field_subscriptions.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go similarity index 85% rename from vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/single_field_subscriptions.go rename to vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go index 98cb984b40..2d4322da1b 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/single_field_subscriptions.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go @@ -1,17 +1,17 @@ -package validator +package rules import ( "strconv" "strings" - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - - //nolint:revive // Validator rules each use dot imports for convenience. - . "github.com/open-policy-agent/opa/internal/gqlparser/validator" + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" ) -func init() { - AddRule("SingleFieldSubscriptions", func(observers *Events, addError AddErrFunc) { +var SingleFieldSubscriptionsRule = Rule{ + Name: "SingleFieldSubscriptions", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { if walker.Schema.Subscription == nil || operation.Operation != ast.Subscription { return @@ -40,7 +40,7 @@ func init() { } } }) - }) + }, } type topField struct { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go new file mode 100644 index 0000000000..882a8cb168 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go @@ -0,0 +1,35 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var UniqueArgumentNamesRule = Rule{ + Name: "UniqueArgumentNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnField(func(walker *Walker, field *ast.Field) { + checkUniqueArgs(field.Arguments, addError) + }) + + observers.OnDirective(func(walker *Walker, directive *ast.Directive) { + checkUniqueArgs(directive.Arguments, addError) + }) + }, +} + +func checkUniqueArgs(args ast.ArgumentList, addError AddErrFunc) { + knownArgNames := map[string]int{} + + for _, arg := range args { + if knownArgNames[arg.Name] == 1 { + addError( + Message(`There can be only one argument named "%s".`, arg.Name), + At(arg.Position), + ) + } + + knownArgNames[arg.Name]++ + } +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go new file mode 100644 index 0000000000..a1a0101671 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go @@ -0,0 +1,29 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var UniqueDirectivesPerLocationRule = Rule{ + Name: "UniqueDirectivesPerLocation", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnDirectiveList(func(walker *Walker, directives []*ast.Directive) { + seen := map[string]bool{} + + for _, dir := range directives { + if dir.Name != "repeatable" && seen[dir.Name] { + addError( + Message( + `The directive "@%s" can only be used once at this location.`, + dir.Name, + ), + At(dir.Position), + ) + } + seen[dir.Name] = true + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go new file mode 100644 index 0000000000..7b1a40ceb1 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go @@ -0,0 +1,24 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var UniqueFragmentNamesRule = Rule{ + Name: "UniqueFragmentNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { + seenFragments := map[string]bool{} + + observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { + if seenFragments[fragment.Name] { + addError( + Message(`There can be only one fragment named "%s".`, fragment.Name), + At(fragment.Position), + ) + } + seenFragments[fragment.Name] = true + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go new file mode 100644 index 0000000000..67c92de180 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go @@ -0,0 +1,29 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var UniqueInputFieldNamesRule = Rule{ + Name: "UniqueInputFieldNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnValue(func(walker *Walker, value *ast.Value) { + if value.Kind != ast.ObjectValue { + return + } + + seen := map[string]bool{} + for _, field := range value.Children { + if seen[field.Name] { + addError( + Message(`There can be only one input field named "%s".`, field.Name), + At(field.Position), + ) + } + seen[field.Name] = true + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go new file mode 100644 index 0000000000..199a4b0f5c --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go @@ -0,0 +1,24 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules 
each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var UniqueOperationNamesRule = Rule{ + Name: "UniqueOperationNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { + seen := map[string]bool{} + + observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + if seen[operation.Name] { + addError( + Message(`There can be only one operation named "%s".`, operation.Name), + At(operation.Position), + ) + } + seen[operation.Name] = true + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go new file mode 100644 index 0000000000..6a33d0e936 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go @@ -0,0 +1,26 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var UniqueVariableNamesRule = Rule{ + Name: "UniqueVariableNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + seen := map[string]int{} + for _, def := range operation.VariableDefinitions { + // add the same error only once per a variable. 
+ if seen[def.Variable] == 1 { + addError( + Message(`There can be only one variable named "$%s".`, def.Variable), + At(def.Position), + ) + } + seen[def.Variable]++ + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go new file mode 100644 index 0000000000..5126245e54 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go @@ -0,0 +1,307 @@ +package rules + +import ( + "errors" + "fmt" + "strconv" + + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +func ruleFuncValuesOfCorrectType(observers *Events, addError AddErrFunc, disableSuggestion bool) { + observers.OnValue(func(walker *Walker, value *ast.Value) { + if value.Definition == nil || value.ExpectedType == nil { + return + } + + if value.Kind == ast.NullValue && value.ExpectedType.NonNull { + addError( + Message( + `Expected value of type "%s", found %s.`, + value.ExpectedType.String(), + value.String(), + ), + At(value.Position), + ) + } + + if value.Definition.Kind == ast.Scalar { + // Skip custom validating scalars + if !value.Definition.OneOf("Int", "Float", "String", "Boolean", "ID") { + return + } + } + + var possibleEnums []string + if value.Definition.Kind == ast.Enum { + for _, val := range value.Definition.EnumValues { + possibleEnums = append(possibleEnums, val.Name) + } + } + + rawVal, err := value.Value(nil) + if err != nil { + unexpectedTypeMessage(addError, value) + } + + switch value.Kind { + case ast.NullValue: + return + case ast.ListValue: + if value.ExpectedType.Elem == nil { + unexpectedTypeMessage(addError, value) + return + } + + case ast.IntValue: + if !value.Definition.OneOf("Int", "Float", "ID") { + unexpectedTypeMessage(addError, value) + } + + case ast.FloatValue: + if 
!value.Definition.OneOf("Float") { + unexpectedTypeMessage(addError, value) + } + + case ast.StringValue, ast.BlockValue: + if value.Definition.Kind == ast.Enum { + if disableSuggestion { + addError( + Message( + `Enum "%s" cannot represent non-enum value: %s.`, + value.ExpectedType.String(), + value.String(), + ), + At(value.Position), + ) + } else { + rawValStr := fmt.Sprint(rawVal) + addError( + Message( + `Enum "%s" cannot represent non-enum value: %s.`, + value.ExpectedType.String(), + value.String(), + ), + SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums), + At(value.Position), + ) + } + } else if !value.Definition.OneOf("String", "ID") { + unexpectedTypeMessage(addError, value) + } + + case ast.EnumValue: + if value.Definition.Kind != ast.Enum { + if disableSuggestion { + addError( + unexpectedTypeMessageOnly(value), + At(value.Position), + ) + } else { + rawValStr := fmt.Sprint(rawVal) + addError( + unexpectedTypeMessageOnly(value), + SuggestListUnquoted( + "Did you mean the enum value", + rawValStr, + possibleEnums, + ), + At(value.Position), + ) + } + } else if value.Definition.EnumValues.ForName(value.Raw) == nil { + if disableSuggestion { + addError( + Message( + `Value "%s" does not exist in "%s" enum.`, + value.String(), + value.ExpectedType.String(), + ), + At(value.Position), + ) + } else { + rawValStr := fmt.Sprint(rawVal) + addError( + Message( + `Value "%s" does not exist in "%s" enum.`, + value.String(), + value.ExpectedType.String(), + ), + SuggestListQuoted("Did you mean the enum value", rawValStr, possibleEnums), + At(value.Position), + ) + } + } + + case ast.BooleanValue: + if !value.Definition.OneOf("Boolean") { + unexpectedTypeMessage(addError, value) + } + + case ast.ObjectValue: + + for _, field := range value.Definition.Fields { + if field.Type.NonNull { + fieldValue := value.Children.ForName(field.Name) + if fieldValue == nil && field.DefaultValue == nil { + addError( + Message( + `Field "%s.%s" of required 
type "%s" was not provided.`, + value.Definition.Name, + field.Name, + field.Type.String(), + ), + At(value.Position), + ) + continue + } + } + } + + for _, directive := range value.Definition.Directives { + if directive.Name == "oneOf" { + func() { + if len(value.Children) != 1 { + addError( + Message( + `OneOf Input Object "%s" must specify exactly one key.`, + value.Definition.Name, + ), + At(value.Position), + ) + return + } + + fieldValue := value.Children[0].Value + isNullLiteral := fieldValue == nil || fieldValue.Kind == ast.NullValue + if isNullLiteral { + addError( + Message( + `Field "%s.%s" must be non-null.`, + value.Definition.Name, + value.Definition.Fields[0].Name, + ), + At(fieldValue.Position), + ) + return + } + + isVariable := fieldValue.Kind == ast.Variable + if isVariable { + variableName := fieldValue.VariableDefinition.Variable + isNullableVariable := !fieldValue.VariableDefinition.Type.NonNull + if isNullableVariable { + addError( + Message( + `Variable "%s" must be non-nullable to be used for OneOf Input Object "%s".`, + variableName, + value.Definition.Name, + ), + At(fieldValue.Position), + ) + } + } + }() + } + } + + for _, fieldValue := range value.Children { + if value.Definition.Fields.ForName(fieldValue.Name) == nil { + if disableSuggestion { + addError( + Message( + `Field "%s" is not defined by type "%s".`, + fieldValue.Name, + value.Definition.Name, + ), + At(fieldValue.Position), + ) + } else { + var suggestions []string + for _, fieldValue := range value.Definition.Fields { + suggestions = append(suggestions, fieldValue.Name) + } + + addError( + Message( + `Field "%s" is not defined by type "%s".`, + fieldValue.Name, + value.Definition.Name, + ), + SuggestListQuoted("Did you mean", fieldValue.Name, suggestions), + At(fieldValue.Position), + ) + } + } + } + + case ast.Variable: + return + + default: + panic(fmt.Errorf("unhandled %T", value)) + } + }) +} + +var ValuesOfCorrectTypeRule = Rule{ + Name: "ValuesOfCorrectType", + 
RuleFunc: func(observers *Events, addError AddErrFunc) { + ruleFuncValuesOfCorrectType(observers, addError, false) + }, +} + +var ValuesOfCorrectTypeRuleWithoutSuggestions = Rule{ + Name: "ValuesOfCorrectTypeWithoutSuggestions", + RuleFunc: func(observers *Events, addError AddErrFunc) { + ruleFuncValuesOfCorrectType(observers, addError, true) + }, +} + +func unexpectedTypeMessage(addError AddErrFunc, v *ast.Value) { + addError( + unexpectedTypeMessageOnly(v), + At(v.Position), + ) +} + +func unexpectedTypeMessageOnly(v *ast.Value) ErrorOption { + switch v.ExpectedType.String() { + case "Int", "Int!": + if _, err := strconv.ParseInt( + v.Raw, + 10, + 32, + ); err != nil && + errors.Is(err, strconv.ErrRange) { + return Message(`Int cannot represent non 32-bit signed integer value: %s`, v.String()) + } + return Message(`Int cannot represent non-integer value: %s`, v.String()) + case "String", "String!", "[String]": + return Message(`String cannot represent a non string value: %s`, v.String()) + case "Boolean", "Boolean!": + return Message(`Boolean cannot represent a non boolean value: %s`, v.String()) + case "Float", "Float!": + return Message(`Float cannot represent non numeric value: %s`, v.String()) + case "ID", "ID!": + return Message(`ID cannot represent a non-string and non-integer value: %s`, v.String()) + // case "Enum": + // return Message(`Enum "%s" cannot represent non-enum value: %s`, v.ExpectedType.String(), + // v.String()) + default: + if v.Definition.Kind == ast.Enum { + return Message( + `Enum "%s" cannot represent non-enum value: %s.`, + v.ExpectedType.String(), + v.String(), + ) + } + return Message( + `Expected value of type "%s", found %s.`, + v.ExpectedType.String(), + v.String(), + ) + } +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go new file mode 100644 index 0000000000..b0670404ef --- /dev/null +++ 
b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go @@ -0,0 +1,30 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . "github.com/vektah/gqlparser/v2/validator/core" +) + +var VariablesAreInputTypesRule = Rule{ + Name: "VariablesAreInputTypes", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { + for _, def := range operation.VariableDefinitions { + if def.Definition == nil { + continue + } + if !def.Definition.IsInputType() { + addError( + Message( + `Variable "$%s" cannot be non-input type "%s".`, + def.Variable, + def.Type.String(), + ), + At(def.Position), + ) + } + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go new file mode 100644 index 0000000000..d3d36a293f --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go @@ -0,0 +1,48 @@ +package rules + +import ( + "github.com/vektah/gqlparser/v2/ast" + //nolint:staticcheck // Validator rules each use dot imports for convenience. + . 
"github.com/vektah/gqlparser/v2/validator/core" +) + +var VariablesInAllowedPositionRule = Rule{ + Name: "VariablesInAllowedPosition", + RuleFunc: func(observers *Events, addError AddErrFunc) { + observers.OnValue(func(walker *Walker, value *ast.Value) { + if value.Kind != ast.Variable || value.ExpectedType == nil || + value.VariableDefinition == nil || + walker.CurrentOperation == nil { + return + } + + tmp := *value.ExpectedType + + // todo: move me into walk + // If there is a default non nullable types can be null + if value.VariableDefinition.DefaultValue != nil && + value.VariableDefinition.DefaultValue.Kind != ast.NullValue { + if value.ExpectedType.NonNull { + tmp.NonNull = false + } + } + + // If the expected type has a default, the given variable can be null + if value.ExpectedTypeHasDefault { + tmp.NonNull = false + } + + if !value.VariableDefinition.Type.IsCompatible(&tmp) { + addError( + Message( + `Variable "%s" of type "%s" used in position expecting type "%s".`, + value, + value.VariableDefinition.Type.String(), + value.ExpectedType.String(), + ), + At(value.Position), + ) + } + }) + }, +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go new file mode 100644 index 0000000000..d40b9f192c --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go @@ -0,0 +1,660 @@ +package validator + +import ( + "slices" + "sort" + "strconv" + "strings" + + . "github.com/vektah/gqlparser/v2/ast" //nolint:staticcheck // bad, yeah + "github.com/vektah/gqlparser/v2/gqlerror" + "github.com/vektah/gqlparser/v2/parser" +) + +func LoadSchema(inputs ...*Source) (*Schema, error) { + sd, err := parser.ParseSchemas(inputs...) 
+ if err != nil { + return nil, gqlerror.WrapIfUnwrapped(err) + } + return ValidateSchemaDocument(sd) +} + +func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) { + schema := Schema{ + Types: map[string]*Definition{}, + Directives: map[string]*DirectiveDefinition{}, + PossibleTypes: map[string][]*Definition{}, + Implements: map[string][]*Definition{}, + } + + for i, def := range sd.Definitions { + if schema.Types[def.Name] != nil { + return nil, gqlerror.ErrorPosf(def.Position, "Cannot redeclare type %s.", def.Name) + } + schema.Types[def.Name] = sd.Definitions[i] + } + + defs := append(DefinitionList{}, sd.Definitions...) + + for _, ext := range sd.Extensions { + def := schema.Types[ext.Name] + if def == nil { + schema.Types[ext.Name] = &Definition{ + Kind: ext.Kind, + Name: ext.Name, + Position: ext.Position, + } + def = schema.Types[ext.Name] + defs = append(defs, def) + } + + if def.Kind != ext.Kind { + return nil, gqlerror.ErrorPosf( + ext.Position, + "Cannot extend type %s because the base type is a %s, not %s.", + ext.Name, + def.Kind, + ext.Kind, + ) + } + + def.Directives = append(def.Directives, ext.Directives...) + def.Interfaces = append(def.Interfaces, ext.Interfaces...) + def.Fields = append(def.Fields, ext.Fields...) + def.Types = append(def.Types, ext.Types...) + def.EnumValues = append(def.EnumValues, ext.EnumValues...) 
+ } + + for _, def := range defs { + switch def.Kind { + case Union: + for _, t := range def.Types { + schema.AddPossibleType(def.Name, schema.Types[t]) + schema.AddImplements(t, def) + } + case InputObject, Object: + for _, intf := range def.Interfaces { + schema.AddPossibleType(intf, def) + schema.AddImplements(def.Name, schema.Types[intf]) + } + schema.AddPossibleType(def.Name, def) + case Interface: + for _, intf := range def.Interfaces { + schema.AddPossibleType(intf, def) + schema.AddImplements(def.Name, schema.Types[intf]) + } + } + } + + for i, dir := range sd.Directives { + if schema.Directives[dir.Name] != nil { + // While the spec says SDL must not (§3.5) explicitly define builtin + // scalars, it may (§3.13) define builtin directives. Here we check for + // that, and reject doubly-defined directives otherwise. + switch dir.Name { + case "include", "skip", "deprecated", "specifiedBy", "defer", "oneOf": // the builtins + // In principle here we might want to validate that the + // directives are the same. But they might not be, if the + // server has an older spec than we do. (Plus, validating this + // is a lot of work.) So we just keep the first one we saw. + // That's an arbitrary choice, but in theory the only way it + // fails is if the server is using features newer than this + // version of gqlparser, in which case they're in trouble + // anyway. 
+ default: + return nil, gqlerror.ErrorPosf( + dir.Position, + "Cannot redeclare directive %s.", + dir.Name, + ) + } + } + schema.Directives[dir.Name] = sd.Directives[i] + } + + if len(sd.Schema) > 1 { + return nil, gqlerror.ErrorPosf( + sd.Schema[1].Position, + "Cannot have multiple schema entry points, consider schema extensions instead.", + ) + } + + if len(sd.Schema) == 1 { + schema.Description = sd.Schema[0].Description + for _, entrypoint := range sd.Schema[0].OperationTypes { + def := schema.Types[entrypoint.Type] + if def == nil { + return nil, gqlerror.ErrorPosf( + entrypoint.Position, + "Schema root %s refers to a type %s that does not exist.", + entrypoint.Operation, + entrypoint.Type, + ) + } + switch entrypoint.Operation { + case Query: + schema.Query = def + case Mutation: + schema.Mutation = def + case Subscription: + schema.Subscription = def + } + } + if err := validateDirectives( + &schema, + sd.Schema[0].Directives, + LocationSchema, + nil, + ); err != nil { + return nil, err + } + schema.SchemaDirectives = append(schema.SchemaDirectives, sd.Schema[0].Directives...) + } + + for _, ext := range sd.SchemaExtension { + for _, entrypoint := range ext.OperationTypes { + def := schema.Types[entrypoint.Type] + if def == nil { + return nil, gqlerror.ErrorPosf( + entrypoint.Position, + "Schema root %s refers to a type %s that does not exist.", + entrypoint.Operation, + entrypoint.Type, + ) + } + switch entrypoint.Operation { + case Query: + schema.Query = def + case Mutation: + schema.Mutation = def + case Subscription: + schema.Subscription = def + } + } + if err := validateDirectives(&schema, ext.Directives, LocationSchema, nil); err != nil { + return nil, err + } + schema.SchemaDirectives = append(schema.SchemaDirectives, ext.Directives...) 
+ } + + if err := validateTypeDefinitions(&schema); err != nil { + return nil, err + } + + if err := validateDirectiveDefinitions(&schema); err != nil { + return nil, err + } + + // Inferred root operation type names should be performed only when a `schema` directive is + // **not** provided, when it is, `Mutation` and `Subscription` becomes valid types and are not + // assigned as a root operation on the schema. + if len(sd.Schema) == 0 { + if schema.Query == nil && schema.Types["Query"] != nil { + schema.Query = schema.Types["Query"] + } + + if schema.Mutation == nil && schema.Types["Mutation"] != nil { + schema.Mutation = schema.Types["Mutation"] + } + + if schema.Subscription == nil && schema.Types["Subscription"] != nil { + schema.Subscription = schema.Types["Subscription"] + } + } + + if schema.Query != nil { + schema.Query.Fields = append( + schema.Query.Fields, + &FieldDefinition{ + Name: "__schema", + Type: NonNullNamedType("__Schema", nil), + }, + &FieldDefinition{ + Name: "__type", + Type: NamedType("__Type", nil), + Arguments: ArgumentDefinitionList{ + {Name: "name", Type: NonNullNamedType("String", nil)}, + }, + }, + ) + } + + return &schema, nil +} + +func validateTypeDefinitions(schema *Schema) *gqlerror.Error { + types := make([]string, 0, len(schema.Types)) + for typ := range schema.Types { + types = append(types, typ) + } + sort.Strings(types) + for _, typ := range types { + err := validateDefinition(schema, schema.Types[typ]) + if err != nil { + return err + } + } + return nil +} + +func validateDirectiveDefinitions(schema *Schema) *gqlerror.Error { + directives := make([]string, 0, len(schema.Directives)) + for directive := range schema.Directives { + directives = append(directives, directive) + } + sort.Strings(directives) + for _, directive := range directives { + err := validateDirective(schema, schema.Directives[directive]) + if err != nil { + return err + } + } + return nil +} + +func validateDirective(schema *Schema, def 
*DirectiveDefinition) *gqlerror.Error { + if err := validateName(def.Position, def.Name); err != nil { + // now, GraphQL spec doesn't have reserved directive name + return err + } + + return validateArgs(schema, def.Arguments, def) +} + +func validateDefinition(schema *Schema, def *Definition) *gqlerror.Error { + for _, field := range def.Fields { + if err := validateName(field.Position, field.Name); err != nil { + // now, GraphQL spec doesn't have reserved field name + return err + } + if err := validateTypeRef(schema, field.Type); err != nil { + return err + } + if err := validateArgs(schema, field.Arguments, nil); err != nil { + return err + } + wantDirLocation := LocationFieldDefinition + if def.Kind == InputObject { + wantDirLocation = LocationInputFieldDefinition + } + if err := validateDirectives(schema, field.Directives, wantDirLocation, nil); err != nil { + return err + } + } + + for _, typ := range def.Types { + typDef := schema.Types[typ] + if typDef == nil { + return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(typ)) + } + if !isValidKind(typDef.Kind, Object) { + return gqlerror.ErrorPosf( + def.Position, + "%s type %s must be %s.", + def.Kind, + strconv.Quote(typ), + kindList(Object), + ) + } + } + + for _, intf := range def.Interfaces { + if err := validateImplements(schema, def, intf); err != nil { + return err + } + } + + switch def.Kind { + case Object, Interface: + if len(def.Fields) == 0 { + return gqlerror.ErrorPosf( + def.Position, + "%s %s: must define one or more fields.", + def.Kind, + def.Name, + ) + } + for _, field := range def.Fields { + if typ, ok := schema.Types[field.Type.Name()]; ok { + if !isValidKind(typ.Kind, Scalar, Object, Interface, Union, Enum) { + return gqlerror.ErrorPosf( + field.Position, + "%s %s: field must be one of %s.", + def.Kind, + def.Name, + kindList(Scalar, Object, Interface, Union, Enum), + ) + } + } + } + case Enum: + if len(def.EnumValues) == 0 { + return gqlerror.ErrorPosf( + 
def.Position, + "%s %s: must define one or more unique enum values.", + def.Kind, + def.Name, + ) + } + for _, value := range def.EnumValues { + for _, nonEnum := range [3]string{"true", "false", "null"} { + if value.Name == nonEnum { + return gqlerror.ErrorPosf( + def.Position, + "%s %s: non-enum value %s.", + def.Kind, + def.Name, + value.Name, + ) + } + } + if err := validateDirectives( + schema, + value.Directives, + LocationEnumValue, + nil, + ); err != nil { + return err + } + } + case InputObject: + if len(def.Fields) == 0 { + return gqlerror.ErrorPosf( + def.Position, + "%s %s: must define one or more input fields.", + def.Kind, + def.Name, + ) + } + for _, field := range def.Fields { + if typ, ok := schema.Types[field.Type.Name()]; ok { + if !isValidKind(typ.Kind, Scalar, Enum, InputObject) { + return gqlerror.ErrorPosf( + field.Position, + "%s %s: field must be one of %s.", + typ.Kind, + field.Name, + kindList(Scalar, Enum, InputObject), + ) + } + } + } + } + + for idx, field1 := range def.Fields { + for _, field2 := range def.Fields[idx+1:] { + if field1.Name == field2.Name { + return gqlerror.ErrorPosf( + field2.Position, + "Field %s.%s can only be defined once.", + def.Name, + field2.Name, + ) + } + } + } + + if !def.BuiltIn { + // GraphQL spec has reserved type names a lot! 
+ err := validateName(def.Position, def.Name) + if err != nil { + return err + } + } + + return validateDirectives(schema, def.Directives, DirectiveLocation(def.Kind), nil) +} + +func validateTypeRef(schema *Schema, typ *Type) *gqlerror.Error { + if schema.Types[typ.Name()] == nil { + return gqlerror.ErrorPosf(typ.Position, "Undefined type %s.", typ.Name()) + } + return nil +} + +func validateArgs( + schema *Schema, + args ArgumentDefinitionList, + currentDirective *DirectiveDefinition, +) *gqlerror.Error { + for _, arg := range args { + if err := validateName(arg.Position, arg.Name); err != nil { + // now, GraphQL spec doesn't have reserved argument name + return err + } + if err := validateTypeRef(schema, arg.Type); err != nil { + return err + } + def := schema.Types[arg.Type.Name()] + if !def.IsInputType() { + return gqlerror.ErrorPosf( + arg.Position, + "cannot use %s as argument %s because %s is not a valid input type", + arg.Type.String(), + arg.Name, + def.Kind, + ) + } + if err := validateDirectives( + schema, + arg.Directives, + LocationArgumentDefinition, + currentDirective, + ); err != nil { + return err + } + } + return nil +} + +func validateDirectives( + schema *Schema, + dirs DirectiveList, + location DirectiveLocation, + currentDirective *DirectiveDefinition, +) *gqlerror.Error { + for _, dir := range dirs { + if err := validateName(dir.Position, dir.Name); err != nil { + // now, GraphQL spec doesn't have reserved directive name + return err + } + if currentDirective != nil && dir.Name == currentDirective.Name { + return gqlerror.ErrorPosf( + dir.Position, + "Directive %s cannot refer to itself.", + currentDirective.Name, + ) + } + dirDefinition := schema.Directives[dir.Name] + if dirDefinition == nil { + return gqlerror.ErrorPosf(dir.Position, "Undefined directive %s.", dir.Name) + } + validKind := slices.Contains(dirDefinition.Locations, location) + if !validKind { + return gqlerror.ErrorPosf( + dir.Position, + "Directive %s is not applicable on 
%s.", + dir.Name, + location, + ) + } + for _, arg := range dir.Arguments { + if dirDefinition.Arguments.ForName(arg.Name) == nil { + return gqlerror.ErrorPosf( + arg.Position, + "Undefined argument %s for directive %s.", + arg.Name, + dir.Name, + ) + } + } + for _, schemaArg := range dirDefinition.Arguments { + if schemaArg.Type.NonNull && schemaArg.DefaultValue == nil { + if arg := dir.Arguments.ForName( + schemaArg.Name, + ); arg == nil || + arg.Value.Kind == NullValue { + return gqlerror.ErrorPosf( + dir.Position, + "Argument %s for directive %s cannot be null.", + schemaArg.Name, + dir.Name, + ) + } + } + } + dir.Definition = schema.Directives[dir.Name] + } + return nil +} + +func validateImplements(schema *Schema, def *Definition, intfName string) *gqlerror.Error { + // see validation rules at the bottom of + // https://spec.graphql.org/October2021/#sec-Objects + intf := schema.Types[intfName] + if intf == nil { + return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(intfName)) + } + if intf.Kind != Interface { + return gqlerror.ErrorPosf( + def.Position, + "%s is a non interface type %s.", + strconv.Quote(intfName), + intf.Kind, + ) + } + for _, requiredField := range intf.Fields { + foundField := def.Fields.ForName(requiredField.Name) + if foundField == nil { + return gqlerror.ErrorPosf(def.Position, + `For %s to implement %s it must have a field called %s.`, + def.Name, intf.Name, requiredField.Name, + ) + } + + if !isCovariant(schema, requiredField.Type, foundField.Type) { + return gqlerror.ErrorPosf(foundField.Position, + `For %s to implement %s the field %s must have type %s.`, + def.Name, intf.Name, requiredField.Name, requiredField.Type.String(), + ) + } + + for _, requiredArg := range requiredField.Arguments { + foundArg := foundField.Arguments.ForName(requiredArg.Name) + if foundArg == nil { + return gqlerror.ErrorPosf( + foundField.Position, + `For %s to implement %s the field %s must have the same arguments but it is missing 
%s.`, + def.Name, + intf.Name, + requiredField.Name, + requiredArg.Name, + ) + } + + if !requiredArg.Type.IsCompatible(foundArg.Type) { + return gqlerror.ErrorPosf( + foundArg.Position, + `For %s to implement %s the field %s must have the same arguments but %s has the wrong type.`, + def.Name, + intf.Name, + requiredField.Name, + requiredArg.Name, + ) + } + } + for _, foundArgs := range foundField.Arguments { + if requiredField.Arguments.ForName(foundArgs.Name) == nil && foundArgs.Type.NonNull && + foundArgs.DefaultValue == nil { + return gqlerror.ErrorPosf( + foundArgs.Position, + `For %s to implement %s any additional arguments on %s must be optional or have a default value but %s is required.`, + def.Name, + intf.Name, + foundField.Name, + foundArgs.Name, + ) + } + } + } + return validateTypeImplementsAncestors(schema, def, intfName) +} + +// validateTypeImplementsAncestors +// https://github.com/graphql/graphql-js/blob/47bd8c8897c72d3efc17ecb1599a95cee6bac5e8/src/type/validate.ts#L428 +func validateTypeImplementsAncestors( + schema *Schema, + def *Definition, + intfName string, +) *gqlerror.Error { + intf := schema.Types[intfName] + if intf == nil { + return gqlerror.ErrorPosf(def.Position, "Undefined type %s.", strconv.Quote(intfName)) + } + for _, transitive := range intf.Interfaces { + if !containsString(def.Interfaces, transitive) { + if transitive == def.Name { + return gqlerror.ErrorPosf(def.Position, + `Type %s cannot implement %s because it would create a circular reference.`, + def.Name, intfName, + ) + } + return gqlerror.ErrorPosf(def.Position, + `Type %s must implement %s because it is implemented by %s.`, + def.Name, transitive, intfName, + ) + } + } + return nil +} + +func containsString(slice []string, want string) bool { + return slices.Contains(slice, want) +} + +func isCovariant(schema *Schema, required, actual *Type) bool { + if required.NonNull && !actual.NonNull { + return false + } + + if required.NamedType != "" { + if required.NamedType 
== actual.NamedType { + return true + } + for _, pt := range schema.PossibleTypes[required.NamedType] { + if pt.Name == actual.NamedType { + return true + } + } + return false + } + + if required.Elem != nil && actual.Elem == nil { + return false + } + + return isCovariant(schema, required.Elem, actual.Elem) +} + +func validateName(pos *Position, name string) *gqlerror.Error { + if strings.HasPrefix(name, "__") { + return gqlerror.ErrorPosf( + pos, + `Name "%s" must not begin with "__", which is reserved by GraphQL introspection.`, + name, + ) + } + return nil +} + +func isValidKind(kind DefinitionKind, valid ...DefinitionKind) bool { + return slices.Contains(valid, kind) +} + +func kindList(kinds ...DefinitionKind) string { + s := make([]string, len(kinds)) + for i, k := range kinds { + s[i] = string(k) + } + return strings.Join(s, ", ") +} diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml b/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml new file mode 100644 index 0000000000..22f125bec4 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema_test.yml @@ -0,0 +1,731 @@ +types: + - name: cannot be redeclared + input: | + type A { + name: String + } + type A { + name: String + } + error: + message: "Cannot redeclare type A." + locations: [{line: 4, column: 6}] + - name: cannot be duplicated field at same definition 1 + input: | + type A { + name: String + name: String + } + error: + message: "Field A.name can only be defined once." + locations: [{line: 3, column: 3}] + - name: cannot be duplicated field at same definition 2 + input: | + type A { + name: String + } + extend type A { + name: String + } + error: + message: "Field A.name can only be defined once." + locations: [{line: 5, column: 3}] + - name: cannot be duplicated field at same definition 3 + input: | + type A { + name: String + } + extend type A { + age: Int + age: Int + } + error: + message: "Field A.age can only be defined once." 
+ locations: [{line: 6, column: 3}] + +object types: + - name: must define one or more fields + input: | + directive @D on OBJECT + + # This pattern rejected by parser + # type InvalidObject1 {} + + type InvalidObject2 @D + + type ValidObject { + id: ID + } + extend type ValidObject @D + extend type ValidObject { + b: Int + } + error: + message: 'OBJECT InvalidObject2: must define one or more fields.' + locations: [{line: 6, column: 6}] + - name: check reserved names on type name + input: | + type __FooBar { + id: ID + } + error: + message: 'Name "__FooBar" must not begin with "__", which is reserved by GraphQL introspection.' + locations: [{line: 1, column: 6}] + - name: check reserved names on type field + input: | + type FooBar { + __id: ID + } + error: + message: 'Name "__id" must not begin with "__", which is reserved by GraphQL introspection.' + locations: [{line: 2, column: 3}] + + - name: field argument list must not be empty + input: | + type FooBar { + foo(): ID + } + error: + message: 'expected at least one definition, found )' + locations: [{line: 2, column: 7}] + + - name: check reserved names on type field argument + input: | + type FooBar { + foo(__bar: ID): ID + } + error: + message: 'Name "__bar" must not begin with "__", which is reserved by GraphQL introspection.' + locations: [{line: 2, column: 7}] + + - name: must not allow input object as field type + input: | + input Input { + id: ID + } + type Query { + input: Input! + } + error: + message: 'OBJECT Query: field must be one of SCALAR, OBJECT, INTERFACE, UNION, ENUM.' + locations: [{line: 5, column: 3}] + +interfaces: + - name: must exist + input: | + type Thing implements Object { + id: ID! + } + + type Query { + Things: [Thing!]! + } + error: + message: 'Undefined type "Object".' + locations: [{line: 1, column: 6}] + + - name: must be an interface + input: | + type Thing implements Object { + id: ID! + } + + type Query { + Things: [Thing!]! 
+ } + + type Object { + name: String + } + error: + message: '"Object" is a non interface type OBJECT.' + locations: [{line: 1, column: 6}] + + - name: must define one or more fields + input: | + directive @D on INTERFACE + + # This pattern rejected by parser + # interface InvalidInterface1 {} + + interface InvalidInterface2 @D + + interface ValidInterface { + id: ID + } + extend interface ValidInterface @D + extend interface ValidInterface { + b: Int + } + error: + message: 'INTERFACE InvalidInterface2: must define one or more fields.' + locations: [{line: 6, column: 11}] + + - name: check reserved names on type name + input: | + interface __FooBar { + id: ID + } + error: + message: 'Name "__FooBar" must not begin with "__", which is reserved by GraphQL introspection.' + locations: [{line: 1, column: 11}] + + - name: must not allow input object as field type + input: | + input Input { + id: ID + } + type Query { + foo: Foo! + } + interface Foo { + input: Input! + } + error: + message: 'INTERFACE Foo: field must be one of SCALAR, OBJECT, INTERFACE, UNION, ENUM.' + locations: [{line: 8, column: 3}] + + - name: must have all fields from interface + input: | + type Bar implements BarInterface { + someField: Int! + } + + interface BarInterface { + id: ID! + } + error: + message: 'For Bar to implement BarInterface it must have a field called id.' + locations: [{line: 1, column: 6}] + + - name: must have same type of fields + input: | + type Bar implements BarInterface { + id: Int! + } + + interface BarInterface { + id: ID! + } + error: + message: 'For Bar to implement BarInterface the field id must have type ID!.' + locations: [{line: 2, column: 5}] + + - name: must have all required arguments + input: | + type Bar implements BarInterface { + id: ID! + } + + interface BarInterface { + id(ff: Int!): ID! + } + error: + message: 'For Bar to implement BarInterface the field id must have the same arguments but it is missing ff.' 
+ locations: [{line: 2, column: 5}] + + - name: must have same argument types + input: | + type Bar implements BarInterface { + id(ff: ID!): ID! + } + + interface BarInterface { + id(ff: Int!): ID! + } + error: + message: 'For Bar to implement BarInterface the field id must have the same arguments but ff has the wrong type.' + locations: [{line: 2, column: 8}] + + - name: may defined additional nullable arguments + input: | + type Bar implements BarInterface { + id(opt: Int): ID! + } + + interface BarInterface { + id: ID! + } + + - name: may defined additional required arguments with defaults + input: | + type Bar implements BarInterface { + id(opt: Int! = 1): ID! + } + + interface BarInterface { + id: ID! + } + + - name: must not define additional required arguments without defaults + input: | + type Bar implements BarInterface { + id(opt: Int!): ID! + } + + interface BarInterface { + id: ID! + } + error: + message: 'For Bar to implement BarInterface any additional arguments on id must be optional or have a default value but opt is required.' + locations: [{line: 2, column: 8}] + + - name: can have covariant argument types + input: | + union U = A|B + + type A { name: String } + type B { name: String } + + type Bar implements BarInterface { + f: A! + } + + interface BarInterface { + f: U! + } + + - name: may define intermediate interfaces + input: | + interface IA { + id: ID! + } + + interface IIA implements IA { + id: ID! + } + + type A implements IIA & IA { + id: ID! + } + + - name: Type Foo must implement Baz because it is implemented by Bar + input: | + interface Baz { + baz: String + } + + interface Bar implements Baz { + bar: String + baz: String + } + + type Foo implements Bar { + foo: String + bar: String + baz: String + } + error: + message: 'Type Foo must implement Baz because it is implemented by Bar.' + locations: [{line: 10, column: 6}] + + - name: circular reference error + input: | + interface Circular1 implements Circular2 { + id: ID! 
+ } + + interface Circular2 implements Circular1 { + id: ID! + } + error: + message: 'Type Circular1 cannot implement Circular2 because it would create a circular reference.' + locations: [{line: 1, column: 11}] + +inputs: + - name: must define one or more input fields + input: | + directive @D on INPUT_OBJECT + + # This pattern rejected by parser + # input InvalidInput1 {} + + input InvalidInput2 @D + + input ValidInput { + id: ID + } + extend input ValidInput @D + extend input ValidInput { + b: Int + } + error: + message: 'INPUT_OBJECT InvalidInput2: must define one or more input fields.' + locations: [{line: 6, column: 7}] + - name: check reserved names on type name + input: | + input __FooBar { + id: ID + } + error: + message: 'Name "__FooBar" must not begin with "__", which is reserved by GraphQL introspection.' + locations: [{line: 1, column: 7}] + + - name: fields cannot be Objects + input: | + type Object { id: ID } + input Foo { a: Object! } + error: + message: 'OBJECT a: field must be one of SCALAR, ENUM, INPUT_OBJECT.' + locations: [{line: 2, column: 13}] + + - name: fields cannot be Interfaces + input: | + interface Interface { id: ID! } + input Foo { a: Interface! } + error: + message: 'INTERFACE a: field must be one of SCALAR, ENUM, INPUT_OBJECT.' + locations: [{line: 2, column: 13}] + + - name: fields cannot be Unions + input: | + type Object { id: ID } + union Union = Object + input Foo { a: Union! } + error: + message: 'UNION a: field must be one of SCALAR, ENUM, INPUT_OBJECT.' + locations: [{line: 3, column: 13}] + +args: + - name: Valid arg types + input: | + input Input { id: ID } + enum Enum { A } + scalar Scalar + + type Query { + f(a: Input, b: Scalar, c: Enum): Boolean! + } + + - name: Objects not allowed + input: | + type Object { id: ID } + type Query { f(a: Object): Boolean! 
} + + error: + message: 'cannot use Object as argument a because OBJECT is not a valid input type' + locations: [{line: 2, column: 16}] + + - name: Union not allowed + input: | + type Object { id: ID } + union Union = Object + type Query { f(a: Union): Boolean! } + + error: + message: 'cannot use Union as argument a because UNION is not a valid input type' + locations: [{line: 3, column: 16}] + + - name: Interface not allowed + input: | + interface Interface { id: ID } + type Query { f(a: Interface): Boolean! } + + error: + message: 'cannot use Interface as argument a because INTERFACE is not a valid input type' + locations: [{line: 2, column: 16}] + +enums: + - name: must define one or more unique enum values + input: | + directive @D on ENUM + + # This pattern rejected by parser + # enum InvalidEmum1 {} + + enum InvalidEnum2 @D + + enum ValidEnum { + FOO + } + extend enum ValidEnum @D + extend enum ValidEnum { + BAR + } + error: + message: 'ENUM InvalidEnum2: must define one or more unique enum values.' + locations: [{line: 6, column: 6}] + - name: check reserved names on type name + input: | + enum __FooBar { + A + B + } + error: + message: 'Name "__FooBar" must not begin with "__", which is reserved by GraphQL introspection.' + locations: [{line: 1, column: 6}] + +unions: + - name: union types must be defined + input: | + union Foo = Bar | Baz + type Bar { + id: ID + } + error: + message: "Undefined type \"Baz\"." + locations: [{line: 1, column: 7}] + - name: union types must be objects + input: | + union Foo = Baz + interface Baz { + id: ID + } + error: + message: "UNION type \"Baz\" must be OBJECT." + locations: [{line: 1, column: 7}] + + - name: unions of pure type extensions are valid + input: | + + type Review { + body: String! + author: User! @provides(fields: "username") + product: Product! + } + + extend type User @key(fields: "id") { + id: ID! @external + reviews: [Review] + } + + extend type Product @key(fields: "upc") { + upc: String! 
@external + reviews: [Review] + } + + union Foo = User | Product + scalar _Any + scalar _FieldSet + directive @external on FIELD_DEFINITION + directive @requires(fields: _FieldSet!) on FIELD_DEFINITION + directive @provides(fields: _FieldSet!) on FIELD_DEFINITION + directive @key(fields: _FieldSet!) on OBJECT | INTERFACE + directive @extends on OBJECT + + + +type extensions: + - name: can extend non existant types + input: | + extend type A { + name: String + } + + + - name: cannot extend incorret type existant types + input: | + scalar A + extend type A { + name: String + } + error: + message: "Cannot extend type A because the base type is a SCALAR, not OBJECT." + locations: [{line: 2, column: 13}] + +directives: + - name: cannot redeclare directives + input: | + directive @A on FIELD_DEFINITION + directive @A on FIELD_DEFINITION + error: + message: "Cannot redeclare directive A." + locations: [{line: 2, column: 12}] + + - name: can redeclare builtin directives + input: | + directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @skip(if: Boolean!) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT + + - name: must be declared (type) + input: | + type User @foo { + name: String + } + error: + message: "Undefined directive foo." + locations: [{line: 1, column: 12}] + + - name: must be declared (field) + input: | + type User { + name: String @foo + } + error: + message: "Undefined directive foo." + locations: [{line: 2, column: 17}] + + - name: must be declared (enum) + input: | + enum Unit { + METER @foo + } + error: + message: "Undefined directive foo." + locations: [{line: 2, column: 10}] + + - name: cannot be self-referential + input: | + directive @A(foo: Int! @A) on FIELD_DEFINITION + error: + message: "Directive A cannot refer to itself." 
+ locations: [{line: 1, column: 25}] + - name: check reserved names on type name + input: | + directive @__A on FIELD_DEFINITION + error: + message: 'Name "__A" must not begin with "__", which is reserved by GraphQL introspection.' + locations: [{line: 1, column: 12}] + + - name: Valid arg types + input: | + input Input { id: ID } + enum Enum { A } + scalar Scalar + + directive @A(a: Input, b: Scalar, c: Enum) on FIELD_DEFINITION + + - name: Objects not allowed + input: | + type Object { id: ID } + directive @A(a: Object) on FIELD_DEFINITION + + error: + message: 'cannot use Object as argument a because OBJECT is not a valid input type' + locations: [{line: 2, column: 14}] + + - name: Union not allowed + input: | + type Object { id: ID } + union Union = Object + directive @A(a: Union) on FIELD_DEFINITION + + error: + message: 'cannot use Union as argument a because UNION is not a valid input type' + locations: [{line: 3, column: 14}] + + - name: Interface not allowed + input: | + interface Interface { id: ID } + directive @A(a: Interface) on FIELD_DEFINITION + + error: + message: 'cannot use Interface as argument a because INTERFACE is not a valid input type' + locations: [{line: 2, column: 14}] + + - name: Invalid location usage not allowed + input: | + directive @test on FIELD_DEFINITION + input I1 @test { f: String } + + error: + message: 'Directive test is not applicable on INPUT_OBJECT.' + locations: [{line: 2, column: 11}] + + - name: Valid location usage + input: | + directive @testInputField on INPUT_FIELD_DEFINITION + directive @testField on FIELD_DEFINITION + directive @inp on INPUT_OBJECT + input I1 @inp { f: String @testInputField } + type P { name: String @testField } + interface I { id: ID @testField } + + - name: Invalid directive argument not allowed + input: | + directive @foo(bla: Int!) on FIELD_DEFINITION + type P {f: Int @foo(foobla: 11)} + + error: + message: 'Undefined argument foobla for directive foo.' 
+ locations: [{line: 2, column: 21}] + + - name: non-null argument must be provided + input: | + directive @foo(bla: Int!) on FIELD_DEFINITION + type P {f: Int @foo } + + error: + message: 'Argument bla for directive foo cannot be null.' + locations: [{line: 2, column: 17}] + + - name: non-null argument must not be null + input: | + directive @foo(bla: Int!) on FIELD_DEFINITION + type P {f: Int @foo(bla: null) } + + error: + message: 'Argument bla for directive foo cannot be null.' + locations: [{line: 2, column: 17}] + +entry points: + - name: multiple schema entry points + input: | + schema { + query: Query + } + schema { + query: Query + } + scalar Query + error: + message: "Cannot have multiple schema entry points, consider schema extensions instead." + locations: [{line: 4, column: 8}] + + - name: Undefined schema entrypoint + input: | + schema { + query: Query + } + error: + message: "Schema root query refers to a type Query that does not exist." + locations: [{line: 2, column: 3}] + +entry point extensions: + - name: Undefined schema entrypoint + input: | + schema { + query: Query + } + scalar Query + extend schema { + mutation: Mutation + } + error: + message: "Schema root mutation refers to a type Mutation that does not exist." + locations: [{line: 6, column: 3}] + +type references: + - name: Field types + input: | + type User { + posts: Post + } + error: + message: "Undefined type Post." + locations: [{line: 2, column: 10}] + + - name: Arg types + input: | + type User { + posts(foo: FooBar): String + } + error: + message: "Undefined type FooBar." + locations: [{line: 2, column: 14}] + + - name: Directive arg types + input: | + directive @Foo(foo: FooBar) on FIELD_DEFINITION + + error: + message: "Undefined type FooBar." + locations: [{line: 1, column: 21}] + + - name: Invalid enum value + input: | + enum Enum { true } + + error: + message: "ENUM Enum: non-enum value true." 
+ locations: [{line: 1, column: 6}] diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go new file mode 100644 index 0000000000..9fb40d6a18 --- /dev/null +++ b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go @@ -0,0 +1,163 @@ +package validator + +import ( + "sort" + + //nolint:staticcheck // bad, yeah + . "github.com/vektah/gqlparser/v2/ast" + "github.com/vektah/gqlparser/v2/gqlerror" + "github.com/vektah/gqlparser/v2/validator/core" + validatorrules "github.com/vektah/gqlparser/v2/validator/rules" +) + +type ( + AddErrFunc = core.AddErrFunc + RuleFunc = core.RuleFunc + Rule = core.Rule + Events = core.Events + ErrorOption = core.ErrorOption + Walker = core.Walker +) + +var ( + Message = core.Message + QuotedOrList = core.QuotedOrList + OrList = core.OrList +) + +// Walk is an alias for core.Walk. +func Walk(schema *Schema, document *QueryDocument, observers *Events) { + core.Walk(schema, document, observers) +} + +var specifiedRules []Rule + +func init() { + // Initialize specifiedRules with default rules + defaultRules := validatorrules.NewDefaultRules() + for name, ruleFunc := range defaultRules.GetInner() { + specifiedRules = append(specifiedRules, Rule{Name: name, RuleFunc: ruleFunc}) + // ensure initial default is in deterministic order + sort.Sort(core.NameSorter(specifiedRules)) + } +} + +// AddRule adds a rule to the rule set. +// ruleFunc is called once each time `Validate` is executed. +func AddRule(name string, ruleFunc RuleFunc) { + specifiedRules = append(specifiedRules, Rule{Name: name, RuleFunc: ruleFunc}) +} + +// RemoveRule removes an existing rule from the rule set +// if one of the same name exists. +// The rule set is global, so it is not safe for concurrent changes. 
+func RemoveRule(name string) { + var result []Rule //nolint:prealloc // using initialized with len(rules) produces a race condition + for _, r := range specifiedRules { + if r.Name == name { + continue + } + result = append(result, r) + } + specifiedRules = result +} + +// ReplaceRule replaces an existing rule from the rule set +// if one of the same name exists. +// If no match is found, it will add a new rule to the rule set. +// The rule set is global, so it is not safe for concurrent changes. +func ReplaceRule(name string, ruleFunc RuleFunc) { + var found bool + var result []Rule //nolint:prealloc // using initialized with len(rules) produces a race condition + for _, r := range specifiedRules { + if r.Name == name { + found = true + result = append(result, Rule{Name: name, RuleFunc: ruleFunc}) + continue + } + result = append(result, r) + } + if !found { + specifiedRules = append(specifiedRules, Rule{Name: name, RuleFunc: ruleFunc}) + return + } + specifiedRules = result +} + +// Deprecated: use ValidateWithRules instead. 
+func Validate(schema *Schema, doc *QueryDocument, rules ...Rule) gqlerror.List { + if rules == nil { + rules = specifiedRules + } + + var errs gqlerror.List + if schema == nil { + errs = append(errs, gqlerror.Errorf("cannot validate as Schema is nil")) + } + if doc == nil { + errs = append(errs, gqlerror.Errorf("cannot validate as QueryDocument is nil")) + } + if len(errs) > 0 { + return errs + } + observers := &core.Events{} + for i := range rules { + rule := rules[i] + rule.RuleFunc(observers, func(options ...ErrorOption) { + err := &gqlerror.Error{ + Rule: rule.Name, + } + for _, o := range options { + o(err) + } + errs = append(errs, err) + }) + } + + Walk(schema, doc, observers) + return errs +} + +func ValidateWithRules( + schema *Schema, + doc *QueryDocument, + rules *validatorrules.Rules, +) gqlerror.List { + if rules == nil { + rules = validatorrules.NewDefaultRules() + } + + var errs gqlerror.List + if schema == nil { + errs = append(errs, gqlerror.Errorf("cannot validate as Schema is nil")) + } + if doc == nil { + errs = append(errs, gqlerror.Errorf("cannot validate as QueryDocument is nil")) + } + if len(errs) > 0 { + return errs + } + observers := &core.Events{} + + var currentRules []Rule //nolint:prealloc // would require extra local refs for len + for name, ruleFunc := range rules.GetInner() { + currentRules = append(currentRules, Rule{Name: name, RuleFunc: ruleFunc}) + // ensure deterministic order evaluation + sort.Sort(core.NameSorter(currentRules)) + } + + for _, currentRule := range currentRules { + currentRule.RuleFunc(observers, func(options ...ErrorOption) { + err := &gqlerror.Error{ + Rule: currentRule.Name, + } + for _, o := range options { + o(err) + } + errs = append(errs, err) + }) + } + + Walk(schema, doc, observers) + return errs +} diff --git a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go similarity index 81% rename from 
vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go rename to vendor/github.com/vektah/gqlparser/v2/validator/vars.go index 86be7c4df2..50e2cdb295 100644 --- a/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/vars.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go @@ -2,20 +2,26 @@ package validator import ( "encoding/json" + "errors" "fmt" "reflect" "strconv" "strings" - "github.com/open-policy-agent/opa/internal/gqlparser/ast" - "github.com/open-policy-agent/opa/internal/gqlparser/gqlerror" + "github.com/vektah/gqlparser/v2/ast" + "github.com/vektah/gqlparser/v2/gqlerror" ) -var ErrUnexpectedType = fmt.Errorf("Unexpected Type") +//nolint:staticcheck // We do not care about capitalized error strings +var ErrUnexpectedType = errors.New("Unexpected Type") -// VariableValues coerces and validates variable values -func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables map[string]interface{}) (map[string]interface{}, error) { - coercedVars := map[string]interface{}{} +// VariableValues coerces and validates variable values. 
+func VariableValues( + schema *ast.Schema, + op *ast.OperationDefinition, + variables map[string]any, +) (map[string]any, error) { + coercedVars := map[string]any{} validator := varValidator{ path: ast.Path{ast.PathName("variable")}, @@ -55,19 +61,29 @@ func VariableValues(schema *ast.Schema, op *ast.OperationDefinition, variables m jsonNumber, isJSONNumber := val.(json.Number) if isJSONNumber { - if v.Type.NamedType == "Int" { + switch v.Type.NamedType { + case "Int": n, err := jsonNumber.Int64() if err != nil { - return nil, gqlerror.ErrorPathf(validator.path, "cannot use value %d as %s", n, v.Type.NamedType) + return nil, gqlerror.ErrorPathf( + validator.path, + "cannot use value %d as %s", + n, + v.Type.NamedType, + ) } rv = reflect.ValueOf(n) - } else if v.Type.NamedType == "Float" { + case "Float": f, err := jsonNumber.Float64() if err != nil { - return nil, gqlerror.ErrorPathf(validator.path, "cannot use value %f as %s", f, v.Type.NamedType) + return nil, gqlerror.ErrorPathf( + validator.path, + "cannot use value %f as %s", + f, + v.Type.NamedType, + ) } rv = reflect.ValueOf(f) - } } if rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { @@ -92,7 +108,10 @@ type varValidator struct { schema *ast.Schema } -func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflect.Value, *gqlerror.Error) { +func (v *varValidator) validateVarType( + typ *ast.Type, + val reflect.Value, +) (reflect.Value, *gqlerror.Error) { currentPath := v.path resetPath := func() { v.path = currentPath @@ -136,7 +155,8 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec switch def.Kind { case ast.Enum: kind := val.Type().Kind() - if kind != reflect.Int && kind != reflect.Int32 && kind != reflect.Int64 && kind != reflect.String { + if kind != reflect.Int && kind != reflect.Int32 && kind != reflect.Int64 && + kind != reflect.String { return val, gqlerror.ErrorPathf(v.path, "enums must be ints or strings") } isValidEnum := false @@ 
-153,11 +173,17 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec kind := val.Type().Kind() switch typ.NamedType { case "Int": - if kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || kind == reflect.Float32 || kind == reflect.Float64 || IsValidIntString(val, kind) { + if kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || + kind == reflect.Float32 || + kind == reflect.Float64 || + IsValidIntString(val, kind) { return val, nil } case "Float": - if kind == reflect.Float32 || kind == reflect.Float64 || kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || IsValidFloatString(val, kind) { + if kind == reflect.Float32 || kind == reflect.Float64 || kind == reflect.Int || + kind == reflect.Int32 || + kind == reflect.Int64 || + IsValidFloatString(val, kind) { return val, nil } case "String": @@ -171,7 +197,8 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec } case "ID": - if kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || kind == reflect.String { + if kind == reflect.Int || kind == reflect.Int32 || kind == reflect.Int64 || + kind == reflect.String { return val, nil } default: @@ -181,7 +208,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec return val, gqlerror.ErrorPathf(v.path, "cannot use %s as %s", kind.String(), typ.NamedType) case ast.InputObject: if val.Kind() != reflect.Map { - return val, gqlerror.ErrorPathf(v.path, "must be a %s", def.Name) + return val, gqlerror.ErrorPathf(v.path, "must be a %s, not a %s", def.Name, val.Kind()) } // check for unknown fields @@ -222,7 +249,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec if fieldDef.Type.NonNull && field.IsNil() { return val, gqlerror.ErrorPathf(v.path, "cannot be null") } - //allow null object field and skip it + // allow null object field and skip it if !fieldDef.Type.NonNull && 
field.IsNil() { continue } diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md deleted file mode 100644 index 773c9b6431..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md +++ /dev/null @@ -1,27 +0,0 @@ -# Contributing to go.opentelemetry.io/auto/sdk - -The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. -It is designed to be: - -0. An OpenTelemetry compliant SDK -1. Instrumented by auto-instrumentation (serializable into OTLP JSON) -2. Lightweight -3. User-friendly - -These design choices are listed in the order of their importance. - -The primary design goal of this module is to be an OpenTelemetry SDK. -This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. - -Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. -The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. -This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. - -Outside of these first two goals, the intended use becomes relevant. -This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. -Because of this, this package needs to not add unnecessary dependencies to that API. -Ideally, it adds none. -It also needs to operate efficiently. - -Finally, this module is designed to be user-friendly to Go development. -It hides complexity in order to provide simpler APIs when the previous goals can all still be met. 
diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md deleted file mode 100644 index 088d19a6ce..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md +++ /dev/null @@ -1,15 +0,0 @@ -# Versioning - -This document describes the versioning policy for this module. -This policy is designed so the following goals can be achieved. - -**Users are provided a codebase of value that is stable and secure.** - -## Policy - -* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. - * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). 
- * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. - -* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go deleted file mode 100644 index ad73d8cb9d..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package sdk provides an auto-instrumentable OpenTelemetry SDK. - -An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the -process running this SDK. In that case, all telemetry the SDK produces will be -processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. - -By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to -auto-instrument the SDK, the SDK will not generate any telemetry. -*/ -package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go deleted file mode 100644 index af6ef171f6..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -// Attr is a key-value pair. -type Attr struct { - Key string `json:"key,omitempty"` - Value Value `json:"value,omitempty"` -} - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return Attr{key, StringValue(value)} -} - -// Int64 returns an Attr for an int64 value. -func Int64(key string, value int64) Attr { - return Attr{key, Int64Value(value)} -} - -// Int returns an Attr for an int value. -func Int(key string, value int) Attr { - return Int64(key, int64(value)) -} - -// Float64 returns an Attr for a float64 value. 
-func Float64(key string, value float64) Attr { - return Attr{key, Float64Value(value)} -} - -// Bool returns an Attr for a bool value. -func Bool(key string, value bool) Attr { - return Attr{key, BoolValue(value)} -} - -// Bytes returns an Attr for a []byte value. -// The passed slice must not be changed after it is passed. -func Bytes(key string, value []byte) Attr { - return Attr{key, BytesValue(value)} -} - -// Slice returns an Attr for a []Value value. -// The passed slice must not be changed after it is passed. -func Slice(key string, value ...Value) Attr { - return Attr{key, SliceValue(value...)} -} - -// Map returns an Attr for a map value. -// The passed slice must not be changed after it is passed. -func Map(key string, value ...Attr) Attr { - return Attr{key, MapValue(value...)} -} - -// Equal returns if a is equal to b. -func (a Attr) Equal(b Attr) bool { - return a.Key == b.Key && a.Value.Equal(b.Value) -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go deleted file mode 100644 index 949e2165c0..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package telemetry provides a lightweight representations of OpenTelemetry -telemetry that is compatible with the OTLP JSON protobuf encoding. 
-*/ -package telemetry diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go deleted file mode 100644 index 2950fdb42e..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -import ( - "encoding/hex" - "errors" - "fmt" -) - -const ( - traceIDSize = 16 - spanIDSize = 8 -) - -// TraceID is a custom data type that is used for all trace IDs. -type TraceID [traceIDSize]byte - -// String returns the hex string representation form of a TraceID. -func (tid TraceID) String() string { - return hex.EncodeToString(tid[:]) -} - -// IsEmpty returns false if id contains at least one non-zero byte. -func (tid TraceID) IsEmpty() bool { - return tid == [traceIDSize]byte{} -} - -// MarshalJSON converts the trace ID into a hex string enclosed in quotes. -func (tid TraceID) MarshalJSON() ([]byte, error) { - if tid.IsEmpty() { - return []byte(`""`), nil - } - return marshalJSON(tid[:]) -} - -// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in -// quotes. -func (tid *TraceID) UnmarshalJSON(data []byte) error { - *tid = [traceIDSize]byte{} - return unmarshalJSON(tid[:], data) -} - -// SpanID is a custom data type that is used for all span IDs. -type SpanID [spanIDSize]byte - -// String returns the hex string representation form of a SpanID. -func (sid SpanID) String() string { - return hex.EncodeToString(sid[:]) -} - -// IsEmpty returns true if the span ID contains at least one non-zero byte. -func (sid SpanID) IsEmpty() bool { - return sid == [spanIDSize]byte{} -} - -// MarshalJSON converts span ID into a hex string enclosed in quotes. 
-func (sid SpanID) MarshalJSON() ([]byte, error) { - if sid.IsEmpty() { - return []byte(`""`), nil - } - return marshalJSON(sid[:]) -} - -// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. -func (sid *SpanID) UnmarshalJSON(data []byte) error { - *sid = [spanIDSize]byte{} - return unmarshalJSON(sid[:], data) -} - -// marshalJSON converts id into a hex string enclosed in quotes. -func marshalJSON(id []byte) ([]byte, error) { - // Plus 2 quote chars at the start and end. - hexLen := hex.EncodedLen(len(id)) + 2 - - b := make([]byte, hexLen) - hex.Encode(b[1:hexLen-1], id) - b[0], b[hexLen-1] = '"', '"' - - return b, nil -} - -// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst, src []byte) error { - if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { - src = src[1 : l-1] - } - nLen := len(src) - if nLen == 0 { - return nil - } - - if len(dst) != hex.DecodedLen(nLen) { - return errors.New("invalid length for ID") - } - - _, err := hex.Decode(dst, src) - if err != nil { - return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) - } - return nil -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go deleted file mode 100644 index 5bb3b16c70..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -import ( - "encoding/json" - "strconv" -) - -// protoInt64 represents the protobuf encoding of integers which can be either -// strings or integers. -type protoInt64 int64 - -// Int64 returns the protoInt64 as an int64. -func (i *protoInt64) Int64() int64 { return int64(*i) } - -// UnmarshalJSON decodes both strings and integers. 
-func (i *protoInt64) UnmarshalJSON(data []byte) error { - if data[0] == '"' { - var str string - if err := json.Unmarshal(data, &str); err != nil { - return err - } - parsedInt, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return err - } - *i = protoInt64(parsedInt) - } else { - var parsedInt int64 - if err := json.Unmarshal(data, &parsedInt); err != nil { - return err - } - *i = protoInt64(parsedInt) - } - return nil -} - -// protoUint64 represents the protobuf encoding of integers which can be either -// strings or integers. -type protoUint64 uint64 - -// Uint64 returns the protoUint64 as a uint64. -func (i *protoUint64) Uint64() uint64 { return uint64(*i) } - -// UnmarshalJSON decodes both strings and integers. -func (i *protoUint64) UnmarshalJSON(data []byte) error { - if data[0] == '"' { - var str string - if err := json.Unmarshal(data, &str); err != nil { - return err - } - parsedUint, err := strconv.ParseUint(str, 10, 64) - if err != nil { - return err - } - *i = protoUint64(parsedUint) - } else { - var parsedUint uint64 - if err := json.Unmarshal(data, &parsedUint); err != nil { - return err - } - *i = protoUint64(parsedUint) - } - return nil -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go deleted file mode 100644 index cecad8bae3..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// Resource information. -type Resource struct { - // Attrs are the set of attributes that describe the resource. Attribute - // keys MUST be unique (it is not allowed to have more than one attribute - // with the same key). - Attrs []Attr `json:"attributes,omitempty"` - // DroppedAttrs is the number of dropped attributes. 
If the value - // is 0, then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. -func (r *Resource) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Resource type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Resource field: %#v", keyIface) - } - - switch key { - case "attributes": - err = decoder.Decode(&r.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&r.DroppedAttrs) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go deleted file mode 100644 index b6f2e28d40..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// Scope is the identifying values of the instrumentation scope. -type Scope struct { - Name string `json:"name,omitempty"` - Version string `json:"version,omitempty"` - Attrs []Attr `json:"attributes,omitempty"` - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. 
-func (s *Scope) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Scope type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Scope field: %#v", keyIface) - } - - switch key { - case "name": - err = decoder.Decode(&s.Name) - case "version": - err = decoder.Decode(&s.Version) - case "attributes": - err = decoder.Decode(&s.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&s.DroppedAttrs) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go deleted file mode 100644 index 67f80b6aa0..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -import ( - "bytes" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "time" -) - -// A Span represents a single operation performed by a single component of the -// system. -type Span struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - TraceID TraceID `json:"traceId,omitempty"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - SpanID SpanID `json:"spanId,omitempty"` - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - TraceState string `json:"traceState,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - ParentSpanID SpanID `json:"parentSpanId,omitempty"` - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether a span's parent - // is remote. The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // When creating span messages, if the message is logically forwarded from another source - // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - // be set to zero. - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // - // [Optional]. 
- Flags uint32 `json:"flags,omitempty"` - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // Empty value is equivalent to an unknown span name. - // - // This field is required. - Name string `json:"name"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - Kind SpanKind `json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - StartTime time.Time `json:"startTimeUnixNano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - EndTime time.Time `json:"endTimeUnixNano,omitempty"` - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. 
Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "example.com/myattribute": true - // "example.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` - // events is a collection of Event items. - Events []*SpanEvent `json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. - DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. - Links []*SpanLink `json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). - Status *Status `json:"status,omitempty"` -} - -// MarshalJSON encodes s into OTLP formatted JSON. 
-func (s Span) MarshalJSON() ([]byte, error) { - startT := s.StartTime.UnixNano() - if s.StartTime.IsZero() || startT < 0 { - startT = 0 - } - - endT := s.EndTime.UnixNano() - if s.EndTime.IsZero() || endT < 0 { - endT = 0 - } - - // Override non-empty default SpanID marshal and omitempty. - var parentSpanId string - if !s.ParentSpanID.IsEmpty() { - b := make([]byte, hex.EncodedLen(spanIDSize)) - hex.Encode(b, s.ParentSpanID[:]) - parentSpanId = string(b) - } - - type Alias Span - return json.Marshal(struct { - Alias - ParentSpanID string `json:"parentSpanId,omitempty"` - StartTime uint64 `json:"startTimeUnixNano,omitempty"` - EndTime uint64 `json:"endTimeUnixNano,omitempty"` - }{ - Alias: Alias(s), - ParentSpanID: parentSpanId, - StartTime: uint64(startT), // nolint:gosec // >0 checked above. - EndTime: uint64(endT), // nolint:gosec // >0 checked above. - }) -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. -func (s *Span) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Span type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Span field: %#v", keyIface) - } - - switch key { - case "traceId", "trace_id": - err = decoder.Decode(&s.TraceID) - case "spanId", "span_id": - err = decoder.Decode(&s.SpanID) - case "traceState", "trace_state": - err = decoder.Decode(&s.TraceState) - case "parentSpanId", "parent_span_id": - err = decoder.Decode(&s.ParentSpanID) - case "flags": - err = decoder.Decode(&s.Flags) - case "name": - err = decoder.Decode(&s.Name) - case "kind": - err = decoder.Decode(&s.Kind) - case "startTimeUnixNano", "start_time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. - s.StartTime = time.Unix(0, v) - case "endTimeUnixNano", "end_time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. - s.EndTime = time.Unix(0, v) - case "attributes": - err = decoder.Decode(&s.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&s.DroppedAttrs) - case "events": - err = decoder.Decode(&s.Events) - case "droppedEventsCount", "dropped_events_count": - err = decoder.Decode(&s.DroppedEvents) - case "links": - err = decoder.Decode(&s.Links) - case "droppedLinksCount", "dropped_links_count": - err = decoder.Decode(&s.DroppedLinks) - case "status": - err = decoder.Decode(&s.Status) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// SpanFlags represents constants used to interpret the -// Span.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. 
To extract the bit-field, for example, use an -// expression like: -// -// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) -// -// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. -// -// Note that Span flags were introduced in version 1.1 of the -// OpenTelemetry protocol. Older Span producers do not set this -// field, consequently consumers should not rely on the absence of a -// particular flag bit to indicate the presence of a particular feature. -type SpanFlags int32 - -const ( - // SpanFlagsTraceFlagsMask is a mask for trace-flags. - // - // Bits 0-7 are used for trace flags. - SpanFlagsTraceFlagsMask SpanFlags = 255 - // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. - // - // Bits 8 and 9 are used to indicate that the parent span or link span is - // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. - // - // Bits 8 and 9 are used to indicate that the parent span or link span is - // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is - // remote. - SpanFlagsContextIsRemoteMask SpanFlags = 512 -) - -// SpanKind is the type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type SpanKind int32 - -const ( - // SpanKindInternal indicates that the span represents an internal - // operation within an application, as opposed to an operation happening at - // the boundaries. - SpanKindInternal SpanKind = 1 - // SpanKindServer indicates that the span covers server-side handling of an - // RPC or other remote network request. - SpanKindServer SpanKind = 2 - // SpanKindClient indicates that the span describes a request to some - // remote service. - SpanKindClient SpanKind = 3 - // SpanKindProducer indicates that the span describes a producer sending a - // message to a broker. 
Unlike SpanKindClient and SpanKindServer, there is - // often no direct critical path latency relationship between producer and - // consumer spans. A SpanKindProducer span ends when the message was - // accepted by the broker while the logical processing of the message might - // span a much longer time. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer indicates that the span describes a consumer receiving - // a message from a broker. Like SpanKindProducer, there is often no direct - // critical path latency relationship between producer and consumer spans. - SpanKindConsumer SpanKind = 5 -) - -// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied -// text description and key-value pairs. -type SpanEvent struct { - // time_unix_nano is the time the event occurred. - Time time.Time `json:"timeUnixNano,omitempty"` - // name of the event. - // This field is semantically required to be set to non-empty string. - Name string `json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` -} - -// MarshalJSON encodes e into OTLP formatted JSON. -func (e SpanEvent) MarshalJSON() ([]byte, error) { - t := e.Time.UnixNano() - if e.Time.IsZero() || t < 0 { - t = 0 - } - - type Alias SpanEvent - return json.Marshal(struct { - Alias - Time uint64 `json:"timeUnixNano,omitempty"` - }{ - Alias: Alias(e), - Time: uint64(t), //nolint:gosec // >0 checked above - }) -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. 
-func (se *SpanEvent) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid SpanEvent type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) - } - - switch key { - case "timeUnixNano", "time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. - se.Time = time.Unix(0, v) - case "name": - err = decoder.Decode(&se.Name) - case "attributes": - err = decoder.Decode(&se.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&se.DroppedAttrs) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// SpanLink is a reference from the current span to another span in the same -// trace or in a different trace. For example, this can be used in batching -// operations, where a single batch handler processes multiple requests from -// different traces or when the handler receives a request from a different -// project. -type SpanLink struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceID TraceID `json:"traceId,omitempty"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanID SpanID `json:"spanId,omitempty"` - // The trace_state associated with the link. - TraceState string `json:"traceState,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). 
- Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether the link is remote. - // The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - // - // [Optional]. - Flags uint32 `json:"flags,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. -func (sl *SpanLink) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid SpanLink type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid SpanLink field: %#v", keyIface) - } - - switch key { - case "traceId", "trace_id": - err = decoder.Decode(&sl.TraceID) - case "spanId", "span_id": - err = decoder.Decode(&sl.SpanID) - case "traceState", "trace_state": - err = decoder.Decode(&sl.TraceState) - case "attributes": - err = decoder.Decode(&sl.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&sl.DroppedAttrs) - case "flags": - err = decoder.Decode(&sl.Flags) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go deleted file mode 100644 index a2802764f8..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -// StatusCode is the status of a Span. -// -// For the semantics of status codes see -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status -type StatusCode int32 - -const ( - // StatusCodeUnset is the default status. - StatusCodeUnset StatusCode = 0 - // StatusCodeOK is used when the Span has been validated by an Application - // developer or Operator to have completed successfully. - StatusCodeOK StatusCode = 1 - // StatusCodeError is used when the Span contains an error. - StatusCodeError StatusCode = 2 -) - -var statusCodeStrings = []string{ - "Unset", - "OK", - "Error", -} - -func (s StatusCode) String() string { - if s >= 0 && int(s) < len(statusCodeStrings) { - return statusCodeStrings[s] - } - return "" -} - -// The Status type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. 
-type Status struct { - // A developer-facing human readable error message. - Message string `json:"message,omitempty"` - // The status code. - Code StatusCode `json:"code,omitempty"` -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go deleted file mode 100644 index 44197b8084..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// Traces represents the traces data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP traces data but do -// not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -type Traces struct { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. -func (td *Traces) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid TracesData type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid TracesData field: %#v", keyIface) - } - - switch key { - case "resourceSpans", "resource_spans": - err = decoder.Decode(&td.ResourceSpans) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// ResourceSpans is a collection of ScopeSpans from a Resource. -type ResourceSpans struct { - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - Resource Resource `json:"resource"` - // A list of ScopeSpans that originate from a resource. - ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_spans" field which have their own schema_url field. - SchemaURL string `json:"schemaUrl,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. -func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid ResourceSpans type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) - } - - switch key { - case "resource": - err = decoder.Decode(&rs.Resource) - case "scopeSpans", "scope_spans": - err = decoder.Decode(&rs.ScopeSpans) - case "schemaUrl", "schema_url": - err = decoder.Decode(&rs.SchemaURL) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// ScopeSpans is a collection of Spans produced by an InstrumentationScope. 
-type ScopeSpans struct { - // The instrumentation scope information for the spans in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - Scope *Scope `json:"scope"` - // A list of Spans that originate from an instrumentation scope. - Spans []*Span `json:"spans,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. - SchemaURL string `json:"schemaUrl,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. -func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid ScopeSpans type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) - } - - switch key { - case "scope": - err = decoder.Decode(&ss.Scope) - case "spans": - err = decoder.Decode(&ss.Spans) - case "schemaUrl", "schema_url": - err = decoder.Decode(&ss.SchemaURL) - default: - // Skip unknown. 
- } - - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go deleted file mode 100644 index 022768bb50..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry - -import ( - "bytes" - "cmp" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "slices" - "strconv" - "unsafe" -) - -// A Value represents a structured value. -// A zero value is valid and represents an empty value. -type Value struct { - // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint:unused // This is indeed used. - - // num holds the value for Int64, Float64, and Bool. It holds the length - // for String, Bytes, Slice, Map. - num uint64 - // any holds either the KindBool, KindInt64, KindFloat64, stringptr, - // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 - // then the value of Value is in num as described above. Otherwise, it - // contains the value wrapped in the appropriate type. - any any -} - -type ( - // sliceptr represents a value in Value.any for KindString Values. - stringptr *byte - // bytesptr represents a value in Value.any for KindBytes Values. - bytesptr *byte - // sliceptr represents a value in Value.any for KindSlice Values. - sliceptr *Value - // mapptr represents a value in Value.any for KindMap Values. - mapptr *Attr -) - -// ValueKind is the kind of a [Value]. -type ValueKind int - -// ValueKind values. 
-const ( - ValueKindEmpty ValueKind = iota - ValueKindBool - ValueKindFloat64 - ValueKindInt64 - ValueKindString - ValueKindBytes - ValueKindSlice - ValueKindMap -) - -var valueKindStrings = []string{ - "Empty", - "Bool", - "Float64", - "Int64", - "String", - "Bytes", - "Slice", - "Map", -} - -func (k ValueKind) String() string { - if k >= 0 && int(k) < len(valueKindStrings) { - return valueKindStrings[k] - } - return "" -} - -// StringValue returns a new [Value] for a string. -func StringValue(v string) Value { - return Value{ - num: uint64(len(v)), - any: stringptr(unsafe.StringData(v)), - } -} - -// IntValue returns a [Value] for an int. -func IntValue(v int) Value { return Int64Value(int64(v)) } - -// Int64Value returns a [Value] for an int64. -func Int64Value(v int64) Value { - return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv. -} - -// Float64Value returns a [Value] for a float64. -func Float64Value(v float64) Value { - return Value{num: math.Float64bits(v), any: ValueKindFloat64} -} - -// BoolValue returns a [Value] for a bool. -func BoolValue(v bool) Value { //nolint:revive // Not a control flag. - var n uint64 - if v { - n = 1 - } - return Value{num: n, any: ValueKindBool} -} - -// BytesValue returns a [Value] for a byte slice. The passed slice must not be -// changed after it is passed. -func BytesValue(v []byte) Value { - return Value{ - num: uint64(len(v)), - any: bytesptr(unsafe.SliceData(v)), - } -} - -// SliceValue returns a [Value] for a slice of [Value]. The passed slice must -// not be changed after it is passed. -func SliceValue(vs ...Value) Value { - return Value{ - num: uint64(len(vs)), - any: sliceptr(unsafe.SliceData(vs)), - } -} - -// MapValue returns a new [Value] for a slice of key-value pairs. The passed -// slice must not be changed after it is passed. 
-func MapValue(kvs ...Attr) Value { - return Value{ - num: uint64(len(kvs)), - any: mapptr(unsafe.SliceData(kvs)), - } -} - -// AsString returns the value held by v as a string. -func (v Value) AsString() string { - if sp, ok := v.any.(stringptr); ok { - return unsafe.String(sp, v.num) - } - // TODO: error handle - return "" -} - -// asString returns the value held by v as a string. It will panic if the Value -// is not KindString. -func (v Value) asString() string { - return unsafe.String(v.any.(stringptr), v.num) -} - -// AsInt64 returns the value held by v as an int64. -func (v Value) AsInt64() int64 { - if v.Kind() != ValueKindInt64 { - // TODO: error handle - return 0 - } - return v.asInt64() -} - -// asInt64 returns the value held by v as an int64. If v is not of KindInt64, -// this will return garbage. -func (v Value) asInt64() int64 { - // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) //nolint:gosec // Bounded. -} - -// AsBool returns the value held by v as a bool. -func (v Value) AsBool() bool { - if v.Kind() != ValueKindBool { - // TODO: error handle - return false - } - return v.asBool() -} - -// asBool returns the value held by v as a bool. If v is not of KindBool, this -// will return garbage. -func (v Value) asBool() bool { return v.num == 1 } - -// AsFloat64 returns the value held by v as a float64. -func (v Value) AsFloat64() float64 { - if v.Kind() != ValueKindFloat64 { - // TODO: error handle - return 0 - } - return v.asFloat64() -} - -// asFloat64 returns the value held by v as a float64. If v is not of -// KindFloat64, this will return garbage. -func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } - -// AsBytes returns the value held by v as a []byte. -func (v Value) AsBytes() []byte { - if sp, ok := v.any.(bytesptr); ok { - return unsafe.Slice((*byte)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asBytes returns the value held by v as a []byte. 
It will panic if the Value -// is not KindBytes. -func (v Value) asBytes() []byte { - return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) -} - -// AsSlice returns the value held by v as a []Value. -func (v Value) AsSlice() []Value { - if sp, ok := v.any.(sliceptr); ok { - return unsafe.Slice((*Value)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asSlice returns the value held by v as a []Value. It will panic if the Value -// is not KindSlice. -func (v Value) asSlice() []Value { - return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) -} - -// AsMap returns the value held by v as a []Attr. -func (v Value) AsMap() []Attr { - if sp, ok := v.any.(mapptr); ok { - return unsafe.Slice((*Attr)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asMap returns the value held by v as a []Attr. It will panic if the -// Value is not KindMap. -func (v Value) asMap() []Attr { - return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) -} - -// Kind returns the Kind of v. -func (v Value) Kind() ValueKind { - switch x := v.any.(type) { - case ValueKind: - return x - case stringptr: - return ValueKindString - case bytesptr: - return ValueKindBytes - case sliceptr: - return ValueKindSlice - case mapptr: - return ValueKindMap - default: - return ValueKindEmpty - } -} - -// Empty returns if v does not hold any value. -func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } - -// Equal returns if v is equal to w. 
-func (v Value) Equal(w Value) bool { - k1 := v.Kind() - k2 := w.Kind() - if k1 != k2 { - return false - } - switch k1 { - case ValueKindInt64, ValueKindBool: - return v.num == w.num - case ValueKindString: - return v.asString() == w.asString() - case ValueKindFloat64: - return v.asFloat64() == w.asFloat64() - case ValueKindSlice: - return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) - case ValueKindMap: - sv := sortMap(v.asMap()) - sw := sortMap(w.asMap()) - return slices.EqualFunc(sv, sw, Attr.Equal) - case ValueKindBytes: - return bytes.Equal(v.asBytes(), w.asBytes()) - case ValueKindEmpty: - return true - default: - // TODO: error handle - return false - } -} - -func sortMap(m []Attr) []Attr { - sm := make([]Attr, len(m)) - copy(sm, m) - slices.SortFunc(sm, func(a, b Attr) int { - return cmp.Compare(a.Key, b.Key) - }) - - return sm -} - -// String returns Value's value as a string, formatted like [fmt.Sprint]. -// -// The returned string is meant for debugging; -// the string representation is not stable. -func (v Value) String() string { - switch v.Kind() { - case ValueKindString: - return v.asString() - case ValueKindInt64: - // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded. - case ValueKindFloat64: - return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) - case ValueKindBool: - return strconv.FormatBool(v.asBool()) - case ValueKindBytes: - return string(v.asBytes()) - case ValueKindMap: - return fmt.Sprint(v.asMap()) - case ValueKindSlice: - return fmt.Sprint(v.asSlice()) - case ValueKindEmpty: - return "" - default: - // Try to handle this as gracefully as possible. - // - // Don't panic here. The goal here is to have developers find this - // first if a slog.Kind is is not handled. It is - // preferable to have user's open issue asking why their attributes - // have a "unhandled: " prefix than say that their code is panicking. 
- return fmt.Sprintf("", v.Kind()) - } -} - -// MarshalJSON encodes v into OTLP formatted JSON. -func (v *Value) MarshalJSON() ([]byte, error) { - switch v.Kind() { - case ValueKindString: - return json.Marshal(struct { - Value string `json:"stringValue"` - }{v.asString()}) - case ValueKindInt64: - return json.Marshal(struct { - Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv. - case ValueKindFloat64: - return json.Marshal(struct { - Value float64 `json:"doubleValue"` - }{v.asFloat64()}) - case ValueKindBool: - return json.Marshal(struct { - Value bool `json:"boolValue"` - }{v.asBool()}) - case ValueKindBytes: - return json.Marshal(struct { - Value []byte `json:"bytesValue"` - }{v.asBytes()}) - case ValueKindMap: - return json.Marshal(struct { - Value struct { - Values []Attr `json:"values"` - } `json:"kvlistValue"` - }{struct { - Values []Attr `json:"values"` - }{v.asMap()}}) - case ValueKindSlice: - return json.Marshal(struct { - Value struct { - Values []Value `json:"values"` - } `json:"arrayValue"` - }{struct { - Values []Value `json:"values"` - }{v.asSlice()}}) - case ValueKindEmpty: - return nil, nil - default: - return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) - } -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. -func (v *Value) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Value type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Value key: %#v", keyIface) - } - - switch key { - case "stringValue", "string_value": - var val string - err = decoder.Decode(&val) - *v = StringValue(val) - case "boolValue", "bool_value": - var val bool - err = decoder.Decode(&val) - *v = BoolValue(val) - case "intValue", "int_value": - var val protoInt64 - err = decoder.Decode(&val) - *v = Int64Value(val.Int64()) - case "doubleValue", "double_value": - var val float64 - err = decoder.Decode(&val) - *v = Float64Value(val) - case "bytesValue", "bytes_value": - var val64 string - if err := decoder.Decode(&val64); err != nil { - return err - } - var val []byte - val, err = base64.StdEncoding.DecodeString(val64) - *v = BytesValue(val) - case "arrayValue", "array_value": - var val struct{ Values []Value } - err = decoder.Decode(&val) - *v = SliceValue(val.Values...) - case "kvlistValue", "kvlist_value": - var val struct{ Values []Attr } - err = decoder.Decode(&val) - *v = MapValue(val.Values...) - default: - // Skip unknown. - continue - } - // Use first valid. Ignore the rest. - return err - } - - // Only unknown fields. Return nil without unmarshaling any value. - return nil -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go deleted file mode 100644 index 86babf1a88..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/limit.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package sdk - -import ( - "log/slog" - "os" - "strconv" -) - -// maxSpan are the span limits resolved during startup. -var maxSpan = newSpanLimits() - -type spanLimits struct { - // Attrs is the number of allowed attributes for a span. - // - // This is resolved from the environment variable value for the - // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. 
Otherwise, the - // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if - // that is not set, is used. - Attrs int - // AttrValueLen is the maximum attribute value length allowed for a span. - // - // This is resolved from the environment variable value for the - // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the - // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 - // if that is not set, is used. - AttrValueLen int - // Events is the number of allowed events for a span. - // - // This is resolved from the environment variable value for the - // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. - Events int - // EventAttrs is the number of allowed attributes for a span event. - // - // The is resolved from the environment variable value for the - // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. - EventAttrs int - // Links is the number of allowed Links for a span. - // - // This is resolved from the environment variable value for the - // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. - Links int - // LinkAttrs is the number of allowed attributes for a span link. - // - // This is resolved from the environment variable value for the - // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. - LinkAttrs int -} - -func newSpanLimits() spanLimits { - return spanLimits{ - Attrs: firstEnv( - 128, - "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", - "OTEL_ATTRIBUTE_COUNT_LIMIT", - ), - AttrValueLen: firstEnv( - -1, // Unlimited. 
- "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", - "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", - ), - Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), - EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), - Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), - LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), - } -} - -// firstEnv returns the parsed integer value of the first matching environment -// variable from keys. The defaultVal is returned if the value is not an -// integer or no match is found. -func firstEnv(defaultVal int, keys ...string) int { - for _, key := range keys { - strV := os.Getenv(key) - if strV == "" { - continue - } - - v, err := strconv.Atoi(strV) - if err == nil { - return v - } - slog.Warn( - "invalid limit environment variable", - "error", err, - "key", key, - "value", strV, - ) - } - - return defaultVal -} diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go deleted file mode 100644 index 815d271ffb..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/span.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package sdk - -import ( - "encoding/json" - "fmt" - "math" - "reflect" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - "unicode/utf8" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" - - "go.opentelemetry.io/auto/sdk/internal/telemetry" -) - -type span struct { - noop.Span - - spanContext trace.SpanContext - sampled atomic.Bool - - mu sync.Mutex - traces *telemetry.Traces - span *telemetry.Span -} - -func (s *span) SpanContext() trace.SpanContext { - if s == nil { - return trace.SpanContext{} - } - // s.spanContext is immutable, do not acquire lock s.mu. 
- return s.spanContext -} - -func (s *span) IsRecording() bool { - if s == nil { - return false - } - - return s.sampled.Load() -} - -func (s *span) SetStatus(c codes.Code, msg string) { - if s == nil || !s.sampled.Load() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - if s.span.Status == nil { - s.span.Status = new(telemetry.Status) - } - - s.span.Status.Message = msg - - switch c { - case codes.Unset: - s.span.Status.Code = telemetry.StatusCodeUnset - case codes.Error: - s.span.Status.Code = telemetry.StatusCodeError - case codes.Ok: - s.span.Status.Code = telemetry.StatusCodeOK - } -} - -func (s *span) SetAttributes(attrs ...attribute.KeyValue) { - if s == nil || !s.sampled.Load() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - limit := maxSpan.Attrs - if limit == 0 { - // No attributes allowed. - n := int64(len(attrs)) - if n > 0 { - s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked. - min(n, math.MaxUint32), - ) - } - return - } - - m := make(map[string]int) - for i, a := range s.span.Attrs { - m[a.Key] = i - } - - for _, a := range attrs { - val := convAttrValue(a.Value) - if val.Empty() { - s.span.DroppedAttrs++ - continue - } - - if idx, ok := m[string(a.Key)]; ok { - s.span.Attrs[idx] = telemetry.Attr{ - Key: string(a.Key), - Value: val, - } - } else if limit < 0 || len(s.span.Attrs) < limit { - s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ - Key: string(a.Key), - Value: val, - }) - m[string(a.Key)] = len(s.span.Attrs) - 1 - } else { - s.span.DroppedAttrs++ - } - } -} - -// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The -// number of dropped attributes is also returned. -func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { - n := len(attrs) - if limit == 0 { - var out uint32 - if n > 0 { - out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked. - } - return nil, out - } - - if limit < 0 { - // Unlimited. 
- return convAttrs(attrs), 0 - } - - if n < 0 { - n = 0 - } - - limit = min(n, limit) - return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked. -} - -func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { - if len(attrs) == 0 { - // Avoid allocations if not necessary. - return nil - } - - out := make([]telemetry.Attr, 0, len(attrs)) - for _, attr := range attrs { - key := string(attr.Key) - val := convAttrValue(attr.Value) - if val.Empty() { - continue - } - out = append(out, telemetry.Attr{Key: key, Value: val}) - } - return out -} - -func convAttrValue(value attribute.Value) telemetry.Value { - switch value.Type() { - case attribute.BOOL: - return telemetry.BoolValue(value.AsBool()) - case attribute.INT64: - return telemetry.Int64Value(value.AsInt64()) - case attribute.FLOAT64: - return telemetry.Float64Value(value.AsFloat64()) - case attribute.STRING: - v := truncate(maxSpan.AttrValueLen, value.AsString()) - return telemetry.StringValue(v) - case attribute.BOOLSLICE: - slice := value.AsBoolSlice() - out := make([]telemetry.Value, 0, len(slice)) - for _, v := range slice { - out = append(out, telemetry.BoolValue(v)) - } - return telemetry.SliceValue(out...) - case attribute.INT64SLICE: - slice := value.AsInt64Slice() - out := make([]telemetry.Value, 0, len(slice)) - for _, v := range slice { - out = append(out, telemetry.Int64Value(v)) - } - return telemetry.SliceValue(out...) - case attribute.FLOAT64SLICE: - slice := value.AsFloat64Slice() - out := make([]telemetry.Value, 0, len(slice)) - for _, v := range slice { - out = append(out, telemetry.Float64Value(v)) - } - return telemetry.SliceValue(out...) - case attribute.STRINGSLICE: - slice := value.AsStringSlice() - out := make([]telemetry.Value, 0, len(slice)) - for _, v := range slice { - v = truncate(maxSpan.AttrValueLen, v) - out = append(out, telemetry.StringValue(v)) - } - return telemetry.SliceValue(out...) 
- } - return telemetry.Value{} -} - -// truncate returns a truncated version of s such that it contains less than -// the limit number of characters. Truncation is applied by returning the limit -// number of valid characters contained in s. -// -// If limit is negative, it returns the original string. -// -// UTF-8 is supported. When truncating, all invalid characters are dropped -// before applying truncation. -// -// If s already contains less than the limit number of bytes, it is returned -// unchanged. No invalid characters are removed. -func truncate(limit int, s string) string { - // This prioritize performance in the following order based on the most - // common expected use-cases. - // - // - Short values less than the default limit (128). - // - Strings with valid encodings that exceed the limit. - // - No limit. - // - Strings with invalid encodings that exceed the limit. - if limit < 0 || len(s) <= limit { - return s - } - - // Optimistically, assume all valid UTF-8. - var b strings.Builder - count := 0 - for i, c := range s { - if c != utf8.RuneError { - count++ - if count > limit { - return s[:i] - } - continue - } - - _, size := utf8.DecodeRuneInString(s[i:]) - if size == 1 { - // Invalid encoding. - b.Grow(len(s) - 1) - _, _ = b.WriteString(s[:i]) - s = s[i:] - break - } - } - - // Fast-path, no invalid input. - if b.Cap() == 0 { - return s - } - - // Truncate while validating UTF-8. - for i := 0; i < len(s) && count < limit; { - c := s[i] - if c < utf8.RuneSelf { - // Optimization for single byte runes (common case). - _ = b.WriteByte(c) - i++ - count++ - continue - } - - _, size := utf8.DecodeRuneInString(s[i:]) - if size == 1 { - // We checked for all 1-byte runes above, this is a RuneError. 
- i++ - continue - } - - _, _ = b.WriteString(s[i : i+size]) - i += size - count++ - } - - return b.String() -} - -func (s *span) End(opts ...trace.SpanEndOption) { - if s == nil || !s.sampled.Swap(false) { - return - } - - // s.end exists so the lock (s.mu) is not held while s.ended is called. - s.ended(s.end(opts)) -} - -func (s *span) end(opts []trace.SpanEndOption) []byte { - s.mu.Lock() - defer s.mu.Unlock() - - cfg := trace.NewSpanEndConfig(opts...) - if t := cfg.Timestamp(); !t.IsZero() { - s.span.EndTime = cfg.Timestamp() - } else { - s.span.EndTime = time.Now() - } - - b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. - return b -} - -// Expected to be implemented in eBPF. -// -//go:noinline -func (*span) ended(buf []byte) { ended(buf) } - -// ended is used for testing. -var ended = func([]byte) {} - -func (s *span) RecordError(err error, opts ...trace.EventOption) { - if s == nil || err == nil || !s.sampled.Load() { - return - } - - cfg := trace.NewEventConfig(opts...) - - attrs := cfg.Attributes() - attrs = append(attrs, - semconv.ExceptionType(typeStr(err)), - semconv.ExceptionMessage(err.Error()), - ) - if cfg.StackTrace() { - buf := make([]byte, 2048) - n := runtime.Stack(buf, false) - attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) - } - - s.mu.Lock() - defer s.mu.Unlock() - - s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) -} - -func typeStr(i any) string { - t := reflect.TypeOf(i) - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - return t.String() - } - return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) -} - -func (s *span) AddEvent(name string, opts ...trace.EventOption) { - if s == nil || !s.sampled.Load() { - return - } - - cfg := trace.NewEventConfig(opts...) - - s.mu.Lock() - defer s.mu.Unlock() - - s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) -} - -// addEvent adds an event with name and attrs at tStamp to the span. 
The span -// lock (s.mu) needs to be held by the caller. -func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { - limit := maxSpan.Events - - if limit == 0 { - s.span.DroppedEvents++ - return - } - - if limit > 0 && len(s.span.Events) == limit { - // Drop head while avoiding allocation of more capacity. - copy(s.span.Events[:limit-1], s.span.Events[1:]) - s.span.Events = s.span.Events[:limit-1] - s.span.DroppedEvents++ - } - - e := &telemetry.SpanEvent{Time: tStamp, Name: name} - e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) - - s.span.Events = append(s.span.Events, e) -} - -func (s *span) AddLink(link trace.Link) { - if s == nil || !s.sampled.Load() { - return - } - - l := maxSpan.Links - - s.mu.Lock() - defer s.mu.Unlock() - - if l == 0 { - s.span.DroppedLinks++ - return - } - - if l > 0 && len(s.span.Links) == l { - // Drop head while avoiding allocation of more capacity. - copy(s.span.Links[:l-1], s.span.Links[1:]) - s.span.Links = s.span.Links[:l-1] - s.span.DroppedLinks++ - } - - s.span.Links = append(s.span.Links, convLink(link)) -} - -func convLinks(links []trace.Link) []*telemetry.SpanLink { - out := make([]*telemetry.SpanLink, 0, len(links)) - for _, link := range links { - out = append(out, convLink(link)) - } - return out -} - -func convLink(link trace.Link) *telemetry.SpanLink { - l := &telemetry.SpanLink{ - TraceID: telemetry.TraceID(link.SpanContext.TraceID()), - SpanID: telemetry.SpanID(link.SpanContext.SpanID()), - TraceState: link.SpanContext.TraceState().String(), - Flags: uint32(link.SpanContext.TraceFlags()), - } - l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) - - return l -} - -func (s *span) SetName(name string) { - if s == nil || !s.sampled.Load() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - s.span.Name = name -} - -func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git 
a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go deleted file mode 100644 index e09acf022f..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package sdk - -import ( - "context" - "math" - "time" - - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" - - "go.opentelemetry.io/auto/sdk/internal/telemetry" -) - -type tracer struct { - noop.Tracer - - name, schemaURL, version string -} - -var _ trace.Tracer = tracer{} - -func (t tracer) Start( - ctx context.Context, - name string, - opts ...trace.SpanStartOption, -) (context.Context, trace.Span) { - var psc, sc trace.SpanContext - sampled := true - span := new(span) - - // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &sc) - - span.sampled.Store(sampled) - span.spanContext = sc - - ctx = trace.ContextWithSpan(ctx, span) - - if sampled { - // Only build traces if sampled. - cfg := trace.NewSpanStartConfig(opts...) - span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) - } - - return ctx, span -} - -// Expected to be implemented in eBPF. -// -//go:noinline -func (t *tracer) start( - ctx context.Context, - spanPtr *span, - psc *trace.SpanContext, - sampled *bool, - sc *trace.SpanContext, -) { - start(ctx, spanPtr, psc, sampled, sc) -} - -// start is used for testing. 
-var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} - -var intToUint32Bound = min(math.MaxInt, math.MaxUint32) - -func (t tracer) traces( - name string, - cfg trace.SpanConfig, - sc, psc trace.SpanContext, -) (*telemetry.Traces, *telemetry.Span) { - span := &telemetry.Span{ - TraceID: telemetry.TraceID(sc.TraceID()), - SpanID: telemetry.SpanID(sc.SpanID()), - Flags: uint32(sc.TraceFlags()), - TraceState: sc.TraceState().String(), - ParentSpanID: telemetry.SpanID(psc.SpanID()), - Name: name, - Kind: spanKind(cfg.SpanKind()), - } - - span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) - - links := cfg.Links() - if limit := maxSpan.Links; limit == 0 { - n := len(links) - if n > 0 { - bounded := max(min(n, intToUint32Bound), 0) - span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. - } - } else { - if limit > 0 { - n := max(len(links)-limit, 0) - bounded := min(n, intToUint32Bound) - span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. - links = links[n:] - } - span.Links = convLinks(links) - } - - if t := cfg.Timestamp(); !t.IsZero() { - span.StartTime = cfg.Timestamp() - } else { - span.StartTime = time.Now() - } - - return &telemetry.Traces{ - ResourceSpans: []*telemetry.ResourceSpans{ - { - ScopeSpans: []*telemetry.ScopeSpans{ - { - Scope: &telemetry.Scope{ - Name: t.name, - Version: t.version, - }, - Spans: []*telemetry.Span{span}, - SchemaURL: t.schemaURL, - }, - }, - }, - }, - }, span -} - -func spanKind(kind trace.SpanKind) telemetry.SpanKind { - switch kind { - case trace.SpanKindInternal: - return telemetry.SpanKindInternal - case trace.SpanKindServer: - return telemetry.SpanKindServer - case trace.SpanKindClient: - return telemetry.SpanKindClient - case trace.SpanKindProducer: - return telemetry.SpanKindProducer - case trace.SpanKindConsumer: - return telemetry.SpanKindConsumer - } - return telemetry.SpanKind(0) // undefined. 
-} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go deleted file mode 100644 index dbc477a59a..0000000000 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package sdk - -import ( - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" -) - -// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. -// -// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument -// the process using the returned TracerProvider, all of the telemetry it -// produces will be processed and handled by that Instrumentation. By default, -// if no Instrumentation instruments the TracerProvider it will not generate -// any trace telemetry. -func TracerProvider() trace.TracerProvider { return tracerProviderInstance } - -var tracerProviderInstance = new(tracerProvider) - -type tracerProvider struct{ noop.TracerProvider } - -var _ trace.TracerProvider = tracerProvider{} - -func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - cfg := trace.NewTracerConfig(opts...) 
- return tracer{ - name: name, - version: cfg.InstrumentationVersion(), - schemaURL: cfg.SchemaURL(), - } -} diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml deleted file mode 100644 index 128d61a226..0000000000 --- a/vendor/go.opentelemetry.io/otel/.clomonitor.yml +++ /dev/null @@ -1,3 +0,0 @@ -exemptions: - - check: artifacthub_badge - reason: "Artifact Hub doesn't support Go packages" diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore deleted file mode 100644 index a6d0cbcc9e..0000000000 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ /dev/null @@ -1,11 +0,0 @@ -ot -fo -te -collison -consequentially -ans -nam -valu -thirdparty -addOpt -observ diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc deleted file mode 100644 index e2cb3ea944..0000000000 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ /dev/null @@ -1,10 +0,0 @@ -# https://github.com/codespell-project/codespell -[codespell] -builtin = clear,rare,informal -check-filenames = -check-hidden = -ignore-words = .codespellignore -interactive = 1 -skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools -uri-ignore-words-list = * -write = diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes deleted file mode 100644 index 314766e91b..0000000000 --- a/vendor/go.opentelemetry.io/otel/.gitattributes +++ /dev/null @@ -1,3 +0,0 @@ -* text=auto eol=lf -*.{cmd,[cC][mM][dD]} text eol=crlf -*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore deleted file mode 100644 index 749e8e881b..0000000000 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -.DS_Store -Thumbs.db - -.cache/ -.tools/ -venv/ -.idea/ -.vscode/ -*.iml -*.so -coverage.* -go.work -go.work.sum 
- -gen/ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml deleted file mode 100644 index db1f55101c..0000000000 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ /dev/null @@ -1,279 +0,0 @@ -version: "2" -run: - issues-exit-code: 1 - tests: true -linters: - default: none - enable: - - asasalint - - bodyclose - - depguard - - errcheck - - errorlint - - gocritic - - godot - - gosec - - govet - - ineffassign - - misspell - - modernize - - noctx - - perfsprint - - revive - - staticcheck - - testifylint - - unconvert - - unparam - - unused - - usestdlibvars - - usetesting - settings: - depguard: - rules: - auto/sdk: - files: - - '!internal/global/trace.go' - - ~internal/global/trace_test.go - deny: - - pkg: go.opentelemetry.io/auto/sdk - desc: Do not use SDK from automatic instrumentation. - non-tests: - files: - - '!$test' - - '!**/*test/*.go' - - '!**/internal/matchers/*.go' - deny: - - pkg: testing - - pkg: github.com/stretchr/testify - - pkg: crypto/md5 - - pkg: crypto/sha1 - - pkg: crypto/**/pkix - otel-internal: - files: - - '**/sdk/*.go' - - '**/sdk/**/*.go' - - '**/exporters/*.go' - - '**/exporters/**/*.go' - - '**/schema/*.go' - - '**/schema/**/*.go' - - '**/metric/*.go' - - '**/metric/**/*.go' - - '**/bridge/*.go' - - '**/bridge/**/*.go' - - '**/trace/*.go' - - '**/trace/**/*.go' - - '**/log/*.go' - - '**/log/**/*.go' - deny: - - pkg: go.opentelemetry.io/otel/internal$ - desc: Do not use cross-module internal packages. - - pkg: go.opentelemetry.io/otel/internal/internaltest - desc: Do not use cross-module internal packages. - otlp-internal: - files: - - '!**/exporters/otlp/internal/**/*.go' - deny: - - pkg: go.opentelemetry.io/otel/exporters/otlp/internal - desc: Do not use cross-module internal packages. 
- otlpmetric-internal: - files: - - '!**/exporters/otlp/otlpmetric/internal/*.go' - - '!**/exporters/otlp/otlpmetric/internal/**/*.go' - deny: - - pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal - desc: Do not use cross-module internal packages. - otlptrace-internal: - files: - - '!**/exporters/otlp/otlptrace/*.go' - - '!**/exporters/otlp/otlptrace/internal/**.go' - deny: - - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal - desc: Do not use cross-module internal packages. - semconv: - list-mode: lax - files: - - "!**/semconv/**" - - "!**/exporters/zipkin/**" - deny: - - pkg: go.opentelemetry.io/otel/semconv - desc: "Use go.opentelemetry.io/otel/semconv/v1.40.0 instead. If a newer semconv version has been released, update the depguard rule." - allow: - - go.opentelemetry.io/otel/semconv/v1.40.0 - gocritic: - disabled-checks: - - appendAssign - - commentedOutCode - - dupArg - - hugeParam - - importShadow - - preferDecodeRune - - rangeValCopy - - unnamedResult - - whyNoLint - enable-all: true - godot: - exclude: - # Exclude links. - - '^ *\[[^]]+\]:' - # Exclude sentence fragments for lists. - - ^[ ]*[-•] - # Exclude sentences prefixing a list. 
- - :$ - misspell: - locale: US - ignore-rules: - - cancelled - modernize: - disable: - - omitzero - perfsprint: - int-conversion: true - err-error: true - errorf: true - sprintf1: true - strconcat: true - revive: - confidence: 0.01 - rules: - - name: blank-imports - - name: bool-literal-in-expr - - name: constant-logical-expr - - name: context-as-argument - arguments: - - allowTypesBefore: '*testing.T' - disabled: true - - name: context-keys-type - - name: deep-exit - - name: defer - arguments: - - - call-chain - - loop - - name: dot-imports - - name: duplicated-imports - - name: early-return - arguments: - - preserveScope - - name: empty-block - - name: empty-lines - - name: error-naming - - name: error-return - - name: error-strings - - name: errorf - - name: exported - arguments: - - sayRepetitiveInsteadOfStutters - - name: flag-parameter - - name: identical-branches - - name: if-return - - name: import-shadowing - - name: increment-decrement - - name: indent-error-flow - arguments: - - preserveScope - - name: package-comments - - name: range - - name: range-val-in-closure - - name: range-val-address - - name: redefines-builtin-id - - name: string-format - arguments: - - - panic - - /^[^\n]*$/ - - must not contain line breaks - - name: struct-tag - - name: superfluous-else - arguments: - - preserveScope - - name: time-equal - - name: unconditional-recursion - - name: unexported-return - - name: unhandled-error - arguments: - - fmt.Fprint - - fmt.Fprintf - - fmt.Fprintln - - fmt.Print - - fmt.Printf - - fmt.Println - - name: unused-parameter - - name: unused-receiver - - name: unnecessary-stmt - - name: use-any - - name: useless-break - - name: var-declaration - - name: var-naming - arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - - - skip-package-name-collision-with-go-std: true - - name: waitgroup-by-value - testifylint: - enable-all: true - disable: - - float-compare - - go-require - - require-error - usetesting: - context-background: 
true - context-todo: true - exclusions: - generated: lax - presets: - - common-false-positives - - legacy - - std-error-handling - rules: - - linters: - - revive - path: schema/v.*/types/.* - text: avoid meaningless package names - # TODO: Having appropriate comments for exported objects helps development, - # even for objects in internal packages. Appropriate comments for all - # exported objects should be added and this exclusion removed. - - linters: - - revive - path: .*internal/.* - text: exported (method|function|type|const) (.+) should have comment or be unexported - # Yes, they are, but it's okay in a test. - - linters: - - revive - path: _test\.go - text: exported func.*returns unexported type.*which can be annoying to use - # Example test functions should be treated like main. - - linters: - - revive - path: example.*_test\.go - text: calls to (.+) only in main[(][)] or init[(][)] functions - # It's okay to not run gosec and perfsprint in a test. - - linters: - - gosec - - perfsprint - path: _test\.go - # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) - # as we commonly use it in tests and examples. - - linters: - - gosec - text: 'G404:' - # Ignoring gosec G402: TLS MinVersion too low - # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - - linters: - - gosec - text: 'G402: TLS MinVersion too low.' 
-issues: - max-issues-per-linter: 0 - max-same-issues: 0 -formatters: - enable: - - gofumpt - - goimports - - golines - settings: - gofumpt: - extra-rules: true - goimports: - local-prefixes: - - go.opentelemetry.io/otel - golines: - max-len: 120 - exclusions: - generated: lax diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore deleted file mode 100644 index 994b677df7..0000000000 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ /dev/null @@ -1,13 +0,0 @@ -http://localhost -https://localhost -http://jaeger-collector -https://github.com/open-telemetry/opentelemetry-go/milestone/ -https://github.com/open-telemetry/opentelemetry-go/projects -# Weaver model URL for semantic-conventions repository. -https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] -file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries -file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual -http://4.3.2.1:78/user/123 -file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317 -# URL works, but it has blocked link checkers. 
-https://dl.acm.org/doi/10.1145/198429.198435 diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml deleted file mode 100644 index 3202496c35..0000000000 --- a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Default state for all rules -default: true - -# ul-style -MD004: false - -# hard-tabs -MD010: false - -# line-length -MD013: false - -# no-duplicate-header -MD024: - siblings_only: true - -#single-title -MD025: false - -# ol-prefix -MD029: - style: ordered - -# no-inline-html -MD033: false - -# fenced-code-language -MD040: false - diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md deleted file mode 100644 index 20edda4418..0000000000 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ /dev/null @@ -1,3739 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - -This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - - - - -## [1.43.0/0.65.0/0.19.0] 2026-04-02 - -### Added - -- Add `IsRandom` and `WithRandom` on `TraceFlags`, and `IsRandom` on `SpanContext` in `go.opentelemetry.io/otel/trace` for [W3C Trace Context Level 2 Random Trace ID Flag](https://www.w3.org/TR/trace-context-2/#random-trace-id-flag) support. (#8012) -- Add service detection with `WithService` in `go.opentelemetry.io/otel/sdk/resource`. (#7642) -- Add `DefaultWithContext` and `EnvironmentWithContext` in `go.opentelemetry.io/otel/sdk/resource` to support plumbing `context.Context` through default and environment detectors. (#8051) -- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. 
(#8038) -- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#8038) -- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#8038) -- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#8038) -- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#8038) -- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#8038) -- Support attributes with empty value (`attribute.EMPTY`) in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8038) -- Add support for per-series start time tracking for cumulative metrics in `go.opentelemetry.io/otel/sdk/metric`. - Set `OTEL_GO_X_PER_SERIES_START_TIMESTAMPS=true` to enable. (#8060) -- Add `WithCardinalityLimitSelector` for metric reader for configuring cardinality limits specific to the instrument kind. (#7855) - -### Changed - -- Introduce the `EMPTY` Type in `go.opentelemetry.io/otel/attribute` to reflect that an empty value is now a valid value, with `INVALID` remaining as a deprecated alias of `EMPTY`. (#8038) -- Improve slice handling in `go.opentelemetry.io/otel/attribute` to optimize short slice values with fixed-size fast paths. (#8039) -- Improve performance of span metric recording in `go.opentelemetry.io/otel/sdk/trace` by returning early if self-observability is not enabled. (#8067) -- Improve formatting of metric data diffs in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#8073) - -### Deprecated - -- Deprecate `INVALID` in `go.opentelemetry.io/otel/attribute`. Use `EMPTY` instead. (#8038) - -### Fixed - -- Return spec-compliant `TraceIdRatioBased` description. 
This is a breaking behavioral change, but it is necessary to - make the implementation [spec-compliant](https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased). (#8027) -- Fix a race condition in `go.opentelemetry.io/otel/sdk/metric` where the lastvalue aggregation could collect the value 0 even when no zero-value measurements were recorded. (#8056) -- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. - Responses exceeding the limit are treated as non-retryable errors. (#8108) -- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. - Responses exceeding the limit are treated as non-retryable errors. (#8108) -- Limit HTTP response body to 4 MiB in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to mitigate excessive memory usage caused by a misconfigured or malicious server. - Responses exceeding the limit are treated as non-retryable errors. (#8108) -- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `kenv` command on BSD. (#8113) -- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` to correctly handle HTTP2 GOAWAY frame. (#8096) - -## [1.42.0/0.64.0/0.18.0/0.0.16] 2026-03-06 - -### Added - -- Add `go.opentelemetry.io/otel/semconv/v1.40.0` package. - The package contains semantic conventions from the `v1.40.0` version of the OpenTelemetry Semantic Conventions. - See the [migration documentation](./semconv/v1.40.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.39.0`. (#7985) -- Add `Err` and `SetErr` on `Record` in `go.opentelemetry.io/otel/log` to attach an error and set record exception attributes in `go.opentelemetry.io/otel/log/sdk`. 
(#7924) - -### Changed - -- `TracerProvider.ForceFlush` in `go.opentelemetry.io/otel/sdk/trace` joins errors together and continues iteration through SpanProcessors as opposed to returning the first encountered error without attempting exports on subsequent SpanProcessors. (#7856) - -### Fixed - -- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` to correctly handle HTTP2 GOAWAY frame. (#7931) -- Fix semconv v1.39.0 generated metric helpers skipping required attributes when extra attributes were empty. (#7964) -- Preserve W3C TraceFlags bitmask (including the random Trace ID flag) during trace context extraction and injection in `go.opentelemetry.io/otel/propagation`. (#7834) - -### Removed - -- Drop support for [Go 1.24]. (#7984) - -## [1.41.0/0.63.0/0.17.0/0.0.15] 2026-03-02 - -This release is the last to support [Go 1.24]. -The next release will require at least [Go 1.25]. - -### Added - -- Support testing of [Go 1.26]. (#7902) - -### Fixed - -- Update `Baggage` in `go.opentelemetry.io/otel/propagation` and `Parse` and `New` in `go.opentelemetry.io/otel/baggage` to comply with W3C Baggage specification limits. - `New` and `Parse` now return partial baggage along with an error when limits are exceeded. - Errors from baggage extraction are reported to the global error handler. (#7880) -- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7914) -- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7914) -- Return an error when the endpoint is configured as insecure and with TLS configuration in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7914) - -## [1.40.0/0.62.0/0.16.0] 2026-02-02 - -### Added - -- Add `AlwaysRecord` sampler in `go.opentelemetry.io/otel/sdk/trace`. 
(#7724) -- Add `Enabled` method to all synchronous instrument interfaces (`Float64Counter`, `Float64UpDownCounter`, `Float64Histogram`, `Float64Gauge`, `Int64Counter`, `Int64UpDownCounter`, `Int64Histogram`, `Int64Gauge`) in `go.opentelemetry.io/otel/metric`. - This stabilizes the synchronous instrument enabled feature, allowing users to check if an instrument will process measurements before performing computationally expensive operations. (#7763) -- Add `go.opentelemetry.io/otel/semconv/v1.39.0` package. - The package contains semantic conventions from the `v1.39.0` version of the OpenTelemetry Semantic Conventions. - See the [migration documentation](./semconv/v1.39.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.38.0`. (#7783, #7789) - -### Changed - -- Improve the concurrent performance of `HistogramReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar` by 4x. (#7443) -- Improve the concurrent performance of `FixedSizeReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar`. (#7447) -- Improve performance of concurrent histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7474) -- Improve performance of concurrent synchronous gauge measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7478) -- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`. (#7492) -- `Exporter` in `go.opentelemetry.io/otel/exporters/prometheus` ignores metrics with the scope `go.opentelemetry.io/contrib/bridges/prometheus`. - This prevents scrape failures when the Prometheus exporter is misconfigured to get data from the Prometheus bridge. (#7688) -- Improve performance of concurrent exponential histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. 
(#7702) -- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854) -- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854) - -### Fixed - -- Fix bad log message when key-value pairs are dropped because of key duplication in `go.opentelemetry.io/otel/sdk/log`. (#7662) -- Fix `DroppedAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not count the non-attribute key-value pairs dropped because of key duplication. (#7662) -- Fix `SetAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not log that attributes are dropped when they are actually not dropped. (#7662) -- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to correctly handle HTTP/2 `GOAWAY` frame. (#7794) -- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `ioreg` command on Darwin (macOS). (#7818) - -### Deprecated - -- Deprecate `go.opentelemetry.io/otel/exporters/zipkin`. - For more information, see the [OTel blog post deprecating the Zipkin exporter](https://opentelemetry.io/blog/2025/deprecating-zipkin-exporters/). (#7670) - -## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05 - -### Added - -- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175) -- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages. - This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. 
(#7287) -- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. - Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345) -- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353) -- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434) -- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548) -- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7459) -- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486) -- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374) -- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512) -- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524) -- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571) -- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608) -- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`. - All `Processor` implementations now include an `Enabled` method. (#7639) -- The `go.opentelemetry.io/otel/semconv/v1.38.0` package. - The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions. 
- See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7648) - -### Changed - -- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set. - Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175) -- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266) -- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/metric` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266) -- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266) -- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302) -- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306) -- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`. - ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded. - Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default. 
- To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363) -- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371) -- The default `TranslationStrategy` in `go.opentelemetry.io/otel/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421) -- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7427) -- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7438) -- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types. - If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442) - -### Fixed - -- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them. - Attributes with duplicate keys will use the last value passed. (#7300) -- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357) -- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372) -- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372) -- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. 
(#7372) -- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372) -- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372) -- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372) -- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403) -- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655) -- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656) - -### Removed - -- Drop support for [Go 1.23]. (#7274) -- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`. - The `Enabled` method has been added to the `Processor` interface instead. - All `Processor` implementations must now implement the `Enabled` method. - Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639) - -## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 - -This release is the last to support [Go 1.23]. -The next release will require at least [Go 1.24]. - -### Added - -- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772) -- Add template attribute functions to the `go.opentelemetry.io/otel/semconv/v1.34.0` package. 
(#6939) - - `ContainerLabel` - - `DBOperationParameter` - - `DBSystemParameter` - - `HTTPRequestHeader` - - `HTTPResponseHeader` - - `K8SCronJobAnnotation` - - `K8SCronJobLabel` - - `K8SDaemonSetAnnotation` - - `K8SDaemonSetLabel` - - `K8SDeploymentAnnotation` - - `K8SDeploymentLabel` - - `K8SJobAnnotation` - - `K8SJobLabel` - - `K8SNamespaceAnnotation` - - `K8SNamespaceLabel` - - `K8SNodeAnnotation` - - `K8SNodeLabel` - - `K8SPodAnnotation` - - `K8SPodLabel` - - `K8SReplicaSetAnnotation` - - `K8SReplicaSetLabel` - - `K8SStatefulSetAnnotation` - - `K8SStatefulSetLabel` - - `ProcessEnvironmentVariable` - - `RPCConnectRPCRequestMetadata` - - `RPCConnectRPCResponseMetadata` - - `RPCGRPCRequestMetadata` - - `RPCGRPCResponseMetadata` -- Add `ErrorType` attribute helper function to the `go.opentelemetry.io/otel/semconv/v1.34.0` package. (#6962) -- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968) -- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179) -- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001) -- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`. - Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209) -- The `go.opentelemetry.io/otel/semconv/v1.36.0` package. - The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions. 
- See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0.`(#7032, #7041) -- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111) -- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`. - Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121) -- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. - Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133) -- Support testing of [Go 1.25]. (#7187) -- The `go.opentelemetry.io/otel/semconv/v1.37.0` package. - The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions. - See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0.`(#7254) - -### Changed - -- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791) -- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908) -- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. 
(#7094) - -### Fixed - -- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002) -- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088) -- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195) -- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199) - -### Deprecated - -- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111) -- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166) - -## [0.59.1] 2025-07-21 - -### Changed - -- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046) -- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled. - It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044) - -### Fixed - -- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets. - E.g. `{spans}`. (#7044) - -## [1.37.0/0.59.0/0.13.0] 2025-06-25 - -### Added - -- The `go.opentelemetry.io/otel/semconv/v1.33.0` package. - The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions. - See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0.`(#6799) -- The `go.opentelemetry.io/otel/semconv/v1.34.0` package. 
- The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812) -- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) -- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947) -- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825) -- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825) -- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839) - -### Changed - -- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827) -- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829) -- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832) -- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834) -- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835) -- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836) -- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. 
(#6864) -- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898) - -### Fixed - -- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710) -- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710) -- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710) -- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710) -- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822) -- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914) - -### Removed - -- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770) - -## [0.12.2] 2025-05-22 - -### Fixed - -- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804) -- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804) -- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. 
(#6804) - -## [0.12.1] 2025-05-21 - -### Fixes - -- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800) -- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800) -- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800) - -## [1.36.0/0.58.0/0.12.0] 2025-05-20 - -### Added - -- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421) -- The `go.opentelemetry.io/otel/semconv/v1.31.0` package. - The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions. - See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479) -- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507) -- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751) -- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752) -- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688) -- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973) -- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973) -- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. 
(#5973) -- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662) -- The `go.opentelemetry.io/otel/semconv/v1.32.0` package. - The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions. - See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`(#6782) -- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794) -- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796) - -### Removed - -- Drop support for [Go 1.22]. (#6381, #6418) -- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494) -- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492) -- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507) -- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662) - -### Changed - -- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`. - This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. - This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6433) -- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455) -- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. (#6465) -- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466) -- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. 
(#6507) -- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641) - -### Deprecated - -- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449) - -### Fixes - -- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392) -- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456) -- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472) - -## [1.35.0/0.57.0/0.11.0] 2025-03-05 - -This release is the last to support [Go 1.22]. -The next release will require at least [Go 1.23]. - -### Added - -- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180) -- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187) -- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187) -- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187) -- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193) -- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193) -- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211) -- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211) -- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210) -- The `go.opentelemetry.io/otel/semconv/v1.28.0` package. 
- The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions. - See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`(#6236) -- The `go.opentelemetry.io/otel/semconv/v1.30.0` package. - The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions. - See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`(#6240) -- Document the pitfalls of using `Resource` as a comparable type. - `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272) -- Support [Go 1.24]. (#6304) -- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. - It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`. - Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317) - -### Changed - -- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`. - This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. - This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198) - -### Fixes - -- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368) -- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. 
(#6369) - -## [1.34.0/0.56.0/0.10.0] 2025-01-17 - -### Changed - -- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) - -### Fixed - -- Relax minimum Go version to 1.22.0 in various modules. (#6073) -- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) -- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) - -## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 - -### Added - -- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) -- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. - This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. - Users can use it to avoid performing computationally expensive operations when recording measurements. - It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) - -### Changed - -- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. - See that package for more information. (#5920) -- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) -- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) -- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) -- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. 
(#6011) -- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) - -### Fixed - -- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) -- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) -- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) -- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) -- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) -- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) - -## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 - -### Added - -- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) -- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) -- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) -- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) -- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) -- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. - The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) -- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. 
(#5903) -- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) -- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) -- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) -- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) -- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) -- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) - -### Changed - -- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) -- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) -- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) -- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) -- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) - -### Fixed - -- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. 
(#5892) -- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) -- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) -- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) -- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) -- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) -- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) -- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) -- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) - -### Removed - -- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) - -## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 - -### Added - -- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) -- Add `WithExportBufferSize` option to log batch processor.(#5877) - -### Changed - -- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. 
Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) -- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) -- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) -- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) -- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) -- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) -- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) - -### Deprecated - -- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) - -### Fixed - -- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) -- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) -- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) -- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) -- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) - -## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 - -### Added - -- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) -- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) -- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. 
(#5773) -- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) - -### Fixed - -- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) -- Fix panic on instruments creation when setting meter provider. (#5758) -- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) - -### Removed - -- Drop support for [Go 1.21]. (#5736, #5740, #5800) - -## [1.29.0/0.51.0/0.5.0] 2024-08-23 - -This release is the last to support [Go 1.21]. -The next release will require at least [Go 1.22]. - -### Added - -- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) -- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) -- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. - This new module contains an OTLP exporter that transmits log telemetry using gRPC. - This module is unstable and breaking changes may be introduced. - See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) -- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) -- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) -- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) -- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. - This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. - It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. 
- It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) -- Support [Go 1.23]. (#5720) - -### Changed - -- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) -- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) -- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) -- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) -- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) -- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. - See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) -- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) -- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) - -### Fixed - -- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) -- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. 
(#5541) -- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) -- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) -- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) -- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) -- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) -- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) -- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) -- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) -- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) -- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) -- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) - -### Removed - -- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. 
(#5692) -- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) - -## [1.28.0/0.50.0/0.4.0] 2024-07-02 - -### Added - -- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. - This method is used to check if an `Instrument` instance is a zero-value. (#5431) -- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) -- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. - The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) -- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) -- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) - -### Changed - -- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) -- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) -- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) -- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) - - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. -- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) -- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. 
(#5490) -- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) -- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) -- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) -- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) - -### Fixed - -- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) -- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) -- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) -- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) -- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) -- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) -- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) -- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) -- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. 
(#5508) -- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) -- Fix stale timestamps reported by the last-value aggregation. (#5517) -- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) -- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) -- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) - -## [1.27.0/0.49.0/0.3.0] 2024-05-21 - -### Added - -- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242) -- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) -- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) -- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) -- Add metrics in the `otel-collector` example. (#5283) -- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) - - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. - - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. -- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) - -### Changed - -- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) -- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. 
(#5189) -- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) -- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) -- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241) -- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272) -- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) -- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) -- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) -- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374) - -### Fixed - -- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) -- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) -- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) -- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) -- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) - -## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 - -### Added - -- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. 
(#5134) -- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194) -- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`. - This new module contains the Go implementation of the OpenTelemetry Logs SDK. - This module is unstable and breaking changes may be introduced. - See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) -- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. - This new module contains an OTLP exporter that transmits log telemetry using HTTP. - This module is unstable and breaking changes may be introduced. - See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) -- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. - This new module contains an exporter prints log records to STDOUT. - This module is unstable and breaking changes may be introduced. - See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) -- The `go.opentelemetry.io/otel/semconv/v1.25.0` package. - The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254) - -### Changed - -- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) -- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) -- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244) - -### Fixed - -- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) - -## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 - -### Added - -- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. 
(#4906) -- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp`. (#4906) -- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032) -- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`. - This method is used to notify users if a log record will be emitted or not. (#5071) -- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`. - This value represents an unset severity level. (#5072) -- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076) -- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`. - This package is provided with the anticipation that all functionality will be migrate to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes. - At which point, users will be required to migrage their code, and this package will be deprecated then removed. (#5085) -- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100) -- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) -- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) -- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) -- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) -- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) - -### Changed - -- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. 
(#5049) -- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) - Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). - Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated. - -### Fixed - -- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) -- Prevent default `ErrorHandler` self-delegation. (#5137) -- Update all dependencies to address [GO-2024-2687]. (#5139) - -### Removed - -- Drop support for [Go 1.20]. (#4967) - -### Deprecated - -- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734) -- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734) -- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734) - -## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 - -This release is the last to support [Go 1.20]. -The next release will require at least [Go 1.21]. - -### Added - -- Support [Go 1.22]. (#4890) -- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900) -- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900) -- The `go.opentelemetry.io/otel/log` module is added. - This module includes OpenTelemetry Go's implementation of the Logs Bridge API. - This module is in an alpha state, it is subject to breaking changes. 
- See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- Add ARM64 platform to the compatibility testing suite. (#4994) - -### Fixed - -- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945) -- Fix negative buckets in output of exponential histograms. (#4956) - -## [1.23.1] 2024-02-07 - -### Fixed - -- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888) - -## [1.23.0] 2024-02-06 - -This release contains the first stable, `v1`, release of the following modules: - -- `go.opentelemetry.io/otel/bridge/opencensus` -- `go.opentelemetry.io/otel/bridge/opencensus/test` -- `go.opentelemetry.io/otel/example/opencensus` -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` -- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` - -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Added - -- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808) -- Experimental exemplar exporting is added to the metric SDK. - See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871) -- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`. - This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876) - -### Changed - -- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now returns a partial result if there is a schema URL merge conflict. 
- Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned. - It is up to the user to decide if they want to use the returned `Resource` or not. - It may have desired attributes overwritten or include stale semantic conventions. (#4876) - -### Fixed - -- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449) -- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820) -- Fix missing `Mix` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/sdk/metric/metricdata`. (#4827) - -## [1.23.0-rc.1] 2024-01-18 - -This is a release candidate for the v1.23.0 release. -That release is expected to include the `v1` release of the following modules: - -- `go.opentelemetry.io/otel/bridge/opencensus` -- `go.opentelemetry.io/otel/bridge/opencensus/test` -- `go.opentelemetry.io/otel/example/opencensus` -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` -- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` - -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -## [1.22.0/0.45.0] 2024-01-17 - -### Added - -- The `go.opentelemetry.io/otel/semconv/v1.22.0` package. - The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735) -- The `go.opentelemetry.io/otel/semconv/v1.23.0` package. - The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746) -- The `go.opentelemetry.io/otel/semconv/v1.23.1` package. 
- The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749) -- The `go.opentelemetry.io/otel/semconv/v1.24.0` package. - The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770) -- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733) -- Experimental cardinality limiting is added to the metric SDK. - See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457) -- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804) - -### Changed - -- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754) -- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754) -- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`. - If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g `if ctx.Err() != nil`). (#4671) -- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722) -- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721) -- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743) -- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774) -- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775) -- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. 
(#4818) -- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804) - -### Fixed - -- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755) -- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756) -- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772) -- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776) -- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that requires escaping during propagation. (#4804) -- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742) - -## [1.21.0/0.44.0] 2023-11-16 - -### Removed - -- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706) -- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707) -- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708) -- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723) - -### Fixed - -- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719) -- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719) - -## [1.20.0/0.43.0] 2023-11-10 - -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. 
- -### Added - -- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) -- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) -- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) -- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) -- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) -- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) -- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) -- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) -- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) -- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622) -- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) -- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) -- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) - -### Deprecated - -- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) -- Deprecate `go.opentelemetry.io/otel/example/fib` package is in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) -- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. - Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. 
(#4620) -- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) -- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693) - -### Changed - -- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) -- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. - This extends the `TracerProvider` interface and is a breaking change for any existing implementation. - Implementers need to update their implementations based on what they want the default behavior of the interface to be. - See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) -- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. - This extends the `Tracer` interface and is a breaking change for any existing implementation. - Implementers need to update their implementations based on what they want the default behavior of the interface to be. - See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) -- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. - This extends the `Span` interface and is a breaking change for any existing implementation. - Implementers need to update their implementations based on what they want the default behavior of the interface to be. - See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. 
(#4620) -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) -- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) -- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) -- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) -- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) -- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) -- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) -- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679) - -### Fixed - -- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667) -- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. (#4699) -- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699) -- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. 
(#4699) -- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as a whitespace. (#4699) -- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as a whitespace. (#4699) -- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) -- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) -- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) - -## [1.19.0/0.42.0/0.0.7] 2023-09-28 - -This release contains the first stable release of the OpenTelemetry Go [metric SDK]. -Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Added - -- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539) -- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507) - -### Changed - -- Allow '/' characters in metric instrument names. (#4501) -- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507) -- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. 
(#4535) - -### Fixed - -- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) - -### Removed - -- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566) - -## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14 - -This is a release candidate for the v1.19.0/v0.42.0 release. -That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Changed - -- Allow '/' characters in metric instrument names. (#4501) - -### Fixed - -- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499) - -## [1.18.0/0.41.0/0.0.6] 2023-09-12 - -This release drops the compatibility guarantee of [Go 1.19]. - -### Added - -- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473) -- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447) - -### Changed - -- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483) - -### Deprecated - -- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541). - The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470) - -### Removed - -- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467) -- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. 
(#4467) -- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468) -- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469) -- Dropped guaranteed support for versions of Go less than 1.20. (#4481) - -## [1.17.0/0.40.0/0.0.5] 2023-08-28 - -### Added - -- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) -- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) -- Add support for exponential histogram aggregations. - A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245) -- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272) -- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272) -- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287) -- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306) -- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315) -- The `go.opentelemetry.io/otel/semconv/v1.21.0` package. - The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362) -- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365) -- Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. 
(#4381) -- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus` (#4374) -- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435) -- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437) -- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444) -- Support Go 1.21. (#4463) - -### Changed - -- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145) -- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202) -- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210) -- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244) -- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244) -- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221) -- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) -- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. 
(#4272) -- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290) -- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289) -- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332) -- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333) -- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377) -- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408) -- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434) -- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346) - -### Removed - -- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`. - Use the added `WithProducer` option instead. (#4346) -- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`. - Notice that `PeriodicReader.ForceFlush` is still available. (#4375) - -### Fixed - -- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143) -- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307) -- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. 
(#4317) -- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337) -- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338) -- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350) -- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350) -- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349) -- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353) -- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369) -- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846) -- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846) -- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846) -- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846) -- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. 
(#4401, #3846) -- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395) -- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373) -- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409) -- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428) - -### Deprecated - -- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated. - OpenTelemetry dropped support for Jaeger exporter in July 2023. - Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` - or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423) -- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423) -- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420) -- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420) -- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420) -- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420) -- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421) -- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421) -- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421) -- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425) -- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. 
(#4425) -- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425) -- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425) -- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425) -- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated. - Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435) - -## [1.16.0/0.39.0] 2023-05-18 - -This release contains the first stable release of the OpenTelemetry Go [metric API]. -Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Added - -- The `go.opentelemetry.io/otel/semconv/v1.19.0` package. - The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848) -- The `go.opentelemetry.io/otel/semconv/v1.20.0` package. - The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078) -- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165) -- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222) -- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271) - -### Changed - -- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049) -- `MeterProvider` returns noop meters once it has been shutdown. (#4154) - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed. - Use `go.opentelemetry.io/otel/metric` instead. 
(#4055) - -### Fixed - -- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077) - -## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03 - -This is a release candidate for the v1.16.0/v0.39.0 release. -That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Added - -- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039) - - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. - - Use `GetMeterProvider` for a global `metric.MeterProvider`. - - Use `SetMeterProvider` to set the global `metric.MeterProvider`. - -### Changed - -- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set. - This stages the metric API to be released as a stable module. (#4038) - -### Removed - -- The `go.opentelemetry.io/otel/metric/global` package is removed. - Use `go.opentelemetry.io/otel` instead. (#4039) - -## [1.15.1/0.38.1] 2023-05-02 - -### Fixed - -- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041) - -## [1.15.0/0.38.0] 2023-04-27 - -### Added - -- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) -- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949) -- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) -- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. 
(#3971) - - The `AddConfig` used to hold configuration for addition measurements - - `NewAddConfig` used to create a new `AddConfig` - - `AddOption` used to configure an `AddConfig` - - The `RecordConfig` used to hold configuration for recorded measurements - - `NewRecordConfig` used to create a new `RecordConfig` - - `RecordOption` used to configure a `RecordConfig` - - The `ObserveConfig` used to hold configuration for observed measurements - - `NewObserveConfig` used to create a new `ObserveConfig` - - `ObserveOption` used to configure an `ObserveConfig` -- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. - They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) -- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) -- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956) - -### Changed - -- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) -- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. - This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) -- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) - - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` -- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. 
(#3966) -- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974) -- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) - - The `Int64Counter.Add` method now accepts `...AddOption` - - The `Float64Counter.Add` method now accepts `...AddOption` - - The `Int64UpDownCounter.Add` method now accepts `...AddOption` - - The `Float64UpDownCounter.Add` method now accepts `...AddOption` - - The `Int64Histogram.Record` method now accepts `...RecordOption` - - The `Float64Histogram.Record` method now accepts `...RecordOption` - - The `Int64Observer.Observe` method now accepts `...ObserveOption` - - The `Float64Observer.Observe` method now accepts `...ObserveOption` -- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) - - The `Observer.ObserveInt64` method now accepts `...ObserveOption` - - The `Observer.ObserveFloat64` method now accepts `...ObserveOption` -- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986) - -### Fixed - -- `TracerProvider` allows calling `Tracer()` while it's shutting down. - It used to deadlock. (#3924) -- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949) -- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951) -- Automatically figure out the default aggregation with `aggregation.Default`. (#3967) - -### Deprecated - -- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated. - Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. 
(#4018) - -## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23 - -This is a release candidate for the v1.15.0/v0.38.0 release. -That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -### Added - -- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812) -- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828) -- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`. - Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849) -- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895) -- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900) -- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854) - -### Changed - -- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832) -- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832) -- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833) -- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844) -- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. 
(#3849) -- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853) -- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892) -- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) -- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) -- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900) - -### Fixed - -- `TracerProvider` consistently doesn't allow to register a `SpanProcessor` after shutdown. (#3845) - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829) -- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892) -- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. - Use the added `float64` instrument configuration instead. (#3895) -- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. - Use the added `int64` instrument configuration instead. (#3895) -- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893) - -## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01 - -This is a release candidate for the v1.15.0/v0.38.0 release. -That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. -See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. - -This release drops the compatibility guarantee of [Go 1.18]. - -### Added - -- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818) - - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. 
- - Use `GetMeterProvider` for a global `metric.MeterProvider`. - - Use `SetMeterProvider` to set the global `metric.MeterProvider`. - -### Changed - -- Dropped compatibility testing for [Go 1.18]. - The project no longer guarantees support for this version of Go. (#3813) - -### Fixed - -- Handle empty environment variable as if they were not set. (#3764) -- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823) -- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899) -- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899) - -### Deprecated - -- The `go.opentelemetry.io/otel/metric/global` package is deprecated. - Use `go.opentelemetry.io/otel` instead. (#3818) - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814) - -## [1.14.0/0.37.0/0.0.4] 2023-02-27 - -This release is the last to support [Go 1.18]. -The next release will require at least [Go 1.19]. - -### Added - -- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) -- Support [Go 1.20]. (#3693) -- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. - The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. 
(#3719) - - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: - - `OtelScopeNameKey` -> `OTelScopeNameKey` - - `OtelScopeVersionKey` -> `OTelScopeVersionKey` - - `OtelLibraryNameKey` -> `OTelLibraryNameKey` - - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` - - `OtelStatusCodeKey` -> `OTelStatusCodeKey` - - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` - - `OtelStatusCodeOk` -> `OTelStatusCodeOk` - - `OtelStatusCodeError` -> `OTelStatusCodeError` - - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: - - `OtelScopeName` -> `OTelScopeName` - - `OtelScopeVersion` -> `OTelScopeVersion` - - `OtelLibraryName` -> `OTelLibraryName` - - `OtelLibraryVersion` -> `OTelLibraryVersion` - - `OtelStatusDescription` -> `OTelStatusDescription` -- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. - See the [README](./bridge/opentracing/README.md) for more information. (#3570) -- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) -- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) -- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) - - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. - - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. - -### Changed - -- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) -- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. - This change is made to enable memory reuse by SDK users. 
(#3732) -- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) - -### Fixed - -- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) -- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) -- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) -- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) -- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) -- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) -- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) - -### Deprecated - -- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. - Use the equivalent unit string instead. (#3776) - - Use `"1"` instead of `unit.Dimensionless` - - Use `"By"` instead of `unit.Bytes` - - Use `"ms"` instead of `unit.Milliseconds` - -## [1.13.0/0.36.0] 2023-02-07 - -### Added - -- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. - These functions ensure semantic convention type correctness. (#3675) - -### Fixed - -- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687) - - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. 
(#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) - -## [1.12.0/0.35.0] 2023-01-28 - -### Added - -- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. - This option is used to configure `int64` Observer callbacks during their creation. (#3507) -- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. - This option is used to configure `float64` Observer callbacks during their creation. (#3507) -- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. - These additions are used to enable external metric Producers. (#3524) -- The `Callback` function type to `go.opentelemetry.io/otel/metric`. - This new named function type is registered with a `Meter`. (#3564) -- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. - The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) - - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. 
- - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. - - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. -- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. - The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) -- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. - The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) -- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. - The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) -- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. 
- These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586) - - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` - - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` - - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` - - `Int64ObservableCounter` replaces the `asyncint64.Counter` - - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` - - `Int64ObservableGauge` replaces the `asyncint64.Gauge` - - `Float64Counter` replaces the `syncfloat64.Counter` - - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` - - `Float64Histogram` replaces the `syncfloat64.Histogram` - - `Int64Counter` replaces the `syncint64.Counter` - - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` - - `Int64Histogram` replaces the `syncint64.Histogram` -- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. - This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) -- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. - This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) -- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. - The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) - -### Changed - -- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) -- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507) - - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. 
- - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. - - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. - - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. -- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. - This `Registration` can be used to unregister callbacks. (#3522) -- Global error handler uses an atomic value instead of a mutex. (#3543) -- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) -- Global logger uses an atomic value instead of a mutex. (#3545) -- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) -- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. - This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) -- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name. - Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) -- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) -- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. 
(#3562) - - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` - - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` - - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` - - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` - - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` - - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` -- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. - - The named `Callback` replaces the inline function parameter. (#3564) - - `Callback` is required to return an error. (#3576) - - `Callback` accepts the added `Observer` parameter added. - This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) - - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) -- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. - This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. - Instead it uses the `net.sock.peer` attributes. (#3581) -- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) - -### Fixed - -- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) -- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. - Trying to register a callback with instruments from a different meter will result in an error being returned. 
(#3584) - -### Deprecated - -- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. - Use `NewMetricProducer` instead. (#3541) -- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. - Use `NewTracerProvider` instead. (#3116) - -### Removed - -- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) -- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. - Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - - The `Counter` method is replaced by `Meter.Int64ObservableCounter` - - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` - - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` -- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. - Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. 
(#3530) - - The `Counter` method is replaced by `Meter.Float64ObservableCounter` - - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` - - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` -- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. - Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - - The `Counter` method is replaced by `Meter.Int64Counter` - - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` - - The `Histogram` method is replaced by `Meter.Int64Histogram` -- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. - Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - - The `Counter` method is replaced by `Meter.Float64Counter` - - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` - - The `Histogram` method is replaced by `Meter.Float64Histogram` - -## [1.11.2/0.34.0] 2022-12-05 - -### Added - -- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. - This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) -- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. 
- This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) -- OTLP exporters now recognize: (#3363) - - `OTEL_EXPORTER_OTLP_INSECURE` - - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` - - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` - - `OTEL_EXPORTER_OTLP_CLIENT_KEY` - - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` - - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` - - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` -- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. - These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) -- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. - These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) -- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) -- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) - -### Changed - -- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. - Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. - The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) -- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. 
(#3260) -- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) -- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) -- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) - -### Fixed - -- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) -- Remove comparable requirement for `Reader`s. (#3387) -- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) -- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) -- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) -- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) -- Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) -- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) -- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) -- Prevent duplicate Prometheus description, unit, and type. (#3469) -- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. 
(#3489) - -### Removed - -- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) -- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) - -### Deprecated - -- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. - Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) - -## [1.11.1/0.33.0] 2022-10-19 - -### Added - -- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. - By default, it will register with the default Prometheus registerer. - A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) -- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) -- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) - -### Changed - -- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. - It will return an error if the exporter fails to register with Prometheus. (#3239) - -### Fixed - -- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) -- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. - This fixes the implementation to be compliant with the W3C specification. (#3226) -- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) -- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. 
(#3268) -- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) -- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) -- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) -- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) -- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. - Instead the exporter is defined as an "unchecked" collector for Prometheus. - This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) -- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) -- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. - This can be disabled using the `WithoutUnits()` option added to that package. (#3352) - -## [1.11.0/0.32.3] 2022-10-12 - -### Added - -- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) - -### Changed - -- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) -- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. - This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) - -## [0.32.2] Metric SDK (Alpha) - 2022-10-11 - -### Added - -- Added an example of using metric views to customize instruments. 
(#3177) -- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) - -### Changed - -- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) -- Update histogram default bounds to match the requirements of the latest specification. (#3222) -- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) - -### Fixed - -- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) -- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) -- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) -- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) -- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) - -## [0.32.1] Metric SDK (Alpha) - 2022-09-22 - -### Changed - -- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. - Invalid characters are replaced with `_`. (#3212) - -### Added - -- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) -- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) - -### Fixed - -- Updated go.mods to point to valid versions of the sdk. (#3216) -- Set the `MeterProvider` resource on all exported metric data. (#3218) - -## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 - -### Changed - -- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. 
- Please see the package documentation for how the new SDK is initialized and configured. (#3175) -- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) - -### Removed - -- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. - A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. - A replacement package that supports the new metric SDK will be added back in a future release. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. 
(#3175) -- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) -- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) -- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) -- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) -- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175) -- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175) -- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) - -## [1.10.0] - 2022-09-09 - -### Added - -- Support Go 1.19. (#3077) - Include compatibility testing and document support. (#3077) -- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) -- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) - -### Changed - -- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) -- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) -- All exporters will be shutdown even if one reports an error (#3091) -- Ensure valid UTF-8 when truncating over-length attribute values. 
(#3156) - -## [1.9.0/0.0.3] - 2022-08-01 - -### Added - -- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) -- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. - The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) -- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. - The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) -- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) - -### Fixed - -- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) - -## [1.8.0/0.31.0] - 2022-07-08 - -### Added - -- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods -of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) - -### Changed - -- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886) -- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976) -- Move metric no-op implementation from `nonrecording` to `metric` package. (#2866) - -### Removed - -- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917) - -### Deprecated - -- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. - Use the equivalent `Scope` struct instead. (#2977) -- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. - Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) - -## [1.7.0/0.30.0] - 2022-04-28 - -### Added - -- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. 
- The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) -- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. - The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792) -- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. - The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842) -- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) - -### Fixed - -- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) -- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786) - -### Changed - -- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) -- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. - The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) -- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. - Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790) - -### Deprecated - -- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. - Use the equivalent `Iterator.Attribute` method instead. (#2790) -- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. - Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) -- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. - Use the equivalent `MergeIterator.Attribute` method instead. 
(#2790) - -### Removed - -- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) -- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) - -## [0.29.0] - 2022-04-11 - -### Added - -- The metrics global package was added back into several test files. (#2764) -- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. - This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) - -### Removed - -- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`. - Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720) - -### Changed - -- Don't panic anymore when setting a global MeterProvider to itself. (#2749) -- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. - This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748) - -## [1.6.3] - 2022-04-07 - -### Fixed - -- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) - -## [1.6.2] - 2022-04-06 - -### Changed - -- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) -- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. - This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748) - -## [1.6.1] - 2022-03-28 - -### Fixed - -- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. 
- Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744) - -### Security - -- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. - This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) - -## [1.6.0/0.28.0] - 2022-03-23 - -### ⚠️ Notice ⚠️ - -This update is a breaking change of the unstable Metrics API. -Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified. - -### Added - -- Add metrics exponential histogram support. - New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) -- Add Go 1.18 to our compatibility tests. (#2679) -- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) -- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660) - -### Changed - -- The metrics API has been significantly changed to match the revised OpenTelemetry specification. - High-level changes include: - - - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. - These `InstrumentProvider`s are managed with a `Meter`. - - Synchronous and asynchronous instruments are grouped into their own packages based on value types. - - Asynchronous callbacks can now be registered with a `Meter`. - - Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) - -### Fixed - -- Fallback to general attribute limits when span specific ones are not set in the environment. 
(#2675, #2677) - -## [1.5.0] - 2022-03-16 - -### Added - -- Log the Exporters configuration in the TracerProviders message. (#2578) -- Added support to configure the span limits with environment variables. - The following environment variables are supported. (#2606, #2637) - - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` - - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` - - `OTEL_SPAN_EVENT_COUNT_LIMIT` - - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` - - `OTEL_SPAN_LINK_COUNT_LIMIT` - - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` - - If the provided environment variables are invalid (negative), the default values would be used. -- Rename the `gc` runtime name to `go` (#2560) -- Add resource container ID detection. (#2418) -- Add span attribute value length limit. - The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. - The default limit for this resource is "unlimited". (#2637) -- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. - This option replaces the `WithSpanLimits` option. - Zero or negative values will not be changed to the default value like `WithSpanLimits` does. - Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. - Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637) - -### Changed - -- Drop oldest tracestate `Member` when capacity is reached. (#2592) -- Add event and link drop counts to the exported data from the `oltptrace` exporter. (#2601) -- Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639) -- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640) -- Introduce new internal `envconfig` package for OTLP exporters. 
(#2608) -- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) - -### Fixed - -- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) -- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) -- Unlimited span limits are now supported (negative values). (#2636, #2637) - -### Deprecated - -- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. - Use `WithRawSpanLimits` instead. - That option allows setting unlimited and zero limits, this option does not. - This option will be kept until the next major version incremented release. (#2637) - -## [1.4.1] - 2022-02-16 - -### Fixed - -- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615) - -## [1.4.0] - 2022-02-11 - -### Added - -- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490) -- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging. - To enable use a logger with Verbosity (V level) `>=1`. (#2500) -- Added support to configure the batch span-processor with environment variables. - The following environment variables are used. (#2515) - - `OTEL_BSP_SCHEDULE_DELAY` - - `OTEL_BSP_EXPORT_TIMEOUT` - - `OTEL_BSP_MAX_QUEUE_SIZE`. - - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` - -### Changed - -- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589) - -### Deprecated - -- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`. - Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382) -- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445) - -### Fixed - -- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461) -- Fix UDP packets overflowing with Jaeger payloads. 
(#2489, #2512) -- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491) -- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493) -- W3C baggage will now decode urlescaped values. (#2529) -- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522) -- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification. - Instead of dropping the least-recently-used attribute, the last added attribute is dropped. - This drop order still only applies to attributes with unique keys not already contained in the span. - If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576) - -### Removed - -- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546) - - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge) - - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram) - - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum) - -## [1.3.0] - 2021-12-10 - -### ⚠️ Notice ⚠️ - -We have updated the project minimum supported Go version to 1.16 - -### Added - -- Added an internal Logger. - This can be used by the SDK and API to provide users with feedback of the internal state. - To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. 
(#2343) -- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425) -- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296) - -### Changed - -- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329) -- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425) -- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425) -- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432) -- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371) - -### Fixed - -- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification. - Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP__ENDPOINT` environment variable is now used without modification of the path. 
- When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433) -- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381) -- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440) - -### Deprecated - -- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425) -- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425) - -### Removed - -- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350) -- Remove the metric Bound Instruments interface and implementations. (#2399) -- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423) -- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348) - -## [1.2.0] - 2021-11-12 - -### Changed - -- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274) -- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274) -- Metrics API cleanup. 
The `metric/sdkapi` package has been created to relocate the API-to-SDK interface: - - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner` - - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`. - - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271) -- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335) - -### Added - -- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) -- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267) -- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334) - -## [1.1.0] - 2021-10-27 - -### Added - -- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) -- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package. - The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320) -- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package. - The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321) -- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package. - The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. 
(#2322) - - When upgrading from the `semconv/v1.4.0` package note the following name changes: - - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey` - - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey` - - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey` - - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey` - - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey` - - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey` - -### Changed - -- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275). - -### Fixed - -- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284) -- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285) -- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289) - -## [1.0.1] - 2021-10-01 - -### Fixed - -- json stdout exporter no longer crashes due to concurrency bug. (#2265) - -## [Metrics 0.24.0] - 2021-10-01 - -### Changed - -- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237) -- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197) - - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`. - - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`. - -## [1.0.0] - 2021-09-20 - -This is the first stable release for the project. 
-This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md). - -### Added - -- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242) - -### Fixed - -- Slice-valued attributes can correctly be used as map keys. (#2223) - -### Removed - -- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248) -- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234) -- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233) -- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package. - Use the typed functions and methods added to the package instead. (#2235) - - The `Key.Array` method is removed. - - The `Array` function is removed. - - The `Any` function is removed. - - The `ArrayValue` function is removed. - - The `AsArray` function is removed. - -## [1.0.0-RC3] - 2021-09-02 - -### Added - -- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149) -- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163) -- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162) - - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package. -- Added the `go.opentelemetry.io/otel/example/fib` example package. - Included is an example application that computes Fibonacci numbers. 
(#2203) - -### Changed - -- Metric instruments have been renamed to match the (feature-frozen) metric API specification: - - ValueRecorder becomes Histogram - - ValueObserver becomes Gauge - - SumObserver becomes CounterObserver - - UpDownSumObserver becomes UpDownCounterObserver - The API exported from this project is still considered experimental. (#2202) -- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091) -- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120) -- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196) -- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212) - -### Deprecated - -- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. - All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. - The functions from that package should be used instead. (#2166) -- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated. - Use the typed `*Slice` functions and types added to the package instead. 
(#2162) -- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. - Use the typed functions instead. (#2181) -- The `go.opentelemetry.io/otel/oteltest` package is deprecated. - The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188) - -### Removed - -- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105) - -### Fixed - -- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138) -- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) -- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) -- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) -- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195) -- Fixed typos in resources.go. (#2201) - -## [1.0.0-RC2] - 2021-07-26 - -### Added - -- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840) -- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840) -- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. - This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095) -- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. 
(#2115) -- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. - This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. - For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118) -- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/skd/trace/tracetest` package. - This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132) - -### Changed - -- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027) -- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095) - -### Deprecated - -- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114) -- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123) -- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. - Use the `trace.ParseTraceState` function instead. (#2122) - -### Removed - -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020) -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020) -- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. - The explicit `With*` options for every built-in detector should be used instead. 
(#2026 #2097) -- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. - The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095) -- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118) - -### Fixed - -- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032) -- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073) -- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092) -- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. - This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102) -- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108) -- Use `6831` as default Jaeger agent port instead of `6832`. (#2131) - -## [Experimental Metrics v0.22.0] - 2021-07-19 - -### Added - -- Adds HTTP support for OTLP metrics exporter. (#2022) - -### Removed - -- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020) - -## [1.0.0-RC1] / 0.21.0 - 2021-06-18 - -With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` -while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules -with major version 0. - -### Added - -- Adds `otlpgrpc.WithRetry`option for configuring the retry policy for transient errors on the otlp/gRPC exporter. 
(#1832) - - The following status codes are defined as transient errors: - | gRPC Status Code | Description | - | ---------------- | ----------- | - | 1 | Cancelled | - | 4 | Deadline Exceeded | - | 8 | Resource Exhausted | - | 10 | Aborted | - | 10 | Out of Range | - | 14 | Unavailable | - | 15 | Data Loss | -- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874) -- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. - This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873) -- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886) -- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889) -- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912) -- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. - It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937) -- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. - This method returns the number of list-members the `TraceState` holds. (#1937) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data. - Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing .(#1922) -- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. 
(#1967) -- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. - These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967) -- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963) -- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938) -- Several builtin resource detectors now correctly populate the schema URL. (#1938) -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data. -- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`.(#1991) -- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005) -- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005) -- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. 
(#2009) - -### Changed - -- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. - `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798) -- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810) -- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846) -- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865) -- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860) -- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855) -- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871) -- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. - This method returns the status of a span using the new `Status` type. (#1874) -- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. - This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873) -- Unembed `SpanContext` in `Link`. (#1877) -- Generate Semantic conventions from the specification YAML. (#1891) -- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901) -- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. 
(#1902) -- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903) -- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921) -- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) -- Refactored option types according to the contribution style guide. (#1882) -- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. - This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. - The new `ParseTraceState` function should be used to create a `TraceState`. (#1931) -- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931) -- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931) -- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) -- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. 
(#1931) -- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985) -- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) -- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) -- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987) -- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988) - -### Deprecated - -- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993) -- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993) -- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993) - -### Removed - -- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810) -- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`, Use the corresponding `With*()` to use individually. 
(#1810) -- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`. - The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate. - The `IsRecording` method returns if the span is recording or not. - A read-only span value does not need to know if updates to it will be recorded or not. - By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873) -- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package. - The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type. - When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873) -- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package. - Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own. - The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900) - - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009) -- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919) -- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931) -- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package. 
- Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967) -- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed. - These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985) -- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990) -- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005) - -### Fixed - -- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851) -- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856) -- BatchSpanProcessor now drops span batches that failed to be exported. (#1860) -- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898) -- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) -- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) -- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) - -### Security - -## [0.20.0] - 2021-04-23 - -### Added - -- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) -- Adds semantic conventions for exceptions. 
(#1492) -- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` - These environment variables can be used to override Jaeger agent hostname and port (#1752) -- Option `ExportTimeout` was added to batch span processor. (#1755) -- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) -- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771) -- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) -- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) -- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) -- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) -- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) - - `process.pid` - - `process.executable.name` - - `process.executable.path` - - `process.command_args` - - `process.owner` - - `process.runtime.name` - - `process.runtime.version` - - `process.runtime.description` -- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) -- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. 
(#1758, #1769 and #1811) - - `OTEL_EXPORTER_OTLP_ENDPOINT` - - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` - - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` - - `OTEL_EXPORTER_OTLP_HEADERS` - - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` - - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` - - `OTEL_EXPORTER_OTLP_COMPRESSION` - - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` - - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` - - `OTEL_EXPORTER_OTLP_TIMEOUT` - - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` - - `OTEL_EXPORTER_OTLP_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` -- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821) -- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853) - -### Fixed - -- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750) -- The Jaeger exporter now correctly sets tags for the Span status code and message. - This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761) -- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag. - Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768) -- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688) -- Fixed typo for default service name in Jaeger Exporter. (#1797) -- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814) -- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit. - Instead, the exporter now splits the batch into smaller sendable batches. 
(#1828) - -### Changed - -- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492) -- Jaeger exporter was updated to use thrift v0.14.1. (#1712) -- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713) -- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713) -- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. - The Span's SpanContext can now self-identify as being remote or not. - This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731) -- Improve OTLP/gRPC exporter connection errors. (#1737) -- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. - The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748) -- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. - This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749) -- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` - to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752) -- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757) -- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. 
- It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771) -- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773) -- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777) -- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) -- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796) -- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800) -- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. - This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822) -- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) -- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. - The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) -- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) -- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. 
(#1830) - -### Removed - -- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` - These environment variables will no longer be used to override values of the Jaeger exporter (#1752) -- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. - This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. - To prevent backwards incompatible changes when it is specified, these links are removed. (#1726) -- Setting error status while recording error with Span from oteltest package. (#1729) -- The concept of a remote and local Span stored in a context is unified to just the current Span. - Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. - If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) -- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. - This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) -- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) -- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. - The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) -- Remove the `WithDisabled` option from the Jaeger exporter. - To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) -- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. 
- These functions for retrieving specific environment variable values are redundant of other internal functions and - are not intended for end user use. (#1824) -- Removed the Jaeger exporter `WithSDKOptions` `Option`. - This option was used to set SDK options for the exporter creation convenience functions. - These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. - If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) -- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. - The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) -- The Jaeger exporter `Option` type is removed. - The type is no longer used by the exporter to configure anything. - All the previous configurations these options provided were duplicates of SDK configuration. - They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830) - -## [0.19.0] - 2021-03-18 - -### Added - -- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586) -- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) -- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702) -- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) -- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) - -### Changed - -- `trace.SpanContext` is now immutable and has no exported fields. 
(#1573) - - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known. -- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) -- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608) -- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) -- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656) -- Added non-empty string check for trace `Attribute` keys. (#1659) -- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662) -- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) -- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) -- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) -- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) -- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) - -### Removed - -- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549) -- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633) -- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. - These are now returned as a SpanProcessor interface from their respective constructors. 
(#1638) -- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660) -- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663) -- Removed `jaeger.WithProcess` configuration option. (#1673) -- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693) - -### Fixed - -- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626) -- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) -- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) -- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) -- Synchronization issues in global trace delegate implementation. (#1686) -- Reduced excess memory usage by global `TracerProvider`. (#1687) - -## [0.18.0] - 2021-03-03 - -### Added - -- Added `resource.Default()` for use with meter and tracer providers. (#1507) -- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535) -- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) -- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558) -- Compatibility testing suite in the CI system for the following systems. (#1567) - | OS | Go Version | Architecture | - | ------- | ---------- | ------------ | - | Ubuntu | 1.15 | amd64 | - | Ubuntu | 1.14 | amd64 | - | Ubuntu | 1.15 | 386 | - | Ubuntu | 1.14 | 386 | - | MacOS | 1.15 | amd64 | - | MacOS | 1.14 | amd64 | - | Windows | 1.15 | amd64 | - | Windows | 1.14 | amd64 | - | Windows | 1.15 | 386 | - | Windows | 1.14 | 386 | - -### Changed - -- Replaced interface `oteltest.SpanRecorder` with its existing implementation - `StandardSpanRecorder`. 
(#1542) -- Default span limit values to 128. (#1535) -- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535) -- Renamed the `otel/label` package to `otel/attribute`. (#1541) -- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) -- Parallelize the CI linting and testing. (#1567) -- Stagger timestamps in exact aggregator tests. (#1569) -- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) -- Prevent end-users from implementing some interfaces (#1575) - - ``` - "otel/exporters/otlp/otlphttp".Option - "otel/exporters/stdout".Option - "otel/oteltest".Option - "otel/trace".TracerOption - "otel/trace".SpanOption - "otel/trace".EventOption - "otel/trace".LifeCycleOption - "otel/trace".InstrumentationOption - "otel/sdk/resource".Option - "otel/sdk/trace".ParentBasedSamplerOption - "otel/sdk/trace".ReadOnlySpan - "otel/sdk/trace".ReadWriteSpan - ``` - -### Removed - -- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) -- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567) -- Removed the `test-386` make target. - This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) - -### Fixed - -- The sequential timing check of timestamps in the stdout exporter are now setup explicitly to be sequential (#1571). (#1572) -- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577) -- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue are now setup explicitly to be sequential (#1578). (#1579) -- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). 
(#1581) -- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) - -## [0.17.0] - 2021-02-12 - -### Changed - -- Rename project default branch from `master` to `main`. (#1505) -- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501) -- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) -- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528) -- Move metric-related public global APIs from otel to otel/metric/global. (#1528) - -### Fixed - -- Fixed otlpgrpc reconnection issue. -- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513) -- The otel-collector example now uses the default OTLP receiver port of the collector. - -## [0.16.0] - 2021-01-13 - -### Added - -- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360) -- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) -- Added documentation about the project's versioning policy. (#1388) -- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) -- Added codeql workflow to GitHub Actions (#1428) -- Added Gosec workflow to GitHub Actions (#1429) -- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) -- Add an OpenCensus exporter bridge. (#1444) - -### Changed - -- Rename `internal/testing` to `internal/internaltest`. (#1449) -- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) -- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) -- Improve span duration accuracy. 
(#1360) -- Migrated CI/CD from CircleCI to GitHub Actions (#1382) -- Remove duplicate checkout from GitHub Actions workflow (#1407) -- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412) -- Metric `exact` aggregator includes per-point timestamps (#1412) -- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412) -- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) -- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) -- Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) -- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) -- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) -- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) -- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) -- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447) -- Metric Push and Pull Controller components are combined into a single "basic" Controller: - - `WithExporter()` and `Start()` to configure Push behavior - - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior - - `Start()` and `Stop()` accept Context. (#1378) -- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) - -### Removed - -- Remove `errUninitializedSpan` as its only usage is now obsolete. 
(#1360) -- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) -- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) - -### Fixed - -- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443) - -## [0.15.0] - 2020-12-10 - -### Added - -- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363) - -### Changed - -- The Zipkin exporter now uses the Span status code to determine. (#1328) -- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) -- Move the OpenCensus example into `example` directory. (#1359) -- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) -- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) -- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) - -### Fixed - -- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) - -## [0.14.0] - 2020-11-19 - -### Added - -- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) -- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) -- `SpanContextFromContext` returns `SpanContext` from context. (#1255) -- `TraceState` has been added to `SpanContext`. (#1340) -- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. 
(#1323) -- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) -- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) -- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) - -### Changed - -- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) - - `ID` has been renamed to `TraceID`. - - `IDFromHex` has been renamed to `TraceIDFromHex`. - - `EmptySpanContext` is removed. -- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229) -- OTLP Exporter updates: - - supports OTLP v0.6.0 (#1230, #1354) - - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) -- The Sampler is now called on local child spans. (#1233) -- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240) -- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. - This matches the returned type and fixes misuse of the term metric. (#1240) -- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) -- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252) -- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) -- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) -- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. 
(#1316) -- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) -- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) -- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) -- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) -- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) -- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) -- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and - `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) -- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) -- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) -- Updated span collection limits for attribute, event and link counts to 1000 (#1318) -- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338) - -### Removed - -- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243) -- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. - It is replaced by using the `AddEvent` method with a `WithTimestamp` option.
(#1254) -- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. - `Tracer` and `Span` from the same module should be used in their place instead. (#1306) -- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) -- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314) - -### Fixed - -- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) -- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) -- Fix condition in `label.Any`. (#1299) -- Fix global `TracerProvider` to pass options to its configured provider. (#1329) -- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) - -## [0.13.0] - 2020-10-08 - -### Added - -- OTLP Metric exporter supports Histogram aggregation. (#1209) -- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) -- A Baggage API to implement the OpenTelemetry specification. (#1217) -- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227) - -### Changed - -- Set default propagator to no-op propagator. (#1184) -- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) -- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. 
(#1212) -- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. - They now are `Unset`, `Error`, and `Ok`. - They no longer track the gRPC codes. (#1214) -- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214) -- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) -- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) - -### Fixed - -- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) - -### Removed - -- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) -- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. - The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212) -- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) -- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) -- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219) -- Nested array/slice support has been removed. (#1226) - -## [0.12.0] - 2020-09-24 - -### Added - -- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. 
(#1108) -- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. - This addition was made to conform with our project option conventions. (#1155) -- Instrumentation library information was added to the Zipkin exporter. (#1119) -- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) -- More semantic conventions for k8s as resource attributes. (#1167) - -### Changed - -- Add reconnecting udp connection type to Jaeger exporter. - This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. - It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) -- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. - This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) -- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. - This is to be more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) -- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) -- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. - This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) -- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification.
(#1119) -- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) -- Move `tools` package under `internal`. (#1141) -- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) - The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. -- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) -- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) -- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161) -- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to - recommend the use of `newConfig()` instead of `configure()`. (#1163) -- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) -- Ensure exported interface types include parameter names and update the - Style Guide to reflect this styling rule. (#1172) -- Don't consider unset environment variable for resource detection to be an error. (#1170) -- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and - `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. -- ValueObserver instruments use LastValue aggregator by default. (#1165) -- OTLP Metric exporter supports LastValue aggregation. (#1165) -- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) -- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) -- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. 
(#1190) -- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190) -- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) -- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) -- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) -- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) -- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) -- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) -- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) -- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) -- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change.
(#1192) -- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) -- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) -- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) - -### Removed - -- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the - `go.opentelemetry.io/contrib/propagators/` module. (#1191) -- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194) - -### Fixed - -- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) -- Fix missing shutdown processor in otel-collector example. (#1186) -- Fix missing shutdown processor in basic and namedtracer examples. (#1197) - -## [0.11.0] - 2020-08-24 - -### Added - -- Support for exporting array-valued attributes via OTLP. (#992) -- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) -- Support for filtering metric label sets. (#1047) -- A dimensionality-reducing metric Processor. (#1057) -- Integration tests for more OTel Collector Attribute types. (#1062) -- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) - -### Changed - -- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) -- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) -- Rename `api/testharness` to `api/apitest`. (#1049) -- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) -- Change Metric Processor to merge multiple observations. (#1024) -- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. 
- This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) -- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) -- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042) -- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) -- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) -- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060) -- Unify Callback Function Naming. - Rename `*Callback` with `*Func`. (#1061) -- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064) -- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. - This interface still supports the export of `SpanData`, but only as a slice. - Implementations are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078) -- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. - If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) -- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
- This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) - -### Removed - -- Duplicate, unused API sampler interface. (#999) - Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. -- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. - This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027) -- The `WithSpan` method of the `Tracer` interface. - The functionality this method provided was limited compared to what a user can provide themselves. - It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) -- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. - These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) -- The `oterror` package. (#1026) -- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) - -### Fixed - -- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) -- Correct instrumentation version tag in Jaeger exporter. (#1037) -- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) -- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) -- The `otel-collector` example referenced outdated collector processors. (#1006) - -## [0.10.0] - 2020-07-29 - -This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. 
- -### Added - -- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. - These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) -- Add propagator option for gRPC instrumentation. (#986) -- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) - -### Changed - -- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. - This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) -- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. - This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) -- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) -- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) - - `value.Bool` was replaced with `kv.BoolValue`. - - `value.Int64` was replaced with `kv.Int64Value`. - - `value.Uint64` was replaced with `kv.Uint64Value`. - - `value.Float64` was replaced with `kv.Float64Value`. - - `value.Int32` was replaced with `kv.Int32Value`. - - `value.Uint32` was replaced with `kv.Uint32Value`. - - `value.Float32` was replaced with `kv.Float32Value`. - - `value.String` was replaced with `kv.StringValue`. - - `value.Int` was replaced with `kv.IntValue`. - - `value.Uint` was replaced with `kv.UintValue`. - - `value.Array` was replaced with `kv.ArrayValue`.
-- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) -- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979) -- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) -- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) -- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) - -### Removed - -- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) - -### Fixed - -- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) -- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957) -- Use `global.Handle` for span export errors in the OTLP exporter. (#946) -- Correct Go language formatting in the README documentation. (#961) -- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) -- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) -- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) - -## [0.9.0] - 2020-07-20 - -### Added - -- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) -- A Detector to automatically detect resources from an environment variable. (#939) -- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) -- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
- References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) - -### Changed - -- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) - -### Removed - -- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) - -## [0.8.0] - 2020-07-09 - -### Added - -- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. - A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) -- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) -- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) -- Add `peer.service` semantic attribute. (#898) -- Add database-specific semantic attributes. (#899) -- Add semantic convention for `faas.coldstart` and `container.id`. (#909) -- Add http content size semantic conventions. (#905) -- Include `http.request_content_length` in HTTP request basic attributes. (#905) -- Add semantic conventions for operating system process resource attribute keys. (#919) -- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) - -### Changed - -- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) -- Use lowercase header names for B3 Multiple Headers. (#881) -- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. - This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. - If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) -- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. 
- Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. - This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) -- Extend semantic conventions for RPC. (#900) -- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920) - - `"api/standard".FaaSName` -> `FaaSNameKey` - - `"api/standard".FaaSID` -> `FaaSIDKey` - - `"api/standard".FaaSVersion` -> `FaaSVersionKey` - - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` - -### Removed - -- The `FlagsUnused` trace flag is removed. - The purpose of this flag was to act as the inverse of `FlagsSampled`, the inverse of `FlagsSampled` is used instead. (#882) -- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. - If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) - -### Fixed - -- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) -- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) -- The B3 propagator now propagates the debug flag. - This removes the behavior of changing the debug flag into a set sampling bit. - Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882) -- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) -- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools.
(#883) -- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) -- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) -- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) -- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) -- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) -- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) -- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-collector example to use the v0.5.0 collector. (#915) -- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) -- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) -- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. - This is in accordance with OpenTelemetry semantic conventions. (#922) -- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) -- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) -- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) -- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) - -## [0.7.0] - 2020-06-26 - -This release implements the v0.5.0 version of the OpenTelemetry specification. - -### Added - -- The othttp instrumentation now includes default metrics. (#861) -- This CHANGELOG file to track all changes in the project going forward. -- Support for array type attributes. (#798) -- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. 
(#844) -- Timestamps are now passed to exporters for each export. (#835) -- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. - This replaces the prior `Record` `struct` use for this purpose. (#835) -- New dependabot integration to automate package upgrades. (#814) -- `Meter` and `Tracer` implementations accept instrumentation version as an optional argument. - This instrumentation version is passed on to exporters. (#811) (#805) (#802) -- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) -- Environment variables for Jaeger exporter are supported. (#796) -- New `aggregation.Kind` in the export metric API. (#808) -- New example that uses OTLP and the collector. (#790) -- Handle errors in the span `SetName` during span initialization. (#791) -- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) -- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) -- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user defined `Handler`. - There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) -- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) -- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) -- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769 - -### Changed - -- Rename `Integrator` to `Processor` in the metric SDK. (#863) -- Rename `AggregationSelector` to `AggregatorSelector`. (#859) -- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) -- Rename `simple` integrator to `basic` integrator.
(#857) -- Merge otlp collector examples. (#841) -- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. - With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840) -- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) -- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. - All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. - Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) -- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) -- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) -- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808 -- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) -- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791) -- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) -- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781) - -### Removed - -- `Uint64NumberKind` and related functions from the API. (#864) -- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) -- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) - -### Fixed - -- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. 
(#866) -- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) -- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) -- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) -- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) -- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) -- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) -- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) -- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) -- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) -- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) -- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) -- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) -- Set span status from HTTP status code in the othttp instrumentation. (#832) -- Fixed typo in push controller comment. (#834) -- The `Aggregator` testing has been updated and cleaned. (#812) -- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) -- Fixed `global` `handler_test.go` test failure. 
#804 -- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) -- Fixed OTLP example's accidental early close of exporter. (#807) -- Ensure zipkin exporter reads and closes response body. (#788) -- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) -- Clean up tools and RELEASING documentation. (#762) - -## [0.6.0] - 2020-05-21 - -### Added - -- Support for `Resource`s in the prometheus exporter. (#757) -- New pull controller. (#751) -- New `UpDownSumObserver` instrument. (#750) -- OpenTelemetry collector demo. (#711) -- New `SumObserver` instrument. (#747) -- New `UpDownCounter` instrument. (#745) -- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) -- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) - -### Changed - -- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) -- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758) -- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) -- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) -- The prometheus exporter now uses the new pull controller. (#751) -- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752) -- Support use of synchronous instruments in asynchronous callbacks (#725) -- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) -- Rename `Observer` instrument to `ValueObserver`. (#734) -- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) -- Replace `Measure` instrument by `ValueRecorder` instrument. (#732) -- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 
(#727) - -### Fixed - -- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) -- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) -- Fix `string` case in `kv` `Infer` function. (#746) -- Fix panic in grpctrace client interceptors. (#740) -- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) -- Rewrite span batch process queue batching logic. (#719) -- Remove the push controller named Meter map. (#738) -- Fix Histogram aggregator initial state (fix #735). (#736) -- Ensure golang alpine image is running `golang-1.14` for examples. (#733) -- Added test for grpctrace `UnaryInterceptorClient`. (#695) -- Rearrange `api/metric` code layout. (#724) - -## [0.5.0] - 2020-05-13 - -### Added - -- Batch `Observer` callback support. (#717) -- Alias `api` types to root package of project. (#696) -- Create basic `othttp.Transport` for simple client instrumentation. (#678) -- `SetAttribute(string, interface{})` to the trace API. (#674) -- Jaeger exporter option that allows user to specify custom http client. (#671) -- `Stringer` and `Infer` methods to `key`s. (#662) - -### Changed - -- Rename `NewKey` in the `kv` package to just `Key`. (#721) -- Move `core` and `key` to `kv` package. (#720) -- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) -- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710) -- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) -- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) -- Move `Number` from `core` to `api/metric` package. (#706) -- Move `SpanContext` from `core` to `trace` package. (#692) -- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. 
(#681) - -### Fixed - -- Update tooling to run generators in all submodules. (#705) -- gRPC interceptor regexp to match methods without a service name. (#683) -- Use a `const` for padding 64-bit B3 trace IDs. (#701) -- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) -- Left-pad 64-bit B3 trace IDs with zero. (#698) -- Propagate at least the first W3C tracestate header. (#694) -- Remove internal `StateLocker` implementation. (#688) -- Increase instance size CI system uses. (#690) -- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) -- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680) -- Reimplement histogram using mutex instead of `StateLocker`. (#669) -- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) -- Update documentation to not include any references to `WithKeys`. (#672) -- Correct misspelling. (#668) -- Fix clobbering of the span context if extraction fails. (#656) -- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) - -## [0.4.3] - 2020-04-24 - -### Added - -- `Dockerfile` and `docker-compose.yml` to run example code. (#635) -- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) -- New `api/label` package, providing common label set implementation. (#651) -- Support for JSON marshaling of `Resources`. (#654) -- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) -- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) -- `WithSpanFormatter` option to the othttp plugin. (#617) -- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) -- The prometheus exporter now supports exporting histograms. (#601) -- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. 
(#613) -- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) -- An `Equal` method to the `Resource` to test the equivalence of resources. (#613) -- An iterable structure (`AttributeIterator`) for `Resource` attributes. - -### Changed - -- zipkin export's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) -- Pass `Resources` through the metrics export pipeline. (#659) - -### Removed - -- `WithKeys` option from the metric API. (#639) - -### Fixed - -- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) -- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) -- Use type names for return values in jaeger exporter. (#648) -- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) -- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) -- Do not cache `reflect.ValueOf()` in metric Labels. (#649) -- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) -- Add error wrapping to the prometheus exporter. (#631) -- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) -- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) -- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) -- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) -- Ensure spans created by httptrace client tracer reflect operation structure. (#618) -- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610 -- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. 
(#611) - -## [0.4.2] - 2020-03-31 - -### Fixed - -- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) -- Fix time conversion from internal to OTLP in OTLP exporter. (#606) - -## [0.4.1] - 2020-03-31 - -### Fixed - -- Update `tag.sh` to create signed tags. (#604) - -## [0.4.0] - 2020-03-30 - -### Added - -- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) -- Script to verify examples after a new release. (#579) - -### Removed - -- The dogstatsd exporter due to lack of support. - This additionally removes support for statsd. (#591) -- `LabelSet` from the metric API. - This is replaced by a `[]core.KeyValue` slice. (#595) -- `Labels` from the metric API's `Meter` interface. (#595) - -### Changed - -- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) -- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) -- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) - -### Fixed - -- Corrected missing return in mock span. (#582) -- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) -- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) -- Update pre-release script to be compatible between GNU and BSD based systems. (#592) -- Add a `RecordBatch` benchmark. (#594) -- Moved span transforms of the OTLP exporter to the internal package. (#593) -- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) -- Removed unneeded allocation on empty labels in OLTP exporter. (#597) -- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) -- Update project documentation godoc.org links to pkg.go.dev. 
(#602) - -## [0.3.0] - 2020-03-21 - -This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. -There is still a possibility of breaking changes. - -### Added - -- Add `Observer` metric instrument. (#474) -- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) -- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) -- The zipkin trace exporter. (#495) -- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) -- Add `StatusMessage` field to the trace `Span`. (#524) -- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) -- The `Resource` type was added to the SDK. (#528) -- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) -- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. - Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) -- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) -- Scripts to better automate the release process. (#576) - -### Changed - -- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506) -- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) -- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) -- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) -- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531) -- Rename metric API `Options` to `Config`. 
(#541) -- Rename metric `Counter` aggregator to be `Sum`. (#541) -- Unify metric options into `Option` from instrument specific options. (#541) -- The trace API's `TraceProvider` now support `Resource`s. (#545) -- Correct error in zipkin module name. (#548) -- The jaeger trace exporter now supports `Resource`s. (#551) -- Metric SDK now supports `Resource`s. - The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) -- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) -- The stdout trace exporter now supports `Resource`s. (#558) -- The metric `Descriptor` is now included at the API instead of the SDK. (#560) -- Replace `Ordered` with an iterator in `export.Labels`. (#567) - -### Removed - -- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) -- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) -- `GetDescriptor` from the metric SDK. (#575) -- The `Gauge` instrument from the metric API. (#537) - -### Fixed - -- Make histogram aggregator checkpoint consistent. (#438) -- Update README with import instructions and how to build and test. (#505) -- The default label encoding was updated to be unique. (#508) -- Use `NewRoot` in the othttp plugin for public endpoints. (#513) -- Fix data race in `BatchedSpanProcessor`. (#518) -- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521 -- Use a variable-size array to represent ordered labels in maps. (#523) -- Update the OTLP protobuf and update changed import path. (#532) -- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) -- Eliminate goroutine leak in histogram stress test. (#547) -- Update OTLP exporter with latest protobuf. (#550) -- Add filters to the othttp plugin. 
(#556) -- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) -- Encode labels once during checkpoint. - The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. - This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) -- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) - -## [0.2.3] - 2020-03-04 - -### Added - -- `RecordError` method on `Span`s in the trace API to Simplify adding error events to spans. (#473) -- Configurable push frequency for exporters setup pipeline. (#504) - -### Changed - -- Rename the `exporter` directory to `exporters`. - The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. - This resulted in all subsequent releases not becoming the default latest. - A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. - Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. - Consequentially, this action also renames *all* exporter packages. (#502) - -### Removed - -- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503) - -## [0.2.2] - 2020-02-27 - -### Added - -- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) -- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) -- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467) -- `Config` and configuring `Option` to the propagator API. 
(#467) -- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) -- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467) -- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) -- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) -- Histogram aggregator. (#433) -- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) -- `AlwaysParentSample` sampler to the trace API. (#455) -- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) - -### Changed - -- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) -- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) -- Move correlation context propagation to correlation package. (#479) -- Do not default to putting remote span context into links. (#480) -- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) -- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) -- Renamed the `export` package to `metric` to match directory structure. (#432) -- Rename the `api/distributedcontext` package to `api/correlation`. (#444) -- Rename the `api/propagators` package to `api/propagation`. (#444) -- Move the propagators from the `propagators` package into the `trace` API package. (#444) -- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) -- Moved all dependencies of tools package to a tools directory. (#466) - -### Removed - -- Binary propagators. (#467) -- NOOP propagator. 
(#467) - -### Fixed - -- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) -- Fix a possible nil-dereference crash (#478) -- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) -- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) -- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) -- Initialize `onError` based on `Config` in prometheus exporter. (#486) -- Correct module name in prometheus exporter README. (#475) -- Removed tracer name prefix from span names. (#430) -- Fix `aggregator_test.go` import package comment. (#431) -- Improved detail in stdout exporter. (#436) -- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) -- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) -- Reword function documentation in gRPC plugin. (#446) -- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441) -- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441) -- Upgraded to Go 1.13 in CI. (#465) -- Correct opentelemetry.io URL in trace SDK documentation. (#464) -- Refactored reference counting logic in SDK determination of stale records. (#468) -- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) - -## [0.2.1.1] - 2020-01-13 - -### Fixed - -- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) - -## [0.2.1] - 2020-01-08 - -### Added - -- Global meter forwarding implementation. - This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) -- Global trace forwarding implementation. 
- This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) -- Standardize export pipeline creation in all exporters. (#395) -- A testing, organization, and comments for 64-bit field alignment. (#418) -- Script to tag all modules in the project. (#414) - -### Changed - -- Renamed `propagation` package to `propagators`. (#362) -- Renamed `B3Propagator` propagator to `B3`. (#362) -- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) -- Renamed `BinaryPropagator` propagator to `Binary`. (#362) -- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) -- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) -- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) -- Renamed `SpanOption` to `StartOption` in the trace API. (#369) -- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) -- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) -- `Number` now has a pointer receiver for its methods. (#375) -- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) -- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) -- Renamed `Message` in Event to `Name` in the trace API. (#389) -- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) -- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) -- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) -- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) -- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) -- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) -- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) -- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. 
(#400) -- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) -- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) -- Renamed the `File` option in the stdout exporter to `Writer`. (#404) -- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. - -### Fixed - -- Aggregator import path corrected. (#421) -- Correct links in README. (#368) -- The README was updated to match latest code changes in its examples. (#374) -- Don't capitalize error statements. (#375) -- Fix ignored errors. (#375) -- Fix ambiguous variable naming. (#375) -- Removed unnecessary type casting. (#375) -- Use named parameters. (#375) -- Updated release schedule. (#378) -- Correct http-stackdriver example module name. (#394) -- Removed the `http.request` span in `httptrace` package. (#397) -- Add comments in the metrics SDK (#399) -- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into a empty one. (#402) (#403) -- Add documentation of compatible exporters in the README. (#405) -- Typo fix. (#408) -- Simplify span check logic in SDK tracer implementation. (#419) - -## [0.2.0] - 2019-12-03 - -### Added - -- Unary gRPC tracing example. (#351) -- Prometheus exporter. (#334) -- Dogstatsd metrics exporter. (#326) - -### Changed - -- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) -- Rename `GetMeter` to `Meter`. (#357) -- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) -- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) -- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) -- Move `/global` package to `/api/global`. (#356) -- Rename `GetTracer` to `Tracer`. (#347) - -### Removed - -- `SetAttribute` from the `Span` interface in the trace API. (#361) -- `AddLink` from the `Span` interface in the trace API. 
(#349) -- `Link` from the `Span` interface in the trace API. (#349) - -### Fixed - -- Exclude example directories from coverage report. (#365) -- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) -- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359) -- Run the race checker for all tests. (#354) -- Redundant commands in the Makefile are removed. (#354) -- Split the `generate` and `lint` targets of the Makefile. (#354) -- Renames `circle-ci` target to more generic `ci` in Makefile. (#354) -- Add example Prometheus binary to gitignore. (#358) -- Support negative numbers with the `MaxSumCount`. (#335) -- Resolve race conditions in `push_test.go` identified in #339. (#340) -- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) -- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. - Previously it was testing `AlwaysSample` twice. (#325) -- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) -- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes` (#325) -- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. - This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. - This was corrected. (#333) - -## [0.1.2] - 2019-11-18 - -### Fixed - -- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) -- Removed unnecessary unslicing of parameters that are already a slice. (#324) - -## [0.1.1] - 2019-11-18 - -This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. - -### Added - -- Metrics stdout export pipeline. 
(#265) -- Array aggregation for raw measure metrics. (#282) -- The core.Value now have a `MarshalJSON` method. (#281) - -### Removed - -- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) -- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) - -### Changed - -- Allocation in LabelSet construction to reduce GC overhead. (#318) -- `trace.WithAttributes` to append values instead of replacing (#315) -- Use a formula for tolerance in sampling tests. (#298) -- Move export types into trace and metric-specific sub-directories. (#289) -- `SpanKind` back to being based on an `int` type. (#288) - -### Fixed - -- URL to OpenTelemetry website in README. (#323) -- Name of othttp default tracer. (#321) -- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) -- CI modules cache to correctly restore/save from/to the cache. (#316) -- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) -- README now reflects the new code structure introduced with these changes. (#291) -- Make the basic example work. (#279) - -## [0.1.0] - 2019-11-04 - -This is the first release of open-telemetry go library. -It contains api and sdk for trace and meter. - -### Added - -- Initial OpenTelemetry trace and metric API prototypes. -- Initial OpenTelemetry trace, metric, and export SDK packages. -- A wireframe bridge to support compatibility with OpenTracing. -- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. -- Exporters for Jaeger, Stackdriver, and stdout. -- Propagators for binary, B3, and trace-context protocols. -- Project information and guidelines in the form of a README and CONTRIBUTING. -- Tools to build the project and a Makefile to automate the process. -- Apache-2.0 license. -- CircleCI build CI manifest files. -- CODEOWNERS file to track owners of this project. 
- -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.43.0...HEAD -[1.43.0/0.65.0/0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.43.0 -[1.42.0/0.64.0/0.18.0/0.0.16]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.42.0 -[1.41.0/0.63.0/0.17.0/0.0.15]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.41.0 -[1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0 -[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 -[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 -[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1 -[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 -[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 -[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 -[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 -[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 -[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 -[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 -[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 -[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 -[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 -[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 -[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 -[1.27.0/0.49.0/0.3.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 -[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 -[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 -[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 -[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 -[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 -[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1 -[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0 -[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 -[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 -[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 -[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 -[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 -[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 -[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 -[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 -[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 -[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 -[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 -[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 -[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 
-[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 -[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 -[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 -[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 -[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 -[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 -[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 -[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 -[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 -[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 -[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 -[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 -[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 -[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 -[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 -[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 -[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 -[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 -[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 -[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 -[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 -[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 -[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 -[1.0.1]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 -[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 -[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 -[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 -[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 -[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 -[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 -[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 -[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 -[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 -[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 -[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 -[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 -[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 -[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 -[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 -[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 -[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 -[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 -[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 -[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 -[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 -[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 -[0.4.3]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 -[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 -[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 -[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 -[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 -[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 -[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 -[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 -[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 -[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 -[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 -[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 -[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 - - - -[Go 1.26]: https://go.dev/doc/go1.26 -[Go 1.25]: https://go.dev/doc/go1.25 -[Go 1.24]: https://go.dev/doc/go1.24 -[Go 1.23]: https://go.dev/doc/go1.23 -[Go 1.22]: https://go.dev/doc/go1.22 -[Go 1.21]: https://go.dev/doc/go1.21 -[Go 1.20]: https://go.dev/doc/go1.20 -[Go 1.19]: https://go.dev/doc/go1.19 -[Go 1.18]: https://go.dev/doc/go1.18 - -[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric -[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric -[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace - -[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS deleted file mode 100644 index 26a03aed1d..0000000000 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ /dev/null @@ -1,17 +0,0 @@ -##################################################### -# -# List of approvers for this repository -# 
-##################################################### -# -# Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md -# -# -# Learn about CODEOWNERS file format: -# https://help.github.com/en/articles/about-code-owners -# - -* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125 - -CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md deleted file mode 100644 index 12de3607a3..0000000000 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ /dev/null @@ -1,1157 +0,0 @@ -# Contributing to opentelemetry-go - -The Go special interest group (SIG) meets regularly. See the -OpenTelemetry -[community](https://github.com/open-telemetry/community#golang-sdk) -repo for information on this and other language SIGs. - -See the [public meeting -notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) -for a summary description of past meetings. To request edit access, -join the meeting or get in touch on -[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). - -## Development - -You can view and edit the source code by cloning this repository: - -```sh -git clone https://github.com/open-telemetry/opentelemetry-go.git -``` - -Run `make test` to run the tests instead of `go test`. - -There are some generated files checked into the repo. To make sure -that the generated files are up-to-date, run `make` (or `make -precommit` - the `precommit` target is the default). - -The `precommit` target also fixes the formatting of the code and -checks the status of the go module files. - -Additionally, there is a `codespell` target that checks for common -typos in the code. It is not run by default, but you can run it -manually with `make codespell`. It will set up a virtual environment -in `venv` and install `codespell` there. 
- -If after running `make precommit` the output of `git status` contains -`nothing to commit, working tree clean` then it means that everything -is up-to-date and properly formatted. - -## Pull Requests - -### How to Send Pull Requests - -Everyone is welcome to contribute code to `opentelemetry-go` via -GitHub pull requests (PRs). - -To create a new PR, fork the project in GitHub and clone the upstream -repo: - -```sh -go get -d go.opentelemetry.io/otel -``` - -(This may print some warning about "build constraints exclude all Go -files", just ignore it.) - -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. -Alternatively, you can use `git` directly with: - -```sh -git clone https://github.com/open-telemetry/opentelemetry-go -``` - -(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - -that name is a kind of a redirector to GitHub that `go get` can -understand, but `git` does not.) - -This will add the project as `opentelemetry-go` within the current directory. - -Enter the newly created directory and add your fork as a new remote: - -```sh -git remote add git@github.com:/opentelemetry-go -``` - -Check out a new branch, make modifications, run linters and tests, update -`CHANGELOG.md`, and push the branch to your fork: - -```sh -git checkout -b -# edit files -# update changelog -make precommit -git add -p -git commit -git push -``` - -Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull -request ID to the entry you added to `CHANGELOG.md`. - -Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. -Rewriting Git history makes it difficult to keep track of iterations during code review. -All pull requests are squashed to a single commit upon merge to `main`. 
- -### How to Receive Comments - -* If the PR is not ready for review, please put `[WIP]` in the title, - tag it as `work-in-progress`, or mark it as - [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). -* Make sure CLA is signed and CI is clear. - -### How to Get PRs Merged - -A PR is considered **ready to merge** when: - -* It has received two qualified approvals[^1]. - - This is not enforced through automation, but needs to be validated by the - maintainer merging. - * At least one of the qualified approvals needs to be from an - [Approver]/[Maintainer] affiliated with a different company than the author - of the PR. - * PRs introducing changes that have already been discussed and consensus - reached only need one qualified approval. The discussion and resolution - needs to be linked to the PR. - * Trivial changes[^2] only need one qualified approval. - -* All feedback has been addressed. - * All PR comments and suggestions are resolved. - * All GitHub Pull Request reviews with a status of "Request changes" have - been addressed. Another review by the objecting reviewer with a different - status can be submitted to clear the original review, or the review can be - dismissed by a [Maintainer] when the issues from the original review have - been addressed. - * Any comments or reviews that cannot be resolved between the PR author and - reviewers can be submitted to the community [Approver]s and [Maintainer]s - during the weekly SIG meeting. If consensus is reached among the - [Approver]s and [Maintainer]s during the SIG meeting the objections to the - PR may be dismissed or resolved or the PR closed by a [Maintainer]. - * Any substantive changes to the PR require existing Approval reviews be - cleared unless the approver explicitly states that their approval persists - across changes. This includes changes resulting from other feedback. 
- [Approver]s and [Maintainer]s can help in clearing reviews and they should - be consulted if there are any questions. - -* The PR branch is up to date with the base branch it is merging into. - * To ensure this does not block the PR, it should be configured to allow - maintainers to update it. - -* It has been open for review for at least one working day. This gives people - reasonable time to review. - * Trivial changes[^2] do not have to wait for one day and may be merged with - a single [Maintainer]'s approval. - -* All required GitHub workflows have succeeded. -* Urgent fix can take exception as long as it has been actively communicated - among [Maintainer]s. - -Any [Maintainer] can merge the PR once the above criteria have been met. - -[^1]: A qualified approval is a GitHub Pull Request review with "Approve" - status from an OpenTelemetry Go [Approver] or [Maintainer]. -[^2]: Trivial changes include: typo corrections, cosmetic non-substantive - changes, documentation corrections or updates, dependency updates, etc. - -## Design Choices - -As with other OpenTelemetry clients, opentelemetry-go follows the -[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel). - -It's especially valuable to read through the [library -guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). - -### Focus on Capabilities, Not Structure Compliance - -OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the methods to satisfy those use cases are -not. - -As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure are -flexible. - -It is preferable to have contributions follow the idioms of the -language rather than conform to specific API names or argument -patterns in the spec. - -For a deeper discussion, see -[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). 
- -## Tests - -Each functionality should be covered by tests. - -Performance-critical functionality should also be covered by benchmarks. - -- Pull requests adding a performance-critical functionality -should have `go test -bench` output in their description. -- Pull requests changing a performance-critical functionality -should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) -output in their description. - -## Dependencies - -This project uses [Go Modules] for dependency management. All modules will use -`go.mod` to explicitly list all direct and indirect dependencies, ensuring a -clear dependency graph. The `go.sum` file for each module will be committed to -the repository and used to verify the integrity of downloaded modules, -preventing malicious tampering. - -This project uses automated dependency update tools (i.e. dependabot, -renovatebot) to manage updates to dependencies. This ensures that dependencies -are kept up-to-date with the latest security patches and features and are -reviewed before being merged. If you would like to propose a change to a -dependency it should be done through a pull request that updates the `go.mod` -file and includes a description of the change. - -See the [versioning and compatibility](./VERSIONING.md) policy for more details -about dependency compatibility. - -[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more - -### Environment Dependencies - -This project does not partition dependencies based on the environment (i.e. -`development`, `staging`, `production`). - -Only the dependencies explicitly included in the released modules have been -tested and verified to work with the released code. No other guarantee is made -about the compatibility of other dependencies. - -## Documentation - -Each (non-internal, non-test) package must be documented using -[Go Doc Comments](https://go.dev/doc/comment), -preferably in a `doc.go` file. 
- -Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples) -instead of putting code snippets in Go doc comments. -In some cases, you can even create [Testable Examples](https://go.dev/blog/examples). - -You can install and run a "local Go Doc site" in the following way: - - ```sh - go install golang.org/x/pkgsite/cmd/pkgsite@latest - pkgsite - ``` - -[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) -is an example of a very well-documented package. - -### README files - -Each (non-internal, non-test, non-documentation) package must contain a -`README.md` file containing at least a title, and a `pkg.go.dev` badge. - -The README should not be a repetition of Go doc comments. - -You can verify the presence of all README files with the `make verify-readmes` -command. - -## Style Guide - -One of the primary goals of this project is that it is actually used by -developers. With this goal in mind the project strives to build -user-friendly and idiomatic Go code adhering to the Go community's best -practices. - -For a non-comprehensive but foundational overview of these best practices -the [Effective Go](https://golang.org/doc/effective_go.html) documentation -is an excellent starting place. - -We also recommend following the -[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) -that collects common comments made during reviews of Go code. - -As a convenience for developers building this project the `make precommit` -will format, lint, validate, and in some cases fix the changes you plan to -submit. This check will need to pass for your changes to be able to be -merged. - -In addition to idiomatic Go, the project has adopted certain standards for -implementations of common patterns. These standards should be followed as a -default, and if they are not followed documentation needs to be included as -to the reasons why. 
- -### Configuration - -When creating an instantiation function for a complex `type T struct`, it is -useful to allow variable number of options to be applied. However, the strong -type system of Go restricts the function design options. There are a few ways -to solve this problem, but we have landed on the following design. - -#### `config` - -Configuration should be held in a `struct` named `config`, or prefixed with -specific type name this Configuration applies to if there are multiple -`config` in the package. This type must contain configuration options. - -```go -// config contains configuration options for a thing. -type config struct { - // options ... -} -``` - -In general the `config` type will not need to be used externally to the -package and should be unexported. If, however, it is expected that the user -will likely want to build custom options for the configuration, the `config` -should be exported. Please, include in the documentation for the `config` -how the user can extend the configuration. - -It is important that internal `config` are not shared across package boundaries. -Meaning a `config` from one package should not be directly used by another. The -one exception is the API packages. The configs from the base API, eg. -`go.opentelemetry.io/otel/trace.TracerConfig` and -`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed -by the SDK therefore it is expected that these are exported. - -When a config is exported we want to maintain forward and backward -compatibility, to achieve this no fields should be exported but should -instead be accessed by methods. - -Optionally, it is common to include a `newConfig` function (with the same -naming scheme). This function wraps any defaults setting and looping over -all options to create a configured `config`. - -```go -// newConfig returns an appropriately configured config. -func newConfig(options ...Option) config { - // Set default values for config. 
- config := config{/* […] */} - for _, option := range options { - config = option.apply(config) - } - // Perform any validation here. - return config -} -``` - -If validation of the `config` options is also performed this can return an -error as well that is expected to be handled by the instantiation function -or propagated to the user. - -Given the design goal of not having the user need to work with the `config`, -the `newConfig` function should also be unexported. - -#### `Option` - -To set the value of the options a `config` contains, a corresponding -`Option` interface type should be used. - -```go -type Option interface { - apply(config) config -} -``` - -Having `apply` unexported makes sure that it will not be used externally. -Moreover, the interface becomes sealed so the user cannot easily implement -the interface on its own. - -The `apply` method should return a modified version of the passed config. -This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. - -The name of the interface should be prefixed in the same way the -corresponding `config` is (if at all). - -#### Options - -All user configurable options for a `config` must have a related unexported -implementation of the `Option` interface and an exported configuration -function that wraps this implementation. - -The wrapping function name should be prefixed with `With*` (or in the -special case of a boolean options `Without*`) and should have the following -function signature. - -```go -func With*(…) Option { … } -``` - -##### `bool` Options - -```go -type defaultFalseOption bool - -func (o defaultFalseOption) apply(c config) config { - c.Bool = bool(o) - return c -} - -// WithOption sets a T to have an option included. 
-func WithOption() Option { - return defaultFalseOption(true) -} -``` - -```go -type defaultTrueOption bool - -func (o defaultTrueOption) apply(c config) config { - c.Bool = bool(o) - return c -} - -// WithoutOption sets a T to have Bool option excluded. -func WithoutOption() Option { - return defaultTrueOption(false) -} -``` - -##### Declared Type Options - -```go -type myTypeOption struct { - MyType MyType -} - -func (o myTypeOption) apply(c config) config { - c.MyType = o.MyType - return c -} - -// WithMyType sets T to have include MyType. -func WithMyType(t MyType) Option { - return myTypeOption{t} -} -``` - -##### Functional Options - -```go -type optionFunc func(config) config - -func (fn optionFunc) apply(c config) config { - return fn(c) -} - -// WithMyType sets t as MyType. -func WithMyType(t MyType) Option { - return optionFunc(func(c config) config { - c.MyType = t - return c - }) -} -``` - -#### Instantiation - -Using this configuration pattern to configure instantiation with a `NewT` -function. - -```go -func NewT(options ...Option) T {…} -``` - -Any required parameters can be declared before the variadic `options`. - -#### Dealing with Overlap - -Sometimes there are multiple complex `struct` that share common -configuration and also have distinct configuration. To avoid repeated -portions of `config`s, a common `config` can be used with the union of -options being handled with the `Option` interface. - -For example. - -```go -// config holds options for all animals. -type config struct { - Weight float64 - Color string - MaxAltitude float64 -} - -// DogOption apply Dog specific options. -type DogOption interface { - applyDog(config) config -} - -// BirdOption apply Bird specific options. -type BirdOption interface { - applyBird(config) config -} - -// Option apply options for all animals. 
-type Option interface { - BirdOption - DogOption -} - -type weightOption float64 - -func (o weightOption) applyDog(c config) config { - c.Weight = float64(o) - return c -} - -func (o weightOption) applyBird(c config) config { - c.Weight = float64(o) - return c -} - -func WithWeight(w float64) Option { return weightOption(w) } - -type furColorOption string - -func (o furColorOption) applyDog(c config) config { - c.Color = string(o) - return c -} - -func WithFurColor(c string) DogOption { return furColorOption(c) } - -type maxAltitudeOption float64 - -func (o maxAltitudeOption) applyBird(c config) config { - c.MaxAltitude = float64(o) - return c -} - -func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } - -func NewDog(name string, o ...DogOption) Dog {…} -func NewBird(name string, o ...BirdOption) Bird {…} -``` - -### Interfaces - -To allow other developers to better comprehend the code, it is important -to ensure it is sufficiently documented. One simple measure that contributes -to this aim is self-documenting by naming method parameters. Therefore, -where appropriate, methods of every exported interface type should have -their parameters appropriately named. - -#### Interface Stability - -All exported stable interfaces that include the following warning in their -documentation are allowed to be extended with additional methods. - -> Warning: methods may be added to this interface in minor releases. - -These interfaces are defined by the OpenTelemetry specification and will be -updated as the specification evolves. - -Otherwise, stable interfaces MUST NOT be modified. - -#### How to Change Specification Interfaces - -When an API change must be made, we will update the SDK with the new method one -release before the API change. This will allow the SDK one version before the -API change to work seamlessly with the new API. - -If an incompatible version of the SDK is used with the new API the application -will fail to compile. 
- -#### How Not to Change Specification Interfaces - -We have explored using a v2 of the API to change interfaces and found that there -was no way to introduce a v2 and have it work seamlessly with the v1 of the API. -Problems happened with libraries that upgraded to v2 when an application did not, -and would not produce any telemetry. - -More detail of the approaches considered and their limitations can be found in -the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920) -issue. - -#### How to Change Other Interfaces - -If new functionality is needed for an interface that cannot be changed it MUST -be added by including an additional interface. That added interface can be a -simple interface for the specific functionality that you want to add or it can -be a super-set of the original interface. For example, if you wanted to a -`Close` method to the `Exporter` interface: - -```go -type Exporter interface { - Export() -} -``` - -A new interface, `Closer`, can be added: - -```go -type Closer interface { - Close() -} -``` - -Code that is passed the `Exporter` interface can now check to see if the passed -value also satisfies the new interface. E.g. - -```go -func caller(e Exporter) { - /* ... */ - if c, ok := e.(Closer); ok { - c.Close() - } - /* ... */ -} -``` - -Alternatively, a new type that is the super-set of an `Exporter` can be created. - -```go -type ClosingExporter struct { - Exporter - Close() -} -``` - -This new type can be used similar to the simple interface above in that a -passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type -and the `Close` method called. - -This super-set approach can be useful if there is explicit behavior that needs -to be coupled with the original type and passed as a unified type to a new -function, but, because of this coupling, it also limits the applicability of -the added functionality. 
If there exist other interfaces where this -functionality should be added, each one will need their own super-set -interfaces and will duplicate the pattern. For this reason, the simple targeted -interface that defines the specific functionality should be preferred. - -See also: -[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). - -### Testing - -We allow using [`testify`](https://github.com/stretchr/testify) even though -it is seen as non-idiomatic according to -the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page. - -The tests should never leak goroutines. - -Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. The top-level tests with this term will be run -many times in the `test-concurrent-safe` CI job to increase the chance of -catching concurrency issues. This does not apply to subtests when this term -is not in their root name. - -### Internal packages - -The use of internal packages should be scoped to a single module. A sub-module -should never import from a parent internal package. This creates a coupling -between the two modules where a user can upgrade the parent without the child, -and if the internal package API has changed, it will fail to upgrade[^3]. - -There are two known exceptions to this rule: - -- `go.opentelemetry.io/otel/internal/global` - - This package manages global state for all of opentelemetry-go. It needs to - be a single package in order to ensure the uniqueness of the global state. -- `go.opentelemetry.io/otel/internal/baggage` - - This package provides values in a `context.Context` that need to be - recognized by `go.opentelemetry.io/otel/baggage` and - `go.opentelemetry.io/otel/bridge/opentracing` but remain private. 
- -If you have duplicate code in multiple modules, make that code into a Go -template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl] -to render the templates in the desired locations. See [#4404] for an example of -this. - -[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 - -### Ignoring context cancellation - -OpenTelemetry API implementations need to ignore the cancellation of the context that is -passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). -Recording methods should not return an error describing the cancellation state of the context -when they complete, nor should they abort any work. - -This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for -the method. In that case the context cancellation can be used for the timeout with the -restriction that this behavior is documented for the method. Otherwise, timeouts -are expected to be handled by the user calling the API, not the implementation. - -Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method -of a provider. It is assumed the context passed from a user is not used for this purpose. - -Outside of the direct recording of telemetry from the API (e.g. exporting telemetry, -force flushing telemetry, shutting down a signal provider) the context cancellation -should be honored. This means all work done on behalf of the user provided context -should be canceled. - -### Observability - -OpenTelemetry Go SDK components should be instrumented to enable users observability for the health and performance of the telemetry pipeline itself. -This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications. - -This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components. 
- -#### Environment Variable Activation - -Observability features are currently experimental. -They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable. -This follows the established experimental feature pattern used throughout the SDK. - -Components should check for this environment variable using a consistent pattern: - -```go -import "go.opentelemetry.io/otel/*/internal/x" - -if x.Observability.Enabled() { - // Initialize observability metrics -} -``` - -**References**: - -- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go) -- [sdk](./sdk/internal/x/x.go) - -#### Encapsulation - -Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`). -It should not be mixed into the instrumented component. - -Prefer this: - -```go -type SDKComponent struct { - inst *instrumentation -} - -type instrumentation struct { - inflight otelconv.SDKComponentInflight - exported otelconv.SDKComponentExported -} -``` - -To this: - -```go -// ❌ Avoid this pattern. -type SDKComponent struct { - /* other SDKComponent fields... */ - - inflight otelconv.SDKComponentInflight - exported otelconv.SDKComponentExported -} -``` - -The instrumentation code should not bloat the code being instrumented. -Likely, this means its own file, or its own package if it is complex or reused. - -#### Initialization - -Instrumentation setup should be explicit, side-effect free, and local to the relevant component. -Avoid relying on global or implicit [side effects][side-effect] for initialization. 
- -Encapsulate setup in constructor functions, ensuring clear ownership and scope: - -```go -import ( - "errors" - - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" - "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" -) - -type SDKComponent struct { - inst *instrumentation -} - -func NewSDKComponent(config Config) (*SDKComponent, error) { - inst, err := newInstrumentation() - if err != nil { - return nil, err - } - return &SDKComponent{inst: inst}, nil -} - -type instrumentation struct { - inflight otelconv.SDKComponentInflight - exported otelconv.SDKComponentExported -} - -func newInstrumentation() (*instrumentation, error) { - if !x.Observability.Enabled() { - return nil, nil - } - - meter := otel.GetMeterProvider().Meter( - "", - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - inst := &instrumentation{} - - var err, e error - inst.inflight, e = otelconv.NewSDKComponentInflight(meter) - err = errors.Join(err, e) - - inst.exported, e = otelconv.NewSDKComponentExported(meter) - err = errors.Join(err, e) - - return inst, err -} -``` - -```go -// ❌ Avoid this pattern. -func (c *Component) initObservability() { - // Initialize observability metrics - if !x.Observability.Enabled() { - return - } - - // Initialize observability metrics - c.inst = &instrumentation{/* ... */} -} -``` - -[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science) - -#### Performance - -When observability is disabled there should be little to no overhead. - -```go -func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { - if e.inst != nil { - attrs := expensiveOperation() - e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) - } - // Export spans... -} -``` - -```go -// ❌ Avoid this pattern. -func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { - attrs := expensiveOperation() - e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) 
- // Export spans... -} - -func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) { - if i == nil || i.inflight == nil { - return - } - i.inflight.Add(ctx, count, metric.WithAttributes(attrs...)) -} -``` - -When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead. - -##### Attribute and Option Allocation Management - -Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes. - -```go -var ( - attrPool = sync.Pool{ - New: func() any { - // Pre-allocate common capacity - knownCap := 8 // Adjust based on expected usage - s := make([]attribute.KeyValue, 0, knownCap) - // Return a pointer to avoid extra allocation on Put(). - return &s - }, - } - - addOptPool = &sync.Pool{ - New: func() any { - const n = 1 // WithAttributeSet - o := make([]metric.AddOption, 0, n) - // Return a pointer to avoid extra allocation on Put(). - return &o - }, - } -) - -func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) { - attrs := attrPool.Get().(*[]attribute.KeyValue) - defer func() { - *attrs = (*attrs)[:0] // Reset. - attrPool.Put(attrs) - }() - - *attrs = append(*attrs, baseAttrs...) - // Add any dynamic attributes. - *attrs = append(*attrs, semconv.OTelComponentName("exporter-1")) - - addOpt := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *addOpt = (*addOpt)[:0] - addOptPool.Put(addOpt) - }() - - set := attribute.NewSet(*attrs...) - *addOpt = append(*addOpt, metric.WithAttributeSet(set)) - - i.counter.Add(ctx, value, *addOpt...) -} -``` - -Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used. -This amortizes the cost of allocation and synchronization. 
-Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness. - -[`sync.Pool`]: https://pkg.go.dev/sync#Pool - -##### Cache common attribute sets for repeated measurements - -If a static set of attributes are used for measurements and they are known at compile time, pre-compute and cache these attributes. - -```go -type spanLiveSetKey struct { - sampled bool -} - -var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ - {true}: attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordAndSample, - ), - ), - {false}: attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordOnly, - ), - ), -} - -func spanLiveSet(sampled bool) attribute.Set { - key := spanLiveSetKey{sampled: sampled} - return spanLiveSetCache[key] -} -``` - -##### Benchmarking - -Always provide benchmarks when introducing or refactoring instrumentation. -Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios: - -```go -func BenchmarkExportSpans(b *testing.B) { - scenarios := []struct { - name string - obsEnabled bool - }{ - {"ObsDisabled", false}, - {"ObsEnabled", true}, - } - - for _, scenario := range scenarios { - b.Run(scenario.name, func(b *testing.B) { - b.Setenv( - "OTEL_GO_X_OBSERVABILITY", - strconv.FormatBool(scenario.obsEnabled), - ) - - exporter := NewExporter() - spans := generateTestSpans(100) - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - _ = exporter.ExportSpans(context.Background(), spans) - } - }) - } -} -``` - -#### Error Handling and Robustness - -Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible. 
- -```go -func newInstrumentation() (*instrumentation, error) { - if !x.Observability.Enabled() { - return nil, nil - } - - m := otel.GetMeterProvider().Meter(/* initialize meter */) - counter, err := otelconv.NewSDKComponentCounter(m) - // Use the partially initialized counter if available. - i := &instrumentation{counter: counter} - // Return any error to the caller. - return i, err -} -``` - -```go -// ❌ Avoid this pattern. -func newInstrumentation() *instrumentation { - if !x.Observability.Enabled() { - return nil, nil - } - - m := otel.GetMeterProvider().Meter(/* initialize meter */) - counter, err := otelconv.NewSDKComponentCounter(m) - if err != nil { - // ❌ Do not dump the error to the OTel Handler. Return it to the - // caller. - otel.Handle(err) - // ❌ Do not return nil if we can still use the partially initialized - // counter. - return nil - } - return &instrumentation{counter: counter} -} -``` - -If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`. - -#### Context Propagation - -Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context: - -```go -func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { - // Use the provided context for observability measurements - e.inst.recordSpanExportStarted(ctx, len(spans)) - - err := e.doExport(ctx, spans) - - if err != nil { - e.inst.recordSpanExportFailed(ctx, len(spans), err) - } else { - e.inst.recordSpanExportSucceeded(ctx, len(spans)) - } - - return err -} -``` - -```go -// ❌ Avoid this pattern. -func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { - // ❌ Do not break the context propagation. - e.inst.recordSpanExportStarted(context.Background(), len(spans)) - - err := e.doExport(ctx, spans) - - /* ... 
*/ - - return err -} -``` - -#### Semantic Conventions Compliance - -All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md). - -Use the metric semantic conventions convenience package [otelconv](./semconv/v1.40.0/otelconv/metric.go). - -##### Component Identification - -Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes). - -If a component is not a well-known type specified in the semantic conventions, use the package path scope type as a stable identifier. - -```go -componentType := "go.opentelemetry.io/otel/sdk/trace.Span" -``` - -```go -// ❌ Do not do this. -componentType := "trace-span" -``` - -The component name should be a stable unique identifier for the specific instance of the component. - -Use a global counter to ensure uniqueness if necessary. - -```go -// Unique 0-based ID counter for component instances. -var componentIDCounter atomic.Int64 - -// nextID returns the next unique ID for a component. -func nextID() int64 { - return componentIDCounter.Add(1) - 1 -} - -// componentName returns a unique name for the component instance. -func componentName() attribute.KeyValue { - id := nextID() - name := fmt.Sprintf("%s/%d", componentType, id) - return semconv.OTelComponentName(name) -} -``` - -The component ID will need to be resettable for deterministic testing. -If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter. -See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference. 
- -#### Testing - -Use deterministic testing with isolated state: - -```go -func TestObservability(t *testing.T) { - // Restore state after test to ensure this does not affect other tests. - prev := otel.GetMeterProvider() - t.Cleanup(func() { otel.SetMeterProvider(prev) }) - - // Isolate the meter provider for deterministic testing - reader := metric.NewManualReader() - meterProvider := metric.NewMeterProvider(metric.WithReader(reader)) - otel.SetMeterProvider(meterProvider) - - // Use t.Setenv to ensure environment variable is restored after test. - t.Setenv("OTEL_GO_X_OBSERVABILITY", "true") - - // Reset component ID counter to ensure deterministic component names. - componentIDCounter.Store(0) - - /* ... test code ... */ -} -``` - -Test order should not affect results. -Ensure that any global state (e.g. component ID counters) is reset between tests. - -## Approvers and Maintainers - -### Maintainers - -- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) -- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70)) -- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2)) -- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) -- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) - -For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer). 
- -### Approvers - -- [Flc](https://github.com/flc1125), Independent - -For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). - -### Triagers - -- [Alex Kats](https://github.com/akats7), Capital One - -For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). - -### Emeritus - -- [Aaron Clawson](https://github.com/MadVikingGod) -- [Anthony Mirabella](https://github.com/Aneurysm9) -- [Cheng-Zhen Yang](https://github.com/scorpionknifes) -- [Chester Cheung](https://github.com/hanyuancheung) -- [Evan Torrie](https://github.com/evantorrie) -- [Gustavo Silva Paiva](https://github.com/paivagustavo) -- [Josh MacDonald](https://github.com/jmacd) -- [Liz Fong-Jones](https://github.com/lizthegrey) - -For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). - -### Become an Approver or a Maintainer - -See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). - -[Approver]: #approvers -[Maintainer]: #maintainers -[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl -[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE deleted file mode 100644 index f1aee0f110..0000000000 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ /dev/null @@ -1,231 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- - -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile deleted file mode 100644 index 42466f2d6a..0000000000 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -TOOLS_MOD_DIR := ./internal/tools - -ALL_DOCS := $(shell find . -name '*.md' -type f | sort) -ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) -OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) -ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) - -GO = go -TIMEOUT = 60 - -# User to run as in docker images. 
-DOCKER_USER=$(shell id -u):$(shell id -g) -DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile - -.DEFAULT_GOAL := precommit - -.PHONY: precommit ci -precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage - -# Tools - -TOOLS = $(CURDIR)/.tools - -$(TOOLS): - @mkdir -p $@ -$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) - cd $(TOOLS_MOD_DIR) && \ - $(GO) build -o $@ $(PACKAGE) - -MULTIMOD = $(TOOLS)/multimod -$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod - -CROSSLINK = $(TOOLS)/crosslink -$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink - -SEMCONVKIT = $(TOOLS)/semconvkit -SEMCONVKIT_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/semconvkit -type f)) -$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -$(TOOLS)/semconvkit: $(SEMCONVKIT_FILES) - -VERIFYREADMES = $(TOOLS)/verifyreadmes -VERIFYREADMES_FILES := $(sort $(shell find $(TOOLS_MOD_DIR)/verifyreadmes -type f)) -$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes -$(TOOLS)/verifyreadmes: $(VERIFYREADMES_FILES) - -GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint - -MISSPELL = $(TOOLS)/misspell -$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell - -GOCOVMERGE = $(TOOLS)/gocovmerge -$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge - -STRINGER = $(TOOLS)/stringer -$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer - -PORTO = $(TOOLS)/porto -$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto - -GOTMPL = $(TOOLS)/gotmpl -$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl - -GORELEASE = $(TOOLS)/gorelease -$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease - 
-GOVULNCHECK = $(TOOLS)/govulncheck -$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck - -.PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) - -# Virtualized python tools via docker - -# The directory where the virtual environment is created. -VENVDIR := venv - -# The directory where the python tools are installed. -PYTOOLS := $(VENVDIR)/bin - -# The pip executable in the virtual environment. -PIP := $(PYTOOLS)/pip - -# The directory in the docker image where the current directory is mounted. -WORKDIR := /workdir - -# The python image to use for the virtual environment. -PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) - -# Run the python image with the current directory mounted. -DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) - -# Create a virtual environment for Python tools. -$(PYTOOLS): -# The `--upgrade` flag is needed to ensure that the virtual environment is -# created with the latest pip version. - @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip" - -# Install python packages into the virtual environment. -$(PYTOOLS)/%: $(PYTOOLS) - @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt - -CODESPELL = $(PYTOOLS)/codespell -$(CODESPELL): PACKAGE=codespell - -# Generate - -.PHONY: generate -generate: go-generate vanity-import-fix - -.PHONY: go-generate -go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) -go-generate/%: DIR=$* -go-generate/%: $(STRINGER) $(GOTMPL) - @echo "$(GO) generate $(DIR)/..." \ - && cd $(DIR) \ - && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... - -.PHONY: vanity-import-fix -vanity-import-fix: $(PORTO) - @$(PORTO) --include-internal -w . - -# Generate go.work file for local development. 
-.PHONY: go-work -go-work: $(CROSSLINK) - $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7 - -# Build - -.PHONY: build - -build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) -build/%: DIR=$* -build/%: - @echo "$(GO) build $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) build ./... - -build-tests/%: DIR=$* -build-tests/%: - @echo "$(GO) build tests $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null - -# Tests - -TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz -.PHONY: $(TEST_TARGETS) test -test-default test-race: ARGS=-race -test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. -test-short: ARGS=-short -test-fuzz: ARGS=-fuzztime=10s -fuzz -test-verbose: ARGS=-v -race -test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race -test-concurrent-safe: TIMEOUT=120 -$(TEST_TARGETS): test -test: $(OTEL_GO_MOD_DIRS:%=test/%) -test/%: DIR=$* -test/%: - @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) - -COVERAGE_MODE = atomic -COVERAGE_PROFILE = coverage.out -.PHONY: test-coverage -test-coverage: $(GOCOVMERGE) - @set -e; \ - printf "" > coverage.txt; \ - for dir in $(ALL_COVERAGE_MOD_DIRS); do \ - echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ - (cd "$${dir}" && \ - $(GO) list ./... \ - | grep -v third_party \ - | grep -v 'semconv/v.*' \ - | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ - $(GO) tool cover -html=coverage.out -o coverage.html); \ - done; \ - $(GOCOVMERGE) $$(find . 
-name coverage.out) > coverage.txt - -.PHONY: benchmark -benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) -benchmark/%: - cd $* && $(GO) test -run='^$$' -bench=. $(ARGS) ./... - -print-sharded-benchmarks: - @echo $(OTEL_GO_MOD_DIRS) | jq -cR 'split(" ")' - -.PHONY: golangci-lint golangci-lint-fix -golangci-lint-fix: ARGS=--fix -golangci-lint-fix: golangci-lint -golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) -golangci-lint/%: DIR=$* -golangci-lint/%: $(GOLANGCI_LINT) - @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ - && cd $(DIR) \ - && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) - -.PHONY: crosslink -crosslink: $(CROSSLINK) - @echo "Updating intra-repository dependencies in all go modules" \ - && $(CROSSLINK) --root=$(shell pwd) --prune - -.PHONY: go-mod-tidy -go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) -go-mod-tidy/%: DIR=$* -go-mod-tidy/%: crosslink - @echo "$(GO) mod tidy in $(DIR)" \ - && cd $(DIR) \ - && $(GO) mod tidy -compat=1.21 - -.PHONY: lint -lint: misspell go-mod-tidy golangci-lint - -.PHONY: vanity-import-check -vanity-import-check: $(PORTO) - @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) - -.PHONY: misspell -misspell: $(MISSPELL) - @$(MISSPELL) -w $(ALL_DOCS) - -.PHONY: govulncheck -govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) -govulncheck/%: DIR=$* -govulncheck/%: $(GOVULNCHECK) - @echo "govulncheck ./... in $(DIR)" \ - && cd $(DIR) \ - && $(GOVULNCHECK) ./... - -.PHONY: codespell -codespell: $(CODESPELL) - @$(DOCKERPY) $(CODESPELL) - -.PHONY: toolchain-check -toolchain-check: - @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ - awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ - done); \ - if [ -n "$${toolchainRes}" ]; then \ - echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ - exit 1; \ - fi - -.PHONY: license-check -license-check: - @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! 
-path './.git/*' ) ; do \ - awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: check-clean-work-tree -check-clean-work-tree: - @if ! git diff --quiet; then \ - echo; \ - echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ - echo; \ - git status; \ - exit 1; \ - fi - -# The weaver docker image to use for semconv-generate. -WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) - -SEMCONVPKG ?= "semconv/" -.PHONY: semconv-generate -semconv-generate: $(SEMCONVKIT) - [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) - # Ensure the target directory for source code is available. - mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG} - # Note: We mount a home directory for downloading/storing the semconv repository. - # Weaver will automatically clean the cache when finished, but the directories will remain. 
- mkdir -p ~/.weaver - docker run --rm \ - -u $(DOCKER_USER) \ - --env HOME=/tmp/weaver \ - --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \ - --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ - --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ - $(WEAVER_IMAGE) registry generate \ - --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \ - --templates=/home/weaver/templates \ - --param tag=$(TAG) \ - go \ - /home/weaver/target - $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)" - -.PHONY: gorelease -gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) -gorelease/%: DIR=$* -gorelease/%:| $(GORELEASE) - @echo "gorelease in $(DIR):" \ - && cd $(DIR) \ - && $(GORELEASE) \ - || echo "" - -.PHONY: verify-mods -verify-mods: $(MULTIMOD) - $(MULTIMOD) verify - -.PHONY: prerelease -prerelease: verify-mods - @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) prerelease -m ${MODSET} - -COMMIT ?= "HEAD" -.PHONY: add-tags -add-tags: verify-mods - @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} - -MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) -.PHONY: lint-markdown -lint-markdown: - docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md - -.PHONY: verify-readmes -verify-readmes: $(VERIFYREADMES) - $(VERIFYREADMES) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md deleted file mode 100644 index 16a72004c0..0000000000 --- a/vendor/go.opentelemetry.io/otel/README.md +++ /dev/null @@ -1,115 +0,0 @@ -# OpenTelemetry-Go - 
-[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) -[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) -[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) -[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) -[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) -[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) -[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) - -OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). -It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. 
- -## Project Status - -| Signal | Status | -|---------|--------------------| -| Traces | Stable | -| Metrics | Stable | -| Logs | Beta[^1] | - -Progress and status specific to this repository is tracked in our -[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) -and -[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). - -Project versioning information and stability guarantees can be found in the -[versioning documentation](VERSIONING.md). - -[^1]: https://github.com/orgs/open-telemetry/projects/43 - -### Compatibility - -OpenTelemetry-Go ensures compatibility with the current supported versions of -the [Go language](https://golang.org/doc/devel/release#policy): - -> Each major Go release is supported until there are two newer major releases. -> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. - -For versions of Go that are no longer supported upstream, opentelemetry-go will -stop ensuring compatibility with these versions in the following manner: - -- A minor release of opentelemetry-go will be made to add support for the new - supported release of Go. -- The following minor release of opentelemetry-go will remove compatibility - testing for the oldest (now archived upstream) version of Go. This, and - future, releases of opentelemetry-go may include features only supported by - the currently supported versions of Go. - -Currently, this project supports the following environments. 
- -| OS | Go Version | Architecture | -|----------|------------|--------------| -| Ubuntu | 1.26 | amd64 | -| Ubuntu | 1.25 | amd64 | -| Ubuntu | 1.26 | 386 | -| Ubuntu | 1.25 | 386 | -| Ubuntu | 1.26 | arm64 | -| Ubuntu | 1.25 | arm64 | -| macOS | 1.26 | amd64 | -| macOS | 1.25 | amd64 | -| macOS | 1.26 | arm64 | -| macOS | 1.25 | arm64 | -| Windows | 1.26 | amd64 | -| Windows | 1.25 | amd64 | -| Windows | 1.26 | 386 | -| Windows | 1.25 | 386 | - -While this project should work for other systems, no compatibility guarantees -are made for those systems currently. - -## Getting Started - -You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/). - -OpenTelemetry's goal is to provide a single set of APIs to capture distributed -traces and metrics from your application and send them to an observability -platform. This project allows you to do just that for applications written in -Go. There are two steps to this process: instrument your application, and -configure an exporter. - -### Instrumentation - -To start capturing distributed traces and metric events from your application -it first needs to be instrumented. The easiest way to do this is by using an -instrumentation library for your code. Be sure to check out [the officially -supported instrumentation -libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). - -If you need to extend the telemetry an instrumentation library provides or want -to build your own instrumentation for your application directly you will need -to use the -[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) -are a good way to see some practical uses of this process. - -### Export - -Now that your application is instrumented to collect telemetry, it needs an -export pipeline to send that telemetry to an observability platform. 
- -All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). - -| Exporter | Logs | Metrics | Traces | -|---------------------------------------|:----:|:-------:|:------:| -| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | | ✓ | - -## Contributing - -See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md deleted file mode 100644 index 6aff7548c9..0000000000 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ /dev/null @@ -1,220 +0,0 @@ -# Release Process - -## Create a `Version Release` issue - -Create a `Version Release` issue to track the release process. - -## Semantic Convention Upgrade - -### Semantic Convention Generation - -New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. -The `semconv-generate` make target is used for this. - -1. Set the `TAG` environment variable to the semantic convention tag you want to generate. -2. Run the `make semconv-generate ...` target from this repository. - -For example, - -```sh -export TAG="v1.30.0" # Change to the release version you are generating. -make semconv-generate # Uses the exported TAG. -``` - -This should create a new sub-package of [`semconv`](./semconv). -Ensure things look correct before submitting a pull request to include the addition. - -The `CHANGELOG.md` should also be updated to reflect the new changes: - -```md -- The `go.opentelemetry.io/otel/semconv/` package. The package contains semantic conventions from the `` version of the OpenTelemetry Semantic Conventions. See the [migration documentation](./semconv//MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/`. 
(#PR_NUMBER) -``` - -> **Tip:** Change to the release and prior version to match the changes - -### Update semconv imports - -Once the new semconv module has been generated, update all semconv imports throughout the codebase to reference the new version: - -```go -// Before -semconv "go.opentelemetry.io/otel/semconv/v1.37.0" -"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" - - -// After -semconv "go.opentelemetry.io/otel/semconv/v1.39.0" -"go.opentelemetry.io/otel/semconv/v1.39.0/otelconv" -``` - -Once complete, run `make` to check for any compilation or test failures. - -#### Handling attribute changes - -Some semconv releases might add new attributes or impact attributes that are currently being used. Changes could stem from a simple renaming, to more complex changes like merging attributes and property values being changed. - -One should update the code to the new attributes that supersede the impacted ones, hence sticking to the semantic conventions. However, legacy attributes might still be emitted in accordance to the `OTEL_SEMCONV_STABILITY_OPT_IN` environment variable. - -For an example on how such migration might have to be tracked and performed, see issue [#7806](https://github.com/open-telemetry/opentelemetry-go/issues/7806). - -### Go contrib linter update - -Update [.golangci.yml](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/.golangci.yml) in [opentelemetry-go-contrib](https://github.com/open-telemetry/opentelemetry-go-contrib/) to mandate the new semconv version. - -## Breaking changes validation - -You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. - -You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). 
- -## Verify changes for contrib repository - -If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. - -Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. - -## Pre-Release - -First, decide which module sets will be released and update their versions -in `versions.yaml`. Commit this change to a new branch. - -Update go.mod for submodules to depend on the new release which will happen in the next step. - -1. Run the `prerelease` make target. It creates a branch - `prerelease__` that will contain all release changes. - - ``` - make prerelease MODSET= - ``` - -2. Verify the changes. - - ``` - git diff ...prerelease__ - ``` - - This should have changed the version for all modules to be ``. - If these changes look correct, merge them into your pre-release branch: - - ```go - git merge prerelease__ - ``` - -3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand. - To verify this, you can look directly at the commits since the ``. - - ``` - git --no-pager log --pretty=oneline "..HEAD" - ``` - - - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). - - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - - Update all the appropriate links at the bottom. - -4. Push the changes to upstream and create a Pull Request on GitHub. - Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. - -## Tag - -Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. 
- -***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step! -Failure to do so will leave things in a broken state. As long as you do not -change `versions.yaml` between pre-release and this step, things should be fine. - -***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189). -It is critical you make sure the version you push upstream is correct. -[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). - -1. For each module set that will be released, run the `add-tags` make target - using the `` of the commit on the main branch for the merged Pull Request. - - ``` - make add-tags MODSET= COMMIT= - ``` - - It should only be necessary to provide an explicit `COMMIT` value if the - current `HEAD` of your working directory is not the correct commit. - -2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`). - Make sure you push all sub-modules as well. - - ``` - git push upstream - git push upstream - ... - ``` - -## Sign artifacts - -To ensure we comply with CNCF best practices, we need to sign the release artifacts. - -Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag. -Both archives need to be signed with your GPG key. - -You can use [this script] to verify the contents of the archives before signing them. - -To find your GPG key ID, run: - -```terminal -gpg --list-secret-keys --keyid-format=long -``` - -The key ID is the 16-character string after `sec rsa4096/` (or similar). 
- -Set environment variables and sign both artifacts: - -```terminal -export VERSION="" # e.g., v1.32.0 -export KEY_ID="" - -gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz -gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip -``` - -You can verify the signatures with: - -```terminal -gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz -gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip -``` - -[this script]: https://github.com/MrAlias/attest-sh - -## Release - -Finally create a Release for the new `` on GitHub. -The release body should include all the release notes from the Changelog for this release. - -***IMPORTANT***: GitHub Releases are immutable once created. -You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later. - -## Post-Release - -### Contrib Repository - -Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. - -### Website Documentation - -Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go]. -Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. - -[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions -[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ -[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go - -### Close the milestone - -Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone. 
-This helps track what changes were included in each release. - -- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr) -- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged). - -Once all related issues and PRs have been added to the milestone, close the milestone. - -### Close the `Version Release` issue - -Once the todo list in the `Version Release` issue is complete, close the issue. diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml deleted file mode 100644 index 8041fc62e4..0000000000 --- a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml +++ /dev/null @@ -1,203 +0,0 @@ -header: - schema-version: "1.0.0" - expiration-date: "2026-08-04T00:00:00.000Z" - last-updated: "2025-08-04" - last-reviewed: "2025-08-04" - commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8 - project-url: https://github.com/open-telemetry/opentelemetry-go - project-release: "v1.37.0" - changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md - license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE - -project-lifecycle: - status: active - bug-fixes-only: false - core-maintainers: - - https://github.com/dmathieu - - https://github.com/dashpole - - https://github.com/pellared - - https://github.com/XSAM - - https://github.com/MrAlias - release-process: | - See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md - -contribution-policy: - accepts-pull-requests: true - 
accepts-automated-pull-requests: true - automated-tools-list: - - automated-tool: dependabot - action: allowed - comment: Automated dependency updates are accepted. - - automated-tool: renovatebot - action: allowed - comment: Automated dependency updates are accepted. - - automated-tool: opentelemetrybot - action: allowed - comment: Automated OpenTelemetry actions are accepted. - contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md - code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md - -documentation: - - https://pkg.go.dev/go.opentelemetry.io/otel - - https://opentelemetry.io/docs/instrumentation/go/ - -distribution-points: - - pkg:golang/go.opentelemetry.io/otel - - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus - - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test - - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing - - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace - - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp - - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace - - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin - - pkg:golang/go.opentelemetry.io/otel/metric - - pkg:golang/go.opentelemetry.io/otel/sdk - - pkg:golang/go.opentelemetry.io/otel/sdk/metric - - pkg:golang/go.opentelemetry.io/otel/trace - - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus - - pkg:golang/go.opentelemetry.io/otel/log - - pkg:golang/go.opentelemetry.io/otel/log/logtest - - pkg:golang/go.opentelemetry.io/otel/sdk/log - - 
pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest - - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog - - pkg:golang/go.opentelemetry.io/otel/schema - -security-artifacts: - threat-model: - threat-model-created: false - comment: | - No formal threat model created yet. - self-assessment: - self-assessment-created: false - comment: | - No formal self-assessment yet. - -security-testing: - - tool-type: sca - tool-name: Dependabot - tool-version: latest - tool-url: https://github.com/dependabot - tool-rulesets: - - built-in - integration: - ad-hoc: false - ci: true - before-release: true - comment: | - Automated dependency updates. - - tool-type: sast - tool-name: golangci-lint - tool-version: latest - tool-url: https://github.com/golangci/golangci-lint - tool-rulesets: - - built-in - integration: - ad-hoc: false - ci: true - before-release: true - comment: | - Static analysis in CI. - - tool-type: fuzzing - tool-name: OSS-Fuzz - tool-version: latest - tool-url: https://github.com/google/oss-fuzz - tool-rulesets: - - default - integration: - ad-hoc: false - ci: false - before-release: false - comment: | - OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details. - - tool-type: sast - tool-name: CodeQL - tool-version: latest - tool-url: https://github.com/github/codeql - tool-rulesets: - - default - integration: - ad-hoc: false - ci: true - before-release: true - comment: | - CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details. 
- - tool-type: sca - tool-name: govulncheck - tool-version: latest - tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck - tool-rulesets: - - default - integration: - ad-hoc: false - ci: true - before-release: true - comment: | - govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration. - -security-assessments: - - auditor-name: 7ASecurity - auditor-url: https://7asecurity.com - auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf - report-year: 2023 - comment: | - This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository. - -security-contacts: - - type: email - value: cncf-opentelemetry-security@lists.cncf.io - primary: true - - type: website - value: https://github.com/open-telemetry/opentelemetry-go/security/policy - primary: false - -vulnerability-reporting: - accepts-vulnerability-reports: true - email-contact: cncf-opentelemetry-security@lists.cncf.io - security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy - comment: | - Security issues should be reported via email or GitHub security policy page. 
- -dependencies: - third-party-packages: true - dependencies-lists: - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod - - 
https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod - - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod - dependencies-lifecycle: - policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md - comment: | - Dependency lifecycle managed via go.mod and renovatebot. - env-dependencies-policy: - policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md - comment: | - See contributing policy for environment usage. diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md deleted file mode 100644 index b27c9e84f5..0000000000 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ /dev/null @@ -1,224 +0,0 @@ -# Versioning - -This document describes the versioning policy for this repository. This policy -is designed so the following goals can be achieved. - -**Users are provided a codebase of value that is stable and secure.** - -## Policy - -* Versioning of this project will be idiomatic of a Go project using [Go - modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import - versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) - will be used. - * Versions will comply with [semver - 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. 
- * New methods may be added to exported API interfaces. All exported - interfaces that fall within this exception will include the following - paragraph in their public documentation. - - > Warning: methods may be added to this interface in minor releases. - - * If a module is version `v2` or higher, the major version of the module - must be included as a `/vN` at the end of the module paths used in - `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require - go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path - (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the - paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a - `@v2.0.1` in that example. One way to think about it is that the module - name now includes the `/v2`, so include `/v2` whenever you are using the - module name). - * If a module is version `v0` or `v1`, do not include the major version in - either the module path or the import path. - * Modules will be used to encapsulate signals and components. - * Experimental modules still under active development will be versioned at - `v0` to imply the stability guarantee defined by - [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). - - > Major version zero (0.y.z) is for initial development. Anything MAY - > change at any time. The public API SHOULD NOT be considered stable. - - * Mature modules for which we guarantee a stable public API will be versioned - with a major version greater than `v0`. - * The decision to make a module stable will be made on a case-by-case - basis by the maintainers of this project. - * Experimental modules will start their versioning at `v0.0.0` and will - increment their minor version when backwards incompatible changes are - released and increment their patch version when backwards compatible - changes are released. 
- * All stable modules that use the same major version number will use the - same entire version number. - * Stable modules may be released with an incremented minor or patch - version even though that module has not been changed, but rather so - that it will remain at the same version as other stable modules that - did undergo change. - * When an experimental module becomes stable a new stable module version - will be released and will include this now stable module. The new - stable module version will be an increment of the minor version number - and will be applied to all existing stable modules as well as the newly - stable module being released. -* Versioning of the associated [contrib - repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of - this project will be idiomatic of a Go project using [Go - modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import - versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) - will be used. - * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). - * If a module is version `v2` or higher, the - major version of the module must be included as a `/vN` at the end of the - module paths used in `go.mod` files (e.g., `module - go.opentelemetry.io/contrib/instrumentation/host/v2`, `require - go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the - package import path (e.g., `import - "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes - the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there - is both a `/v2` and a `@v2.0.1` in that example. One way to think about - it is that the module name now includes the `/v2`, so include `/v2` - whenever you are using the module name). - * If a module is version `v0` or `v1`, do not include the major version - in either the module path or the import path. 
- * In addition to public APIs, telemetry produced by stable instrumentation - will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboards. - * Modules will be used to encapsulate instrumentation, detectors, exporters, - propagators, and any other independent sets of related components. - * Experimental modules still under active development will be versioned at - `v0` to imply the stability guarantee defined by - [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). - - > Major version zero (0.y.z) is for initial development. Anything MAY - > change at any time. The public API SHOULD NOT be considered stable. - - * Mature modules for which we guarantee a stable public API and telemetry will - be versioned with a major version greater than `v0`. - * Experimental modules will start their versioning at `v0.0.0` and will - increment their minor version when backwards incompatible changes are - released and increment their patch version when backwards compatible - changes are released. - * Stable contrib modules cannot depend on experimental modules from this - project. - * All stable contrib modules of the same major version with this project - will use the same entire version as this project. - * Stable modules may be released with an incremented minor or patch - version even though that module's code has not been changed. Instead - the only change that will have been included is to have updated that - modules dependency on this project's stable APIs. - * When an experimental module in contrib becomes stable a new stable - module version will be released and will include this now stable - module. The new stable module version will be an increment of the minor - version number and will be applied to all existing stable contrib - modules, this project's modules, and the newly stable module being - released. - * Contrib modules will be kept up to date with this project's releases. 
- * Due to the dependency contrib modules will implicitly have on this - project's modules the release of stable contrib modules to match the - released version number will be staggered after this project's release. - There is no explicit time guarantee for how long after this projects - release the contrib release will be. Effort should be made to keep them - as close in time as possible. - * No additional stable release in this project can be made until the - contrib repository has a matching stable release. - * No release can be made in the contrib repository after this project's - stable release except for a stable release of the contrib repository. -* GitHub releases will be made for all releases. -* Go modules will be made available at Go package mirrors. - -## Example Versioning Lifecycle - -To better understand the implementation of the above policy the following -example is provided. This project is simplified to include only the following -modules and their versions: - -* `otel`: `v0.14.0` -* `otel/trace`: `v0.14.0` -* `otel/metric`: `v0.14.0` -* `otel/baggage`: `v0.14.0` -* `otel/sdk/trace`: `v0.14.0` -* `otel/sdk/metric`: `v0.14.0` - -These modules have been developed to a point where the `otel/trace`, -`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they -should be considered for a stable release. The `otel/metric` and -`otel/sdk/metric` are still under active development and the `otel` module -depends on both `otel/trace` and `otel/metric`. - -The `otel` package is refactored to remove its dependencies on `otel/metric` so -it can be released as stable as well. With that done the following release -candidates are made: - -* `otel`: `v1.0.0-RC1` -* `otel/trace`: `v1.0.0-RC1` -* `otel/baggage`: `v1.0.0-RC1` -* `otel/sdk/trace`: `v1.0.0-RC1` - -The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. - -A few minor issues are discovered in the `otel/trace` package. 
These issues are -resolved with some minor, but backwards incompatible, changes and are released -as a second release candidate: - -* `otel`: `v1.0.0-RC2` -* `otel/trace`: `v1.0.0-RC2` -* `otel/baggage`: `v1.0.0-RC2` -* `otel/sdk/trace`: `v1.0.0-RC2` - -Notice that all module version numbers are incremented to adhere to our -versioning policy. - -After these release candidates have been evaluated to satisfaction, they are -released as version `v1.0.0`. - -* `otel`: `v1.0.0` -* `otel/trace`: `v1.0.0` -* `otel/baggage`: `v1.0.0` -* `otel/sdk/trace`: `v1.0.0` - -Since both the `go` utility and the Go module system support [the semantic -versioning definition of -precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release -will correctly be interpreted as the successor to the previous release -candidates. - -Active development of this project continues. The `otel/metric` module now has -backwards incompatible changes to its API that need to be released and the -`otel/baggage` module has a minor bug fix that needs to be released. The -following release is made: - -* `otel`: `v1.0.1` -* `otel/trace`: `v1.0.1` -* `otel/metric`: `v0.15.0` -* `otel/baggage`: `v1.0.1` -* `otel/sdk/trace`: `v1.0.1` -* `otel/sdk/metric`: `v0.15.0` - -Notice that, again, all stable module versions are incremented in unison and -the `otel/sdk/metric` package, which depends on the `otel/metric` package, also -bumped its version. This bump of the `otel/sdk/metric` package makes sense -given their coupling, though it is not explicitly required by our versioning -policy. - -As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a -point where they should be evaluated for stability. 
The `otel` module is -reintegrated with the `otel/metric` package and the following release is made: - -* `otel`: `v1.1.0-RC1` -* `otel/trace`: `v1.1.0-RC1` -* `otel/metric`: `v1.1.0-RC1` -* `otel/baggage`: `v1.1.0-RC1` -* `otel/sdk/trace`: `v1.1.0-RC1` -* `otel/sdk/metric`: `v1.1.0-RC1` - -All the modules are evaluated and determined to a viable stable release. They -are then released as version `v1.1.0` (the minor version is incremented to -indicate the addition of new signal). - -* `otel`: `v1.1.0` -* `otel/trace`: `v1.1.0` -* `otel/metric`: `v1.1.0` -* `otel/baggage`: `v1.1.0` -* `otel/sdk/trace`: `v1.1.0` -* `otel/sdk/metric`: `v1.1.0` diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md deleted file mode 100644 index 5b3da8f14c..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Attribute - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go deleted file mode 100644 index eef51ebc2a..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package attribute provides key and value attributes. 
-package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go deleted file mode 100644 index 771dd69c55..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "bytes" - "sync" - "sync/atomic" -) - -type ( - // Encoder is a mechanism for serializing an attribute set into a specific - // string representation that supports caching, to avoid repeated - // serialization. An example could be an exporter encoding the attribute - // set into a wire representation. - Encoder interface { - // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by an attribute.Set. - Encode(iterator Iterator) string - - // ID returns a value that is unique for each class of attribute - // encoder. Attribute encoders allocate these using `NewEncoderID`. - ID() EncoderID - } - - // EncoderID is used to identify distinct Encoder - // implementations, for caching encoded results. - EncoderID struct { - value uint64 - } - - // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of - // allocations used in encoding attributes. This implementation encodes a - // comma-separated list of key=value, with '/'-escaping of '=', ',', and - // '\'. - defaultAttrEncoder struct { - // pool is a pool of attribute set builders. The buffers in this pool - // grow to a size that most attribute encodings will not allocate new - // memory. - pool sync.Pool // *bytes.Buffer - } -) - -// escapeChar is used to ensure uniqueness of the attribute encoding where -// keys or values contain either '=' or ','. 
Since there is no parser needed -// for this encoding and its only requirement is to be unique, this choice is -// arbitrary. Users will see these in some exporters (e.g., stdout), so the -// backslash ('\') is used as a conventional choice. -const escapeChar = '\\' - -var ( - _ Encoder = &defaultAttrEncoder{} - - // encoderIDCounter is for generating IDs for other attribute encoders. - encoderIDCounter atomic.Uint64 - - defaultEncoderOnce sync.Once - defaultEncoderID = NewEncoderID() - defaultEncoderInstance *defaultAttrEncoder -) - -// NewEncoderID returns a unique attribute encoder ID. It should be called -// once per each type of attribute encoder. Preferably in init() or in var -// definition. -func NewEncoderID() EncoderID { - return EncoderID{value: encoderIDCounter.Add(1)} -} - -// DefaultEncoder returns an attribute encoder that encodes attributes in such -// a way that each escaped attribute's key is followed by an equal sign and -// then by an escaped attribute's value. All key-value pairs are separated by -// a comma. -// -// Escaping is done by prepending a backslash before either a backslash, equal -// sign or a comma. -func DefaultEncoder() Encoder { - defaultEncoderOnce.Do(func() { - defaultEncoderInstance = &defaultAttrEncoder{ - pool: sync.Pool{ - New: func() any { - return &bytes.Buffer{} - }, - }, - } - }) - return defaultEncoderInstance -} - -// Encode is a part of an implementation of the AttributeEncoder interface. 
-func (d *defaultAttrEncoder) Encode(iter Iterator) string { - buf := d.pool.Get().(*bytes.Buffer) - defer d.pool.Put(buf) - buf.Reset() - - for iter.Next() { - i, keyValue := iter.IndexedAttribute() - if i > 0 { - _ = buf.WriteByte(',') - } - copyAndEscape(buf, string(keyValue.Key)) - - _ = buf.WriteByte('=') - - if keyValue.Value.Type() == STRING { - copyAndEscape(buf, keyValue.Value.AsString()) - } else { - _, _ = buf.WriteString(keyValue.Value.Emit()) - } - } - return buf.String() -} - -// ID is a part of an implementation of the AttributeEncoder interface. -func (*defaultAttrEncoder) ID() EncoderID { - return defaultEncoderID -} - -// copyAndEscape escapes `=`, `,` and its own escape character (`\`), -// making the default encoding unique. -func copyAndEscape(buf *bytes.Buffer, val string) { - for _, ch := range val { - switch ch { - case '=', ',', escapeChar: - _ = buf.WriteByte(escapeChar) - } - _, _ = buf.WriteRune(ch) - } -} - -// Valid reports whether this encoder ID was allocated by -// [NewEncoderID]. Invalid encoder IDs will not be cached. -func (id EncoderID) Valid() bool { - return id.value != 0 -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go deleted file mode 100644 index 624ebbe381..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Filter supports removing certain attributes from attribute sets. When -// the filter returns true, the attribute will be kept in the filtered -// attribute set. When the filter returns false, the attribute is excluded -// from the filtered attribute set, and the attribute instead appears in -// the removed list of excluded attributes. 
-type Filter func(KeyValue) bool - -// NewAllowKeysFilter returns a Filter that only allows attributes with one of -// the provided keys. -// -// If keys is empty a deny-all filter is returned. -func NewAllowKeysFilter(keys ...Key) Filter { - if len(keys) == 0 { - return func(KeyValue) bool { return false } - } - - allowed := make(map[Key]struct{}, len(keys)) - for _, k := range keys { - allowed[k] = struct{}{} - } - return func(kv KeyValue) bool { - _, ok := allowed[kv.Key] - return ok - } -} - -// NewDenyKeysFilter returns a Filter that only allows attributes -// that do not have one of the provided keys. -// -// If keys is empty an allow-all filter is returned. -func NewDenyKeysFilter(keys ...Key) Filter { - if len(keys) == 0 { - return func(KeyValue) bool { return true } - } - - forbid := make(map[Key]struct{}, len(keys)) - for _, k := range keys { - forbid[k] = struct{}{} - } - return func(kv KeyValue) bool { - _, ok := forbid[kv.Key] - return !ok - } -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go deleted file mode 100644 index b09caaa6d7..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/hash.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "fmt" - "reflect" - - "go.opentelemetry.io/otel/attribute/internal/xxhash" -) - -// Type identifiers. These identifiers are hashed before the value of the -// corresponding type. This is done to distinguish values that are hashed with -// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and -// int64(0)). -// -// These are all 8 byte length strings converted to a uint64 representation. A -// uint64 is used instead of the string directly as an optimization, it avoids -// the for loop in [xxhash] which adds minor overhead. 
-const ( - boolID uint64 = 7953749933313450591 // "_boolean" (little endian) - int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian) - float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian) - stringID uint64 = 6874584755375207263 // "_string_" (little endian) - boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian) - int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) - float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) - stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) - emptyID uint64 = 7305809155345288421 // "__empty_" (little endian) -) - -// hashKVs returns a new xxHash64 hash of kvs. -func hashKVs(kvs []KeyValue) uint64 { - h := xxhash.New() - for _, kv := range kvs { - h = hashKV(h, kv) - } - return h.Sum64() -} - -// hashKV returns the xxHash64 hash of kv with h as the base. -func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { - h = h.String(string(kv.Key)) - - switch kv.Value.Type() { - case BOOL: - h = h.Uint64(boolID) - h = h.Uint64(kv.Value.numeric) - case INT64: - h = h.Uint64(int64ID) - h = h.Uint64(kv.Value.numeric) - case FLOAT64: - h = h.Uint64(float64ID) - // Assumes numeric stored with math.Float64bits. 
- h = h.Uint64(kv.Value.numeric) - case STRING: - h = h.Uint64(stringID) - h = h.String(kv.Value.stringly) - case BOOLSLICE: - h = h.Uint64(boolSliceID) - rv := reflect.ValueOf(kv.Value.slice) - for i := 0; i < rv.Len(); i++ { - h = h.Bool(rv.Index(i).Bool()) - } - case INT64SLICE: - h = h.Uint64(int64SliceID) - rv := reflect.ValueOf(kv.Value.slice) - for i := 0; i < rv.Len(); i++ { - h = h.Int64(rv.Index(i).Int()) - } - case FLOAT64SLICE: - h = h.Uint64(float64SliceID) - rv := reflect.ValueOf(kv.Value.slice) - for i := 0; i < rv.Len(); i++ { - h = h.Float64(rv.Index(i).Float()) - } - case STRINGSLICE: - h = h.Uint64(stringSliceID) - rv := reflect.ValueOf(kv.Value.slice) - for i := 0; i < rv.Len(); i++ { - h = h.String(rv.Index(i).String()) - } - case EMPTY: - h = h.Uint64(emptyID) - default: - // Logging is an alternative, but using the internal logger here - // causes an import cycle so it is not done. - v := kv.Value.AsInterface() - msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v) - panic(msg) - } - return h -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go deleted file mode 100644 index d9f51fa2d7..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package attribute provide several helper functions for some commonly used -logic of processing attributes. -*/ -package attribute // import "go.opentelemetry.io/otel/attribute/internal" - -import ( - "reflect" -) - -// sliceElem is the exact set of element types stored in attribute slice values. -// Using a closed set prevents accidental instantiations for unsupported types. -type sliceElem interface { - bool | int64 | float64 | string -} - -// SliceValue converts a slice into an array with the same elements. 
-func SliceValue[T sliceElem](v []T) any { - // Keep only the common tiny-slice cases out of reflection. Extending this - // much further increases code size for diminishing benefit while larger - // slices still need the generic reflective path to preserve comparability. - // This matches the short lengths that show up most often in local - // benchmarks and semantic convention examples while leaving larger, less - // predictable slices on the generic reflective path. - switch len(v) { - case 0: - return [0]T{} - case 1: - return [1]T{v[0]} - case 2: - return [2]T{v[0], v[1]} - case 3: - return [3]T{v[0], v[1], v[2]} - } - - return sliceValueReflect(v) -} - -// AsSlice converts an array into a slice with the same elements. -func AsSlice[T sliceElem](v any) []T { - // Mirror the small fixed-array fast path used by SliceValue. - switch a := v.(type) { - case [0]T: - return []T{} - case [1]T: - return []T{a[0]} - case [2]T: - return []T{a[0], a[1]} - case [3]T: - return []T{a[0], a[1], a[2]} - } - - return asSliceReflect[T](v) -} - -func sliceValueReflect[T sliceElem](v []T) any { - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[T]())).Elem() - reflect.Copy(cp, reflect.ValueOf(v)) - return cp.Interface() -} - -func asSliceReflect[T sliceElem](v any) []T { - rv := reflect.ValueOf(v) - if !rv.IsValid() || rv.Kind() != reflect.Array || rv.Type().Elem() != reflect.TypeFor[T]() { - return nil - } - cpy := make([]T, rv.Len()) - if len(cpy) > 0 { - _ = reflect.Copy(reflect.ValueOf(cpy), rv) - } - return cpy -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go deleted file mode 100644 index 113a978383..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package xxhash provides a wrapper around the xxhash library for 
attribute hashing. -package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash" - -import ( - "encoding/binary" - "math" - - "github.com/cespare/xxhash/v2" -) - -// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values. -type Hash struct { - d *xxhash.Digest -} - -// New returns a new initialized xxHash64 hasher. -func New() Hash { - return Hash{d: xxhash.New()} -} - -func (h Hash) Uint64(val uint64) Hash { - var buf [8]byte - binary.LittleEndian.PutUint64(buf[:], val) - // errors from Write are always nil for xxhash - // if it returns an err then panic - _, err := h.d.Write(buf[:]) - if err != nil { - panic("xxhash write of uint64 failed: " + err.Error()) - } - return h -} - -func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function. - if val { - return h.Uint64(1) - } - return h.Uint64(0) -} - -func (h Hash) Float64(val float64) Hash { - return h.Uint64(math.Float64bits(val)) -} - -func (h Hash) Int64(val int64) Hash { - return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing. -} - -func (h Hash) String(val string) Hash { - // errors from WriteString are always nil for xxhash - // if it returns an err then panic - _, err := h.d.WriteString(val) - if err != nil { - panic("xxhash write of string failed: " + err.Error()) - } - return h -} - -// Sum64 returns the current hash value. -func (h Hash) Sum64() uint64 { - return h.d.Sum64() -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go deleted file mode 100644 index 8df6249f02..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Iterator allows iterating over the set of attributes in order, sorted by -// key. 
-type Iterator struct { - storage *Set - idx int -} - -// MergeIterator supports iterating over two sets of attributes while -// eliminating duplicate values from the combined set. The first iterator -// value takes precedence. -type MergeIterator struct { - one oneIterator - two oneIterator - current KeyValue -} - -type oneIterator struct { - iter Iterator - done bool - attr KeyValue -} - -// Next moves the iterator to the next position. -// Next reports whether there are more attributes. -func (i *Iterator) Next() bool { - i.idx++ - return i.idx < i.Len() -} - -// Label returns current KeyValue. Must be called only after Next returns -// true. -// -// Deprecated: Use Attribute instead. -func (i *Iterator) Label() KeyValue { - return i.Attribute() -} - -// Attribute returns the current KeyValue of the Iterator. It must be called -// only after Next returns true. -func (i *Iterator) Attribute() KeyValue { - kv, _ := i.storage.Get(i.idx) - return kv -} - -// IndexedLabel returns current index and attribute. Must be called only -// after Next returns true. -// -// Deprecated: Use IndexedAttribute instead. -func (i *Iterator) IndexedLabel() (int, KeyValue) { - return i.idx, i.Attribute() -} - -// IndexedAttribute returns current index and attribute. Must be called only -// after Next returns true. -func (i *Iterator) IndexedAttribute() (int, KeyValue) { - return i.idx, i.Attribute() -} - -// Len returns a number of attributes in the iterated set. -func (i *Iterator) Len() int { - return i.storage.Len() -} - -// ToSlice is a convenience function that creates a slice of attributes from -// the passed iterator. The iterator is set up to start from the beginning -// before creating the slice. 
-func (i *Iterator) ToSlice() []KeyValue { - l := i.Len() - if l == 0 { - return nil - } - i.idx = -1 - slice := make([]KeyValue, 0, l) - for i.Next() { - slice = append(slice, i.Attribute()) - } - return slice -} - -// NewMergeIterator returns a MergeIterator for merging two attribute sets. -// Duplicates are resolved by taking the value from the first set. -func NewMergeIterator(s1, s2 *Set) MergeIterator { - mi := MergeIterator{ - one: makeOne(s1.Iter()), - two: makeOne(s2.Iter()), - } - return mi -} - -func makeOne(iter Iterator) oneIterator { - oi := oneIterator{ - iter: iter, - } - oi.advance() - return oi -} - -func (oi *oneIterator) advance() { - if oi.done = !oi.iter.Next(); !oi.done { - oi.attr = oi.iter.Attribute() - } -} - -// Next moves the iterator to the next position. -// Next reports whether there is another attribute available. -func (m *MergeIterator) Next() bool { - if m.one.done && m.two.done { - return false - } - if m.one.done { - m.current = m.two.attr - m.two.advance() - return true - } - if m.two.done { - m.current = m.one.attr - m.one.advance() - return true - } - if m.one.attr.Key == m.two.attr.Key { - m.current = m.one.attr // first iterator attribute value wins - m.one.advance() - m.two.advance() - return true - } - if m.one.attr.Key < m.two.attr.Key { - m.current = m.one.attr - m.one.advance() - return true - } - m.current = m.two.attr - m.two.advance() - return true -} - -// Label returns the current value after Next() returns true. -// -// Deprecated: Use Attribute instead. -func (m *MergeIterator) Label() KeyValue { - return m.current -} - -// Attribute returns the current value after Next() returns true. 
-func (m *MergeIterator) Attribute() KeyValue { - return m.current -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go deleted file mode 100644 index 80a9e5643f..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Key represents the key part in key-value pairs. It's a string. The -// allowed character set in the key depends on the use of the key. -type Key string - -// Bool creates a KeyValue instance with a BOOL Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Bool(name, value). -func (k Key) Bool(v bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolValue(v), - } -} - -// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- BoolSlice(name, value). -func (k Key) BoolSlice(v []bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolSliceValue(v), - } -} - -// Int creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int(name, value). -func (k Key) Int(v int) KeyValue { - return KeyValue{ - Key: k, - Value: IntValue(v), - } -} - -// IntSlice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- IntSlice(name, value). -func (k Key) IntSlice(v []int) KeyValue { - return KeyValue{ - Key: k, - Value: IntSliceValue(v), - } -} - -// Int64 creates a KeyValue instance with an INT64 Value. 
-// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64(name, value). -func (k Key) Int64(v int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64Value(v), - } -} - -// Int64Slice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64Slice(name, value). -func (k Key) Int64Slice(v []int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64SliceValue(v), - } -} - -// Float64 creates a KeyValue instance with a FLOAT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64(v float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64Value(v), - } -} - -// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64Slice(v []float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64SliceValue(v), - } -} - -// String creates a KeyValue instance with a STRING Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- String(name, value). -func (k Key) String(v string) KeyValue { - return KeyValue{ - Key: k, - Value: StringValue(v), - } -} - -// StringSlice creates a KeyValue instance with a STRINGSLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- StringSlice(name, value). -func (k Key) StringSlice(v []string) KeyValue { - return KeyValue{ - Key: k, - Value: StringSliceValue(v), - } -} - -// Defined reports whether the key is not empty. 
-func (k Key) Defined() bool { - return len(k) != 0 -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go deleted file mode 100644 index 0cc368018b..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "fmt" -) - -// KeyValue holds a key and value pair. -type KeyValue struct { - Key Key - Value Value -} - -// Valid reports whether kv is a valid OpenTelemetry attribute. -func (kv KeyValue) Valid() bool { - return kv.Key.Defined() -} - -// Bool creates a KeyValue with a BOOL Value type. -func Bool(k string, v bool) KeyValue { - return Key(k).Bool(v) -} - -// BoolSlice creates a KeyValue with a BOOLSLICE Value type. -func BoolSlice(k string, v []bool) KeyValue { - return Key(k).BoolSlice(v) -} - -// Int creates a KeyValue with an INT64 Value type. -func Int(k string, v int) KeyValue { - return Key(k).Int(v) -} - -// IntSlice creates a KeyValue with an INT64SLICE Value type. -func IntSlice(k string, v []int) KeyValue { - return Key(k).IntSlice(v) -} - -// Int64 creates a KeyValue with an INT64 Value type. -func Int64(k string, v int64) KeyValue { - return Key(k).Int64(v) -} - -// Int64Slice creates a KeyValue with an INT64SLICE Value type. -func Int64Slice(k string, v []int64) KeyValue { - return Key(k).Int64Slice(v) -} - -// Float64 creates a KeyValue with a FLOAT64 Value type. -func Float64(k string, v float64) KeyValue { - return Key(k).Float64(v) -} - -// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. -func Float64Slice(k string, v []float64) KeyValue { - return Key(k).Float64Slice(v) -} - -// String creates a KeyValue with a STRING Value type. -func String(k, v string) KeyValue { - return Key(k).String(v) -} - -// StringSlice creates a KeyValue with a STRINGSLICE Value type. 
-func StringSlice(k string, v []string) KeyValue { - return Key(k).StringSlice(v) -} - -// Stringer creates a new key-value pair with a passed name and a string -// value generated by the passed Stringer interface. -func Stringer(k string, v fmt.Stringer) KeyValue { - return Key(k).String(v.String()) -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go deleted file mode 100644 index 5791c6e7aa..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "math" -) - -func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. - if b { - return 1 - } - return 0 -} - -func rawToBool(r uint64) bool { - return r != 0 -} - -func int64ToRaw(i int64) uint64 { - // Assumes original was a valid int64 (overflow not checked). - return uint64(i) // nolint: gosec -} - -func rawToInt64(r uint64) int64 { - // Assumes original was a valid int64 (overflow not checked). - return int64(r) // nolint: gosec -} - -func float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func rawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go deleted file mode 100644 index 6572c98b12..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "cmp" - "encoding/json" - "reflect" - "slices" - "sort" - - "go.opentelemetry.io/otel/attribute/internal/xxhash" -) - -type ( - // Set is the representation for a distinct attribute set. 
It manages an - // immutable set of attributes, with an internal cache for storing - // attribute encodings. - // - // This type will remain comparable for backwards compatibility. The - // equivalence of Sets across versions is not guaranteed to be stable. - // Prior versions may find two Sets to be equal or not when compared - // directly (i.e. ==), but subsequent versions may not. Users should use - // the Equals method to ensure stable equivalence checking. - // - // Users should also use the Distinct returned from Equivalent as a map key - // instead of a Set directly. Set has relatively poor performance when used - // as a map key compared to Distinct. - Set struct { - hash uint64 - data any - } - - // Distinct is an identifier of a Set which is very likely to be unique. - // - // Distinct should be used as a map key instead of a Set for to provide better - // performance for map operations. - Distinct struct { - hash uint64 - } - - // Sortable implements sort.Interface, used for sorting KeyValue. - // - // Deprecated: This type is no longer used. It was added as a performance - // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no - // longer supported by the module). - Sortable []KeyValue -) - -// Compile time check these types remain comparable. -var ( - _ = isComparable(Set{}) - _ = isComparable(Distinct{}) -) - -func isComparable[T comparable](t T) T { return t } - -var ( - // keyValueType is used in computeDistinctReflect. - keyValueType = reflect.TypeFor[KeyValue]() - - // emptyHash is the hash of an empty set. - emptyHash = xxhash.New().Sum64() - - // userDefinedEmptySet is an empty set. It was mistakenly exposed to users - // as something they can assign to, so it must remain addressable and - // mutable. - // - // This is kept for backwards compatibility, but should not be used in new code. 
- userDefinedEmptySet = &Set{ - hash: emptyHash, - data: [0]KeyValue{}, - } - - emptySet = Set{ - hash: emptyHash, - data: [0]KeyValue{}, - } -) - -// EmptySet returns a reference to a Set with no elements. -// -// This is a convenience provided for optimized calling utility. -func EmptySet() *Set { - // Continue to return the pointer to the user-defined empty set for - // backwards-compatibility. - // - // New code should not use this, instead use emptySet. - return userDefinedEmptySet -} - -// Valid reports whether this value refers to a valid Set. -func (d Distinct) Valid() bool { return d.hash != 0 } - -// reflectValue abbreviates reflect.ValueOf(d). -func (l Set) reflectValue() reflect.Value { - return reflect.ValueOf(l.data) -} - -// Len returns the number of attributes in this set. -func (l *Set) Len() int { - if l == nil || l.hash == 0 { - return 0 - } - return l.reflectValue().Len() -} - -// Get returns the KeyValue at ordered position idx in this set. -func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil || l.hash == 0 { - return KeyValue{}, false - } - value := l.reflectValue() - - if idx >= 0 && idx < value.Len() { - // Note: The Go compiler successfully avoids an allocation for - // the interface{} conversion here: - return value.Index(idx).Interface().(KeyValue), true - } - - return KeyValue{}, false -} - -// Value returns the value of a specified key in this set. -func (l *Set) Value(k Key) (Value, bool) { - if l == nil || l.hash == 0 { - return Value{}, false - } - rValue := l.reflectValue() - vlen := rValue.Len() - - idx := sort.Search(vlen, func(idx int) bool { - return rValue.Index(idx).Interface().(KeyValue).Key >= k - }) - if idx >= vlen { - return Value{}, false - } - keyValue := rValue.Index(idx).Interface().(KeyValue) - if k == keyValue.Key { - return keyValue.Value, true - } - return Value{}, false -} - -// HasValue reports whether a key is defined in this set. 
-func (l *Set) HasValue(k Key) bool { - if l == nil { - return false - } - _, ok := l.Value(k) - return ok -} - -// Iter returns an iterator for visiting the attributes in this set. -func (l *Set) Iter() Iterator { - return Iterator{ - storage: l, - idx: -1, - } -} - -// ToSlice returns the set of attributes belonging to this set, sorted, where -// keys appear no more than once. -func (l *Set) ToSlice() []KeyValue { - iter := l.Iter() - return iter.ToSlice() -} - -// Equivalent returns a value that may be used as a map key. Equal Distinct -// values are very likely to be equivalent attribute Sets. Distinct value of any -// attribute set with the same elements as this, where sets are made unique by -// choosing the last value in the input for any given key. -func (l *Set) Equivalent() Distinct { - if l == nil || l.hash == 0 { - return Distinct{hash: emptySet.hash} - } - return Distinct{hash: l.hash} -} - -// Equals reports whether the argument set is equivalent to this set. -func (l *Set) Equals(o *Set) bool { - if l.Equivalent() != o.Equivalent() { - return false - } - if l == nil || l.hash == 0 { - l = &emptySet - } - if o == nil || o.hash == 0 { - o = &emptySet - } - return l.data == o.data -} - -// Encoded returns the encoded form of this set, according to encoder. -func (l *Set) Encoded(encoder Encoder) string { - if l == nil || encoder == nil { - return "" - } - - return encoder.Encode(l.Iter()) -} - -// NewSet returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// Except for empty sets, this method adds an additional allocation compared -// with calls that include a Sortable. -func NewSet(kvs ...KeyValue) Set { - s, _ := NewSetWithFiltered(kvs, nil) - return s -} - -// NewSetWithSortable returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// This call includes a Sortable option as a memory optimization. -// -// Deprecated: Use [NewSet] instead. 
-func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { - s, _ := NewSetWithFiltered(kvs, nil) - return s -} - -// NewSetWithFiltered returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// This call includes a Filter to include/exclude attribute keys from the -// return value. Excluded keys are returned as a slice of attribute values. -func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return emptySet, nil - } - - // Stable sort so the following de-duplication can implement - // last-value-wins semantics. - slices.SortStableFunc(kvs, func(a, b KeyValue) int { - return cmp.Compare(a.Key, b.Key) - }) - - position := len(kvs) - 1 - offset := position - 1 - - // The requirements stated above require that the stable - // result be placed in the end of the input slice, while - // overwritten values are swapped to the beginning. - // - // De-duplicate with last-value-wins semantics. Preserve - // duplicate values at the beginning of the input slice. - for ; offset >= 0; offset-- { - if kvs[offset].Key == kvs[position].Key { - continue - } - position-- - kvs[offset], kvs[position] = kvs[position], kvs[offset] - } - kvs = kvs[position:] - - if filter != nil { - if div := filteredToFront(kvs, filter); div != 0 { - return newSet(kvs[div:]), kvs[:div] - } - } - return newSet(kvs), nil -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. 
Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -// -// Deprecated: Use [NewSetWithFiltered] instead. -func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { - return NewSetWithFiltered(kvs, filter) -} - -// filteredToFront filters slice in-place using keep function. All KeyValues that need to -// be removed are moved to the front. All KeyValues that need to be kept are -// moved (in-order) to the back. The index for the first KeyValue to be kept is -// returned. -func filteredToFront(slice []KeyValue, keep Filter) int { - n := len(slice) - j := n - for i := n - 1; i >= 0; i-- { - if keep(slice[i]) { - j-- - slice[i], slice[j] = slice[j], slice[i] - } - } - return j -} - -// Filter returns a filtered copy of this Set. See the documentation for -// NewSetWithSortableFiltered for more details. -func (l *Set) Filter(re Filter) (Set, []KeyValue) { - if re == nil { - return *l, nil - } - - // Iterate in reverse to the first attribute that will be filtered out. - n := l.Len() - first := n - 1 - for ; first >= 0; first-- { - kv, _ := l.Get(first) - if !re(kv) { - break - } - } - - // No attributes will be dropped, return the immutable Set l and nil. - if first < 0 { - return *l, nil - } - - // Copy now that we know we need to return a modified set. - // - // Do not do this in-place on the underlying storage of *Set l. Sets are - // immutable and filtering should not change this. - slice := l.ToSlice() - - // Don't re-iterate the slice if only slice[0] is filtered. 
- if first == 0 { - // It is safe to assume len(slice) >= 1 given we found at least one - // attribute above that needs to be filtered out. - return newSet(slice[1:]), slice[:1] - } - - // Move the filtered slice[first] to the front (preserving order). - kv := slice[first] - copy(slice[1:first+1], slice[:first]) - slice[0] = kv - - // Do not re-evaluate re(slice[first+1:]). - div := filteredToFront(slice[1:first+1], re) + 1 - return newSet(slice[div:]), slice[:div] -} - -// newSet returns a new set based on the sorted and uniqued kvs. -func newSet(kvs []KeyValue) Set { - s := Set{ - hash: hashKVs(kvs), - data: computeDataFixed(kvs), - } - if s.data == nil { - s.data = computeDataReflect(kvs) - } - return s -} - -// computeDataFixed computes a Set data for small slices. It returns nil if the -// input is too large for this code path. -func computeDataFixed(kvs []KeyValue) any { - switch len(kvs) { - case 1: - return [1]KeyValue(kvs) - case 2: - return [2]KeyValue(kvs) - case 3: - return [3]KeyValue(kvs) - case 4: - return [4]KeyValue(kvs) - case 5: - return [5]KeyValue(kvs) - case 6: - return [6]KeyValue(kvs) - case 7: - return [7]KeyValue(kvs) - case 8: - return [8]KeyValue(kvs) - case 9: - return [9]KeyValue(kvs) - case 10: - return [10]KeyValue(kvs) - default: - return nil - } -} - -// computeDataReflect computes a Set data using reflection, works for any size -// input. -func computeDataReflect(kvs []KeyValue) any { - at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() - for i, keyValue := range kvs { - *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue - } - return at.Interface() -} - -// MarshalJSON returns the JSON encoding of the Set. -func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.data) -} - -// MarshalLog is the marshaling function used by the logging system to represent this Set. 
-func (l Set) MarshalLog() any { - kvs := make(map[string]string) - for _, kv := range l.ToSlice() { - kvs[string(kv.Key)] = kv.Value.Emit() - } - return kvs -} - -// Len implements sort.Interface. -func (l *Sortable) Len() int { - return len(*l) -} - -// Swap implements sort.Interface. -func (l *Sortable) Swap(i, j int) { - (*l)[i], (*l)[j] = (*l)[j], (*l)[i] -} - -// Less implements sort.Interface. -func (l *Sortable) Less(i, j int) bool { - return (*l)[i].Key < (*l)[j].Key -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go deleted file mode 100644 index 6c04448d6f..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ /dev/null @@ -1,32 +0,0 @@ -// Code generated by "stringer -type=Type"; DO NOT EDIT. - -package attribute - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[EMPTY-0] - _ = x[BOOL-1] - _ = x[INT64-2] - _ = x[FLOAT64-3] - _ = x[STRING-4] - _ = x[BOOLSLICE-5] - _ = x[INT64SLICE-6] - _ = x[FLOAT64SLICE-7] - _ = x[STRINGSLICE-8] -} - -const _Type_name = "EMPTYBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" - -var _Type_index = [...]uint8{0, 5, 9, 14, 21, 27, 36, 46, 58, 69} - -func (i Type) String() string { - idx := int(i) - 0 - if i < 0 || idx >= len(_Type_index)-1 { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[idx]:_Type_index[idx+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go deleted file mode 100644 index db04b1326c..0000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "encoding/json" - "fmt" - "strconv" - - attribute "go.opentelemetry.io/otel/attribute/internal" -) - -//go:generate stringer -type=Type - -// Type describes the type of the data Value holds. -type Type int // nolint: revive // redefines builtin Type. - -// Value represents the value part in key-value pairs. -// -// Note that the zero value is a valid empty value. -type Value struct { - vtype Type - numeric uint64 - stringly string - slice any -} - -const ( - // EMPTY is used for a Value with no value set. - EMPTY Type = iota - // BOOL is a boolean Type Value. - BOOL - // INT64 is a 64-bit signed integral Type Value. - INT64 - // FLOAT64 is a 64-bit floating point Type Value. - FLOAT64 - // STRING is a string Type Value. - STRING - // BOOLSLICE is a slice of booleans Type Value. - BOOLSLICE - // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. - INT64SLICE - // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. 
- FLOAT64SLICE - // STRINGSLICE is a slice of strings Type Value. - STRINGSLICE - // INVALID is used for a Value with no value set. - // - // Deprecated: Use EMPTY instead as an empty value is a valid value. - INVALID = EMPTY -) - -// BoolValue creates a BOOL Value. -func BoolValue(v bool) Value { - return Value{ - vtype: BOOL, - numeric: boolToRaw(v), - } -} - -// BoolSliceValue creates a BOOLSLICE Value. -func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} -} - -// IntValue creates an INT64 Value. -func IntValue(v int) Value { - return Int64Value(int64(v)) -} - -// IntSliceValue creates an INT64SLICE Value. -func IntSliceValue(v []int) Value { - val := Value{vtype: INT64SLICE} - - // Avoid the common tiny-slice cases from allocating a new slice. - switch len(v) { - case 0: - val.slice = [0]int64{} - case 1: - val.slice = [1]int64{int64(v[0])} - case 2: - val.slice = [2]int64{int64(v[0]), int64(v[1])} - case 3: - val.slice = [3]int64{int64(v[0]), int64(v[1]), int64(v[2])} - default: - // Fallback to a new slice for larger slices. - cp := make([]int64, len(v)) - for i, val := range v { - cp[i] = int64(val) - } - val.slice = attribute.SliceValue(cp) - } - - return val -} - -// Int64Value creates an INT64 Value. -func Int64Value(v int64) Value { - return Value{ - vtype: INT64, - numeric: int64ToRaw(v), - } -} - -// Int64SliceValue creates an INT64SLICE Value. -func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} -} - -// Float64Value creates a FLOAT64 Value. -func Float64Value(v float64) Value { - return Value{ - vtype: FLOAT64, - numeric: float64ToRaw(v), - } -} - -// Float64SliceValue creates a FLOAT64SLICE Value. -func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} -} - -// StringValue creates a STRING Value. 
-func StringValue(v string) Value { - return Value{ - vtype: STRING, - stringly: v, - } -} - -// StringSliceValue creates a STRINGSLICE Value. -func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} -} - -// Type returns a type of the Value. -func (v Value) Type() Type { - return v.vtype -} - -// AsBool returns the bool value. Make sure that the Value's type is -// BOOL. -func (v Value) AsBool() bool { - return rawToBool(v.numeric) -} - -// AsBoolSlice returns the []bool value. Make sure that the Value's type is -// BOOLSLICE. -func (v Value) AsBoolSlice() []bool { - if v.vtype != BOOLSLICE { - return nil - } - return v.asBoolSlice() -} - -func (v Value) asBoolSlice() []bool { - return attribute.AsSlice[bool](v.slice) -} - -// AsInt64 returns the int64 value. Make sure that the Value's type is -// INT64. -func (v Value) AsInt64() int64 { - return rawToInt64(v.numeric) -} - -// AsInt64Slice returns the []int64 value. Make sure that the Value's type is -// INT64SLICE. -func (v Value) AsInt64Slice() []int64 { - if v.vtype != INT64SLICE { - return nil - } - return v.asInt64Slice() -} - -func (v Value) asInt64Slice() []int64 { - return attribute.AsSlice[int64](v.slice) -} - -// AsFloat64 returns the float64 value. Make sure that the Value's -// type is FLOAT64. -func (v Value) AsFloat64() float64 { - return rawToFloat64(v.numeric) -} - -// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is -// FLOAT64SLICE. -func (v Value) AsFloat64Slice() []float64 { - if v.vtype != FLOAT64SLICE { - return nil - } - return v.asFloat64Slice() -} - -func (v Value) asFloat64Slice() []float64 { - return attribute.AsSlice[float64](v.slice) -} - -// AsString returns the string value. Make sure that the Value's type -// is STRING. -func (v Value) AsString() string { - return v.stringly -} - -// AsStringSlice returns the []string value. Make sure that the Value's type is -// STRINGSLICE. 
-func (v Value) AsStringSlice() []string { - if v.vtype != STRINGSLICE { - return nil - } - return v.asStringSlice() -} - -func (v Value) asStringSlice() []string { - return attribute.AsSlice[string](v.slice) -} - -type unknownValueType struct{} - -// AsInterface returns Value's data as any. -func (v Value) AsInterface() any { - switch v.Type() { - case BOOL: - return v.AsBool() - case BOOLSLICE: - return v.asBoolSlice() - case INT64: - return v.AsInt64() - case INT64SLICE: - return v.asInt64Slice() - case FLOAT64: - return v.AsFloat64() - case FLOAT64SLICE: - return v.asFloat64Slice() - case STRING: - return v.stringly - case STRINGSLICE: - return v.asStringSlice() - case EMPTY: - return nil - } - return unknownValueType{} -} - -// Emit returns a string representation of Value's data. -func (v Value) Emit() string { - switch v.Type() { - case BOOLSLICE: - return fmt.Sprint(v.asBoolSlice()) - case BOOL: - return strconv.FormatBool(v.AsBool()) - case INT64SLICE: - j, err := json.Marshal(v.asInt64Slice()) - if err != nil { - return fmt.Sprintf("invalid: %v", v.asInt64Slice()) - } - return string(j) - case INT64: - return strconv.FormatInt(v.AsInt64(), 10) - case FLOAT64SLICE: - j, err := json.Marshal(v.asFloat64Slice()) - if err != nil { - return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) - } - return string(j) - case FLOAT64: - return fmt.Sprint(v.AsFloat64()) - case STRINGSLICE: - j, err := json.Marshal(v.asStringSlice()) - if err != nil { - return fmt.Sprintf("invalid: %v", v.asStringSlice()) - } - return string(j) - case STRING: - return v.stringly - case EMPTY: - return "" - default: - return "unknown" - } -} - -// MarshalJSON returns the JSON encoding of the Value. 
-func (v Value) MarshalJSON() ([]byte, error) { - var jsonVal struct { - Type string - Value any - } - jsonVal.Type = v.Type().String() - jsonVal.Value = v.AsInterface() - return json.Marshal(jsonVal) -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md deleted file mode 100644 index 7d798435e1..0000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Baggage - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go deleted file mode 100644 index 878ffbe43a..0000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ /dev/null @@ -1,1075 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package baggage // import "go.opentelemetry.io/otel/baggage" - -import ( - "errors" - "fmt" - "net/url" - "strings" - "unicode/utf8" - - "go.opentelemetry.io/otel/internal/baggage" -) - -const ( - maxMembers = 64 - maxBytesPerBaggageString = 8192 - - listDelimiter = "," - keyValueDelimiter = "=" - propertyDelimiter = ";" -) - -var ( - errInvalidKey = errors.New("invalid key") - errInvalidValue = errors.New("invalid value") - errInvalidProperty = errors.New("invalid baggage list-member property") - errInvalidMember = errors.New("invalid baggage list-member") - errMemberNumber = errors.New("too many list-members in baggage-string") - errBaggageBytes = errors.New("baggage-string too large") -) - -// Property is an additional metadata entry for a baggage list-member. -type Property struct { - key, value string - - // hasValue indicates if a zero-value value means the property does not - // have a value or if it was the zero-value. - hasValue bool -} - -// NewKeyProperty returns a new Property for key. 
-// -// The passed key must be valid, non-empty UTF-8 string. -// If key is invalid, an error will be returned. -// However, the specific Propagators that are used to transmit baggage entries across -// component boundaries may impose their own restrictions on Property key. -// For example, the W3C Baggage specification restricts the Property keys to strings that -// satisfy the token definition from RFC7230, Section 3.2.6. -// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. -func NewKeyProperty(key string) (Property, error) { - if !validateBaggageName(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - - p := Property{key: key} - return p, nil -} - -// NewKeyValueProperty returns a new Property for key with value. -// -// The passed key must be compliant with W3C Baggage specification. -// The passed value must be percent-encoded as defined in W3C Baggage specification. -// -// Notice: Consider using [NewKeyValuePropertyRaw] instead -// that does not require percent-encoding of the value. -func NewKeyValueProperty(key, value string) (Property, error) { - if !validateKey(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - - if !validateValue(value) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - decodedValue, err := url.PathUnescape(value) - if err != nil { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - return NewKeyValuePropertyRaw(key, decodedValue) -} - -// NewKeyValuePropertyRaw returns a new Property for key with value. -// -// The passed key must be valid, non-empty UTF-8 string. -// The passed value must be valid UTF-8 string. -// However, the specific Propagators that are used to transmit baggage entries across -// component boundaries may impose their own restrictions on Property key. 
-// For example, the W3C Baggage specification restricts the Property keys to strings that -// satisfy the token definition from RFC7230, Section 3.2.6. -// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. -func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateBaggageName(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - if !validateBaggageValue(value) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - - p := Property{ - key: key, - value: value, - hasValue: true, - } - return p, nil -} - -func newInvalidProperty() Property { - return Property{} -} - -// parseProperty attempts to decode a Property from the passed string. It -// returns an error if the input is invalid according to the W3C Baggage -// specification. -func parseProperty(property string) (Property, error) { - if property == "" { - return newInvalidProperty(), nil - } - - p, ok := parsePropertyInternal(property) - if !ok { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) - } - - return p, nil -} - -// validate ensures p conforms to the W3C Baggage specification, returning an -// error otherwise. -func (p Property) validate() error { - errFunc := func(err error) error { - return fmt.Errorf("invalid property: %w", err) - } - - if !validateBaggageName(p.key) { - return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) - } - if !p.hasValue && p.value != "" { - return errFunc(errors.New("inconsistent value")) - } - if p.hasValue && !validateBaggageValue(p.value) { - return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) - } - return nil -} - -// Key returns the Property key. -func (p Property) Key() string { - return p.key -} - -// Value returns the Property value. 
Additionally, a boolean value is returned -// indicating if the returned value is the empty if the Property has a value -// that is empty or if the value is not set. -func (p Property) Value() (string, bool) { - return p.value, p.hasValue -} - -// String encodes Property into a header string compliant with the W3C Baggage -// specification. -// It would return empty string if the key is invalid with the W3C Baggage -// specification. This could happen for a UTF-8 key, as it may contain -// invalid characters. -func (p Property) String() string { - // W3C Baggage specification does not allow percent-encoded keys. - if !validateKey(p.key) { - return "" - } - - if p.hasValue { - return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) - } - return p.key -} - -type properties []Property - -func fromInternalProperties(iProps []baggage.Property) properties { - if len(iProps) == 0 { - return nil - } - - props := make(properties, len(iProps)) - for i, p := range iProps { - props[i] = Property{ - key: p.Key, - value: p.Value, - hasValue: p.HasValue, - } - } - return props -} - -func (p properties) asInternal() []baggage.Property { - if len(p) == 0 { - return nil - } - - iProps := make([]baggage.Property, len(p)) - for i, prop := range p { - iProps[i] = baggage.Property{ - Key: prop.key, - Value: prop.value, - HasValue: prop.hasValue, - } - } - return iProps -} - -func (p properties) Copy() properties { - if len(p) == 0 { - return nil - } - - props := make(properties, len(p)) - copy(props, p) - return props -} - -// validate ensures each Property in p conforms to the W3C Baggage -// specification, returning an error otherwise. -func (p properties) validate() error { - for _, prop := range p { - if err := prop.validate(); err != nil { - return err - } - } - return nil -} - -// String encodes properties into a header string compliant with the W3C Baggage -// specification. 
-func (p properties) String() string { - props := make([]string, 0, len(p)) - for _, prop := range p { - s := prop.String() - - // Ignored empty properties. - if s != "" { - props = append(props, s) - } - } - return strings.Join(props, propertyDelimiter) -} - -// Member is a list-member of a baggage-string as defined by the W3C Baggage -// specification. -type Member struct { - key, value string - properties properties - - // hasData indicates whether the created property contains data or not. - // Properties that do not contain data are invalid with no other check - // required. - hasData bool -} - -// NewMember returns a new Member from the passed arguments. -// -// The passed key must be compliant with W3C Baggage specification. -// The passed value must be percent-encoded as defined in W3C Baggage specification. -// -// Notice: Consider using [NewMemberRaw] instead -// that does not require percent-encoding of the value. -func NewMember(key, value string, props ...Property) (Member, error) { - if !validateKey(key) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - - if !validateValue(value) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - decodedValue, err := url.PathUnescape(value) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - return NewMemberRaw(key, decodedValue, props...) -} - -// NewMemberRaw returns a new Member from the passed arguments. -// -// The passed key must be valid, non-empty UTF-8 string. -// The passed value must be valid UTF-8 string. -// However, the specific Propagators that are used to transmit baggage entries across -// component boundaries may impose their own restrictions on baggage key. -// For example, the W3C Baggage specification restricts the baggage keys to strings that -// satisfy the token definition from RFC7230, Section 3.2.6. 
-// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. -func NewMemberRaw(key, value string, props ...Property) (Member, error) { - m := Member{ - key: key, - value: value, - properties: properties(props).Copy(), - hasData: true, - } - if err := m.validate(); err != nil { - return newInvalidMember(), err - } - return m, nil -} - -func newInvalidMember() Member { - return Member{} -} - -// parseMember attempts to decode a Member from the passed string. It returns -// an error if the input is invalid according to the W3C Baggage -// specification. -func parseMember(member string) (Member, error) { - var props properties - keyValue, properties, found := strings.Cut(member, propertyDelimiter) - if found { - // Parse the member properties. - for pStr := range strings.SplitSeq(properties, propertyDelimiter) { - p, err := parseProperty(pStr) - if err != nil { - return newInvalidMember(), err - } - props = append(props, p) - } - } - // Parse the member key/value pair. - - // Take into account a value can contain equal signs (=). - k, v, found := strings.Cut(keyValue, keyValueDelimiter) - if !found { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) - } - // "Leading and trailing whitespaces are allowed but MUST be trimmed - // when converting the header into a data structure." - key := strings.TrimSpace(k) - if !validateKey(key) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - - rawVal := strings.TrimSpace(v) - if !validateValue(rawVal) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) - } - - // Decode a percent-encoded value. 
- unescapeVal, err := url.PathUnescape(rawVal) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) - } - - value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) - return Member{key: key, value: value, properties: props, hasData: true}, nil -} - -// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. -func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { - if utf8.ValidString(unescapeVal) { - return unescapeVal - } - // W3C baggage spec: - // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 - - var b strings.Builder - b.Grow(c) - for i := 0; i < len(unescapeVal); { - r, size := utf8.DecodeRuneInString(unescapeVal[i:]) - if r == utf8.RuneError && size == 1 { - // Invalid UTF-8 sequence found, replace it with '�' - _, _ = b.WriteString("�") - } else { - _, _ = b.WriteRune(r) - } - i += size - } - - return b.String() -} - -// validate ensures m conforms to the W3C Baggage specification. -// A key must be an ASCII string, returning an error otherwise. -func (m Member) validate() error { - if !m.hasData { - return fmt.Errorf("%w: %q", errInvalidMember, m) - } - - if !validateBaggageName(m.key) { - return fmt.Errorf("%w: %q", errInvalidKey, m.key) - } - if !validateBaggageValue(m.value) { - return fmt.Errorf("%w: %q", errInvalidValue, m.value) - } - return m.properties.validate() -} - -// Key returns the Member key. -func (m Member) Key() string { return m.key } - -// Value returns the Member value. -func (m Member) Value() string { return m.value } - -// Properties returns a copy of the Member properties. -func (m Member) Properties() []Property { return m.properties.Copy() } - -// String encodes Member into a header string compliant with the W3C Baggage -// specification. -// It would return empty string if the key is invalid with the W3C Baggage -// specification. 
This could happen for a UTF-8 key, as it may contain -// invalid characters. -func (m Member) String() string { - // W3C Baggage specification does not allow percent-encoded keys. - if !validateKey(m.key) { - return "" - } - - s := m.key + keyValueDelimiter + valueEscape(m.value) - if len(m.properties) > 0 { - s += propertyDelimiter + m.properties.String() - } - return s -} - -// Baggage is a list of baggage members representing the baggage-string as -// defined by the W3C Baggage specification. -type Baggage struct { //nolint:golint - list baggage.List -} - -// New returns a new valid Baggage. It returns an error if it results in a -// Baggage exceeding limits set in that specification. -// -// If the resulting Baggage exceeds the maximum allowed members or bytes, -// members are dropped until the limits are satisfied and an error is returned -// along with the partial result. -// -// It expects all the provided members to have already been validated. -func New(members ...Member) (Baggage, error) { - if len(members) == 0 { - return Baggage{}, nil - } - - b := make(baggage.List) - for _, m := range members { - if !m.hasData { - return Baggage{}, errInvalidMember - } - // OpenTelemetry resolves duplicates by last-one-wins. - b[m.key] = baggage.Item{ - Value: m.value, - Properties: m.properties.asInternal(), - } - } - - var truncateErr error - - // Check member count after deduplication. - if len(b) > maxMembers { - truncateErr = errors.Join(truncateErr, errMemberNumber) - for k := range b { - if len(b) <= maxMembers { - break - } - delete(b, k) - } - } - - // Check byte size and drop members if necessary. 
- totalBytes := 0 - first := true - for k := range b { - m := Member{ - key: k, - value: b[k].Value, - properties: fromInternalProperties(b[k].Properties), - } - memberSize := len(m.String()) - if !first { - memberSize++ // comma separator - } - if totalBytes+memberSize > maxBytesPerBaggageString { - truncateErr = errors.Join(truncateErr, fmt.Errorf("%w: %d", errBaggageBytes, totalBytes+memberSize)) - delete(b, k) - continue - } - totalBytes += memberSize - first = false - } - - return Baggage{b}, truncateErr -} - -// Parse attempts to decode a baggage-string from the passed string. It -// returns an error if the input is invalid according to the W3C Baggage -// specification. -// -// If there are duplicate list-members contained in baggage, the last one -// defined (reading left-to-right) will be the only one kept. This diverges -// from the W3C Baggage specification which allows duplicate list-members, but -// conforms to the OpenTelemetry Baggage specification. -// -// If the baggage-string exceeds the maximum allowed members (64) or bytes -// (8192), members are dropped until the limits are satisfied and an error is -// returned along with the partial result. -// -// Invalid members are skipped and the error is returned along with the -// partial result containing the valid members. -func Parse(bStr string) (Baggage, error) { - if bStr == "" { - return Baggage{}, nil - } - - b := make(baggage.List) - sizes := make(map[string]int) // Track per-key byte sizes - var totalBytes int - var truncateErr error - for memberStr := range strings.SplitSeq(bStr, listDelimiter) { - // Check member count limit. - if len(b) >= maxMembers { - truncateErr = errors.Join(truncateErr, errMemberNumber) - break - } - - m, err := parseMember(memberStr) - if err != nil { - truncateErr = errors.Join(truncateErr, err) - continue // skip invalid member, keep processing - } - - // Check byte size limit. - // Account for comma separator between members. 
- memberBytes := len(m.String()) - _, existingKey := b[m.key] - if !existingKey && len(b) > 0 { - memberBytes++ // comma separator only for new keys - } - - // Calculate new totalBytes if we add/overwrite this key - var newTotalBytes int - if oldSize, exists := sizes[m.key]; exists { - // Overwriting existing key: subtract old size, add new size - newTotalBytes = totalBytes - oldSize + memberBytes - } else { - // New key - newTotalBytes = totalBytes + memberBytes - } - - if newTotalBytes > maxBytesPerBaggageString { - truncateErr = errors.Join(truncateErr, errBaggageBytes) - break - } - - // OpenTelemetry resolves duplicates by last-one-wins. - b[m.key] = baggage.Item{ - Value: m.value, - Properties: m.properties.asInternal(), - } - sizes[m.key] = memberBytes - totalBytes = newTotalBytes - } - - if len(b) == 0 { - return Baggage{}, truncateErr - } - return Baggage{b}, truncateErr -} - -// Member returns the baggage list-member identified by key. -// -// If there is no list-member matching the passed key the returned Member will -// be a zero-value Member. -// The returned member is not validated, as we assume the validation happened -// when it was added to the Baggage. -func (b Baggage) Member(key string) Member { - v, ok := b.list[key] - if !ok { - // We do not need to worry about distinguishing between the situation - // where a zero-valued Member is included in the Baggage because a - // zero-valued Member is invalid according to the W3C Baggage - // specification (it has an empty key). - return newInvalidMember() - } - - return Member{ - key: key, - value: v.Value, - properties: fromInternalProperties(v.Properties), - hasData: true, - } -} - -// Members returns all the baggage list-members. -// The order of the returned list-members is not significant. -// -// The returned members are not validated, as we assume the validation happened -// when they were added to the Baggage. 
-func (b Baggage) Members() []Member { - if len(b.list) == 0 { - return nil - } - - members := make([]Member, 0, len(b.list)) - for k, v := range b.list { - members = append(members, Member{ - key: k, - value: v.Value, - properties: fromInternalProperties(v.Properties), - hasData: true, - }) - } - return members -} - -// SetMember returns a copy of the Baggage with the member included. If the -// baggage contains a Member with the same key, the existing Member is -// replaced. -// -// If member is invalid according to the W3C Baggage specification, an error -// is returned with the original Baggage. -func (b Baggage) SetMember(member Member) (Baggage, error) { - if !member.hasData { - return b, errInvalidMember - } - - n := len(b.list) - if _, ok := b.list[member.key]; !ok { - n++ - } - list := make(baggage.List, n) - - for k, v := range b.list { - // Do not copy if we are just going to overwrite. - if k == member.key { - continue - } - list[k] = v - } - - list[member.key] = baggage.Item{ - Value: member.value, - Properties: member.properties.asInternal(), - } - - return Baggage{list: list}, nil -} - -// DeleteMember returns a copy of the Baggage with the list-member identified -// by key removed. -func (b Baggage) DeleteMember(key string) Baggage { - n := len(b.list) - if _, ok := b.list[key]; ok { - n-- - } - list := make(baggage.List, n) - - for k, v := range b.list { - if k == key { - continue - } - list[k] = v - } - - return Baggage{list: list} -} - -// Len returns the number of list-members in the Baggage. -func (b Baggage) Len() int { - return len(b.list) -} - -// String encodes Baggage into a header string compliant with the W3C Baggage -// specification. -// It would ignore members where the member key is invalid with the W3C Baggage -// specification. This could happen for a UTF-8 key, as it may contain -// invalid characters. 
-func (b Baggage) String() string { - members := make([]string, 0, len(b.list)) - for k, v := range b.list { - s := Member{ - key: k, - value: v.Value, - properties: fromInternalProperties(v.Properties), - }.String() - - // Ignored empty members. - if s != "" { - members = append(members, s) - } - } - return strings.Join(members, listDelimiter) -} - -// parsePropertyInternal attempts to decode a Property from the passed string. -// It follows the spec at https://www.w3.org/TR/baggage/#definition. -func parsePropertyInternal(s string) (p Property, ok bool) { - // For the entire function we will use " key = value " as an example. - // Attempting to parse the key. - // First skip spaces at the beginning "< >key = value " (they could be empty). - index := skipSpace(s, 0) - - // Parse the key: " = value ". - keyStart := index - keyEnd := index - for _, c := range s[keyStart:] { - if !validateKeyChar(c) { - break - } - keyEnd++ - } - - // If we couldn't find any valid key character, - // it means the key is either empty or invalid. - if keyStart == keyEnd { - return p, ok - } - - // Skip spaces after the key: " key< >= value ". - index = skipSpace(s, keyEnd) - - if index == len(s) { - // A key can have no value, like: " key ". - ok = true - p.key = s[keyStart:keyEnd] - return p, ok - } - - // If we have not reached the end and we can't find the '=' delimiter, - // it means the property is invalid. - if s[index] != keyValueDelimiter[0] { - return p, ok - } - - // Attempting to parse the value. - // Match: " key =< >value ". - index = skipSpace(s, index+1) - - // Match the value string: " key = ". - // A valid property can be: " key =". - // Therefore, we don't have to check if the value is empty. - valueStart := index - valueEnd := index - for _, c := range s[valueStart:] { - if !validateValueChar(c) { - break - } - valueEnd++ - } - - // Skip all trailing whitespaces: " key = value< >". 
- index = skipSpace(s, valueEnd) - - // If after looking for the value and skipping whitespaces - // we have not reached the end, it means the property is - // invalid, something like: " key = value value1". - if index != len(s) { - return p, ok - } - - // Decode a percent-encoded value. - rawVal := s[valueStart:valueEnd] - unescapeVal, err := url.PathUnescape(rawVal) - if err != nil { - return p, ok - } - value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) - - ok = true - p.key = s[keyStart:keyEnd] - p.hasValue = true - - p.value = value - return p, ok -} - -func skipSpace(s string, offset int) int { - i := offset - for ; i < len(s); i++ { - c := s[i] - if c != ' ' && c != '\t' { - break - } - } - return i -} - -var safeKeyCharset = [utf8.RuneSelf]bool{ - // 0x23 to 0x27 - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - - // 0x30 to 0x39 - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - - // 0x41 to 0x5a - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - - // 0x5e to 0x7a - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - - // remainder - '!': true, - '*': true, - '+': true, - '-': true, - '.': true, - '|': true, - '~': true, -} - -// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. 
-// Baggage name is a valid, non-empty UTF-8 string. -func validateBaggageName(s string) bool { - if s == "" { - return false - } - - return utf8.ValidString(s) -} - -// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. -// Baggage value is a valid UTF-8 strings. -// Empty string is also a valid UTF-8 string. -func validateBaggageValue(s string) bool { - return utf8.ValidString(s) -} - -// validateKey checks if the string is a valid W3C Baggage key. -func validateKey(s string) bool { - if s == "" { - return false - } - - for _, c := range s { - if !validateKeyChar(c) { - return false - } - } - - return true -} - -func validateKeyChar(c int32) bool { - return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] -} - -// validateValue checks if the string is a valid W3C Baggage value. -func validateValue(s string) bool { - for _, c := range s { - if !validateValueChar(c) { - return false - } - } - - return true -} - -var safeValueCharset = [utf8.RuneSelf]bool{ - '!': true, // 0x21 - - // 0x23 to 0x2b - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - - // 0x2d to 0x3a - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - - // 0x3c to 0x5b - '<': true, // 0x3C - '=': true, // 0x3D - '>': true, // 0x3E - '?': true, // 0x3F - '@': true, // 0x40 - 'A': true, // 0x41 - 'B': true, // 0x42 - 'C': true, // 0x43 - 'D': true, // 0x44 - 'E': true, // 0x45 - 'F': true, // 0x46 - 'G': true, // 0x47 - 'H': true, // 0x48 - 'I': true, // 0x49 - 'J': true, // 0x4A - 'K': true, // 0x4B - 'L': true, // 0x4C - 'M': true, // 0x4D - 'N': true, // 0x4E - 'O': true, // 0x4F - 'P': true, // 0x50 - 'Q': true, // 0x51 - 'R': true, // 0x52 - 'S': true, // 0x53 - 'T': true, // 0x54 - 'U': true, // 0x55 - 'V': true, // 0x56 - 'W': true, // 0x57 - 'X': true, // 0x58 - 
'Y': true, // 0x59 - 'Z': true, // 0x5A - '[': true, // 0x5B - - // 0x5d to 0x7e - ']': true, // 0x5D - '^': true, // 0x5E - '_': true, // 0x5F - '`': true, // 0x60 - 'a': true, // 0x61 - 'b': true, // 0x62 - 'c': true, // 0x63 - 'd': true, // 0x64 - 'e': true, // 0x65 - 'f': true, // 0x66 - 'g': true, // 0x67 - 'h': true, // 0x68 - 'i': true, // 0x69 - 'j': true, // 0x6A - 'k': true, // 0x6B - 'l': true, // 0x6C - 'm': true, // 0x6D - 'n': true, // 0x6E - 'o': true, // 0x6F - 'p': true, // 0x70 - 'q': true, // 0x71 - 'r': true, // 0x72 - 's': true, // 0x73 - 't': true, // 0x74 - 'u': true, // 0x75 - 'v': true, // 0x76 - 'w': true, // 0x77 - 'x': true, // 0x78 - 'y': true, // 0x79 - 'z': true, // 0x7A - '{': true, // 0x7B - '|': true, // 0x7C - '}': true, // 0x7D - '~': true, // 0x7E -} - -func validateValueChar(c int32) bool { - return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] -} - -// valueEscape escapes the string so it can be safely placed inside a baggage value, -// replacing special characters with %XX sequences as needed. -// -// The implementation is based on: -// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285. -func valueEscape(s string) string { - hexCount := 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c) { - hexCount++ - } - } - - if hexCount == 0 { - return s - } - - var buf [64]byte - var t []byte - - required := len(s) + 2*hexCount - if required <= len(buf) { - t = buf[:required] - } else { - t = make([]byte, required) - } - - j := 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(s[i]) { - const upperhex = "0123456789ABCDEF" - t[j] = '%' - t[j+1] = upperhex[c>>4] - t[j+2] = upperhex[c&15] - j += 3 - } else { - t[j] = c - j++ - } - } - - return string(t) -} - -// shouldEscape returns true if the specified byte should be escaped when -// appearing in a baggage value string. 
-func shouldEscape(c byte) bool { - if c == '%' { - // The percent character must be encoded so that percent-encoding can work. - return true - } - return !validateValueChar(int32(c)) -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go deleted file mode 100644 index a572461a05..0000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package baggage // import "go.opentelemetry.io/otel/baggage" - -import ( - "context" - - "go.opentelemetry.io/otel/internal/baggage" -) - -// ContextWithBaggage returns a copy of parent with baggage. -func ContextWithBaggage(parent context.Context, b Baggage) context.Context { - // Delegate so any hooks for the OpenTracing bridge are handled. - return baggage.ContextWithList(parent, b.list) -} - -// ContextWithoutBaggage returns a copy of parent with no baggage. -func ContextWithoutBaggage(parent context.Context) context.Context { - // Delegate so any hooks for the OpenTracing bridge are handled. - return baggage.ContextWithList(parent, nil) -} - -// FromContext returns the baggage contained in ctx. -func FromContext(ctx context.Context) Baggage { - // Delegate so any hooks for the OpenTracing bridge are handled. - return Baggage{list: baggage.ListFromContext(ctx)} -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go deleted file mode 100644 index b51d87cab7..0000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package baggage provides functionality for storing and retrieving -baggage items in Go context. For propagating the baggage, see the -go.opentelemetry.io/otel/propagation package. 
-*/ -package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md deleted file mode 100644 index 24c52b387d..0000000000 --- a/vendor/go.opentelemetry.io/otel/codes/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Codes - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go deleted file mode 100644 index d48847ed86..0000000000 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package codes // import "go.opentelemetry.io/otel/codes" - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" -) - -const ( - // Unset is the default status code. - Unset Code = 0 - - // Error indicates the operation contains an error. - // - // NOTE: The error code in OTLP is 2. - // The value of this enum is only relevant to the internals - // of the Go SDK. - Error Code = 1 - - // Ok indicates operation has been validated by an Application developers - // or Operator to have completed successfully, or contain no error. - // - // NOTE: The Ok code in OTLP is 1. - // The value of this enum is only relevant to the internals - // of the Go SDK. - Ok Code = 2 - - maxCode = 3 -) - -// Code is an 32-bit representation of a status state. -type Code uint32 - -var codeToStr = map[Code]string{ - Unset: "Unset", - Error: "Error", - Ok: "Ok", -} - -var strToCode = map[string]Code{ - `"Unset"`: Unset, - `"Error"`: Error, - `"Ok"`: Ok, -} - -// String returns the Code as a string. -func (c Code) String() string { - return codeToStr[c] -} - -// UnmarshalJSON unmarshals b into the Code. 
-// -// This is based on the functionality in the gRPC codes package: -// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 -func (c *Code) UnmarshalJSON(b []byte) error { - // From json.Unmarshaler: By convention, to approximate the behavior of - // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as - // a no-op. - if string(b) == "null" { - return nil - } - if c == nil { - return errors.New("nil receiver passed to UnmarshalJSON") - } - - var x any - if err := json.Unmarshal(b, &x); err != nil { - return err - } - switch x.(type) { - case string: - if jc, ok := strToCode[string(b)]; ok { - *c = jc - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - case float64: - if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { - if ci >= maxCode { - return fmt.Errorf("invalid code: %q", ci) - } - - *c = Code(ci) // nolint: gosec // Bit size of 32 check above. - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - default: - return fmt.Errorf("invalid code: %q", string(b)) - } -} - -// MarshalJSON returns c as the JSON encoding of c. -func (c *Code) MarshalJSON() ([]byte, error) { - if c == nil { - return []byte("null"), nil - } - str, ok := codeToStr[*c] - if !ok { - return nil, fmt.Errorf("invalid code: %d", *c) - } - return fmt.Appendf(nil, "%q", str), nil -} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go deleted file mode 100644 index ee8db448b8..0000000000 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package codes defines the canonical error codes used by OpenTelemetry. - -It conforms to [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). 
-*/ -package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile deleted file mode 100644 index 7a9b3c0559..0000000000 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -# This is a renovate-friendly source of Docker images. -FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.22.1@sha256:33ae522ae4b71c1c562563c1d81f46aa0f79f088a0873199143a1f11ac30e5c9 AS weaver -FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go deleted file mode 100644 index 921f85961a..0000000000 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package otel provides global access to the OpenTelemetry API. The subpackages of -the otel package provide an implementation of the OpenTelemetry API. - -The provided API is used to instrument code and measure data about that code's -performance and operation. The measured data, by default, is not processed or -transmitted anywhere. An implementation of the OpenTelemetry SDK, like the -default SDK implementation (go.opentelemetry.io/otel/sdk), and associated -exporters are used to process and transport this data. - -To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. - -To read more about tracing, see go.opentelemetry.io/otel/trace. - -To read more about metrics, see go.opentelemetry.io/otel/metric. - -To read more about logs, see go.opentelemetry.io/otel/log. - -To read more about propagation, see go.opentelemetry.io/otel/propagation and -go.opentelemetry.io/otel/baggage. 
-*/ -package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go deleted file mode 100644 index 67414c71e0..0000000000 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otel // import "go.opentelemetry.io/otel" - -// ErrorHandler handles irremediable events. -type ErrorHandler interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Handle handles any error deemed irremediable by an OpenTelemetry - // component. - Handle(error) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// ErrorHandlerFunc is a convenience adapter to allow the use of a function -// as an ErrorHandler. -type ErrorHandlerFunc func(error) - -var _ ErrorHandler = ErrorHandlerFunc(nil) - -// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. -func (f ErrorHandlerFunc) Handle(err error) { - f(err) -} diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go deleted file mode 100644 index 07623b6791..0000000000 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" -) - -// Compile-time check global.ErrDelegator implements ErrorHandler. -var _ ErrorHandler = (*global.ErrDelegator)(nil) - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. 
All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } - -// Handle is a convenience function for GetErrorHandler().Handle(err). -func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go deleted file mode 100644 index b4f85f44a9..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package baggage provides base types and functionality to store and retrieve -baggage in Go context. This package exists because the OpenTracing bridge to -OpenTelemetry needs to synchronize state whenever baggage for a context is -modified and that context contains an OpenTracing span. If it were not for -this need this package would not need to exist and the -`go.opentelemetry.io/otel/baggage` package would be the singular place where -W3C baggage is handled. -*/ -package baggage // import "go.opentelemetry.io/otel/internal/baggage" - -// List is the collection of baggage members. The W3C allows for duplicates, -// but OpenTelemetry does not, therefore, this is represented as a map. 
-type List map[string]Item - -// Item is the value and metadata properties part of a list-member. -type Item struct { - Value string - Properties []Property -} - -// Property is a metadata entry for a list-member. -type Property struct { - Key, Value string - - // HasValue indicates if a zero-value value means the property does not - // have a value or if it was the zero-value. - HasValue bool -} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go deleted file mode 100644 index 3aea9c491f..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package baggage // import "go.opentelemetry.io/otel/internal/baggage" - -import "context" - -type baggageContextKeyType int - -const baggageKey baggageContextKeyType = iota - -// SetHookFunc is a callback called when storing baggage in the context. -type SetHookFunc func(context.Context, List) context.Context - -// GetHookFunc is a callback called when getting baggage from the context. -type GetHookFunc func(context.Context, List) List - -type baggageState struct { - list List - - setHook SetHookFunc - getHook GetHookFunc -} - -// ContextWithSetHook returns a copy of parent with hook configured to be -// invoked every time ContextWithBaggage is called. -// -// Passing nil SetHookFunc creates a context with no set hook to call. -func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.setHook = hook - return context.WithValue(parent, baggageKey, s) -} - -// ContextWithGetHook returns a copy of parent with hook configured to be -// invoked every time FromContext is called. -// -// Passing nil GetHookFunc creates a context with no get hook to call. 
-func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.getHook = hook - return context.WithValue(parent, baggageKey, s) -} - -// ContextWithList returns a copy of parent with baggage. Passing nil list -// returns a context without any baggage. -func ContextWithList(parent context.Context, list List) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.list = list - ctx := context.WithValue(parent, baggageKey, s) - if s.setHook != nil { - ctx = s.setHook(ctx, list) - } - - return ctx -} - -// ListFromContext returns the baggage contained in ctx. -func ListFromContext(ctx context.Context) List { - switch v := ctx.Value(baggageKey).(type) { - case baggageState: - if v.getHook != nil { - return v.getHook(ctx, v.list) - } - return v.list - default: - return nil - } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go b/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go deleted file mode 100644 index 3f0ab31312..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/errorhandler/errorhandler.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package errorhandler provides the global error handler for OpenTelemetry. -// -// This package has no OTel dependencies, allowing it to be imported by any -// package in the module without creating import cycles. -package errorhandler // import "go.opentelemetry.io/otel/internal/errorhandler" - -import ( - "errors" - "log" - "sync" - "sync/atomic" -) - -// ErrorHandler handles irremediable events. -type ErrorHandler interface { - // Handle handles any error deemed irremediable by an OpenTelemetry - // component. 
- Handle(error) -} - -type ErrDelegator struct { - delegate atomic.Pointer[ErrorHandler] -} - -// Compile-time check that delegator implements ErrorHandler. -var _ ErrorHandler = (*ErrDelegator)(nil) - -func (d *ErrDelegator) Handle(err error) { - if eh := d.delegate.Load(); eh != nil { - (*eh).Handle(err) - return - } - log.Print(err) -} - -// setDelegate sets the ErrorHandler delegate. -func (d *ErrDelegator) setDelegate(eh ErrorHandler) { - d.delegate.Store(&eh) -} - -type errorHandlerHolder struct { - eh ErrorHandler -} - -var ( - globalErrorHandler = defaultErrorHandler() - delegateErrorHandlerOnce sync.Once -) - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return globalErrorHandler.Load().(errorHandlerHolder).eh -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - current := GetErrorHandler() - - if _, cOk := current.(*ErrDelegator); cOk { - if _, ehOk := h.(*ErrDelegator); ehOk && current == h { - // Do not assign to the delegate of the default ErrDelegator to be - // itself. 
- log.Print(errors.New("no ErrorHandler delegate configured"), " ErrorHandler remains its current value.") - return - } - } - - delegateErrorHandlerOnce.Do(func() { - if def, ok := current.(*ErrDelegator); ok { - def.setDelegate(h) - } - }) - globalErrorHandler.Store(errorHandlerHolder{eh: h}) -} - -func defaultErrorHandler() *atomic.Value { - v := &atomic.Value{} - v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) - return v -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go deleted file mode 100644 index 77d0425f54..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package global provides the OpenTelemetry global API. -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "go.opentelemetry.io/otel/internal/errorhandler" -) - -// ErrorHandler is an alias for errorhandler.ErrorHandler, kept for backward -// compatibility with existing callers of internal/global. -type ErrorHandler = errorhandler.ErrorHandler - -// ErrDelegator is an alias for errorhandler.ErrDelegator, kept for backward -// compatibility with existing callers of internal/global. 
-type ErrDelegator = errorhandler.ErrDelegator diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go deleted file mode 100644 index 55255cddfc..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "context" - "sync/atomic" - - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/embedded" -) - -// unwrapper unwraps to return the underlying instrument implementation. -type unwrapper interface { - unwrap() metric.Observable -} - -type afCounter struct { - embedded.Float64ObservableCounter - metric.Float64Observable - - name string - opts []metric.Float64ObservableCounterOption - - delegate atomic.Value // metric.Float64ObservableCounter -} - -var ( - _ unwrapper = (*afCounter)(nil) - _ metric.Float64ObservableCounter = (*afCounter)(nil) -) - -func (i *afCounter) setDelegate(m metric.Meter) { - ctr, err := m.Float64ObservableCounter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *afCounter) unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64ObservableCounter) - } - return nil -} - -type afUpDownCounter struct { - embedded.Float64ObservableUpDownCounter - metric.Float64Observable - - name string - opts []metric.Float64ObservableUpDownCounterOption - - delegate atomic.Value // metric.Float64ObservableUpDownCounter -} - -var ( - _ unwrapper = (*afUpDownCounter)(nil) - _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) -) - -func (i *afUpDownCounter) setDelegate(m metric.Meter) { - ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) 
- if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *afUpDownCounter) unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64ObservableUpDownCounter) - } - return nil -} - -type afGauge struct { - embedded.Float64ObservableGauge - metric.Float64Observable - - name string - opts []metric.Float64ObservableGaugeOption - - delegate atomic.Value // metric.Float64ObservableGauge -} - -var ( - _ unwrapper = (*afGauge)(nil) - _ metric.Float64ObservableGauge = (*afGauge)(nil) -) - -func (i *afGauge) setDelegate(m metric.Meter) { - ctr, err := m.Float64ObservableGauge(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *afGauge) unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64ObservableGauge) - } - return nil -} - -type aiCounter struct { - embedded.Int64ObservableCounter - metric.Int64Observable - - name string - opts []metric.Int64ObservableCounterOption - - delegate atomic.Value // metric.Int64ObservableCounter -} - -var ( - _ unwrapper = (*aiCounter)(nil) - _ metric.Int64ObservableCounter = (*aiCounter)(nil) -) - -func (i *aiCounter) setDelegate(m metric.Meter) { - ctr, err := m.Int64ObservableCounter(i.name, i.opts...) 
- if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *aiCounter) unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64ObservableCounter) - } - return nil -} - -type aiUpDownCounter struct { - embedded.Int64ObservableUpDownCounter - metric.Int64Observable - - name string - opts []metric.Int64ObservableUpDownCounterOption - - delegate atomic.Value // metric.Int64ObservableUpDownCounter -} - -var ( - _ unwrapper = (*aiUpDownCounter)(nil) - _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) -) - -func (i *aiUpDownCounter) setDelegate(m metric.Meter) { - ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *aiUpDownCounter) unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64ObservableUpDownCounter) - } - return nil -} - -type aiGauge struct { - embedded.Int64ObservableGauge - metric.Int64Observable - - name string - opts []metric.Int64ObservableGaugeOption - - delegate atomic.Value // metric.Int64ObservableGauge -} - -var ( - _ unwrapper = (*aiGauge)(nil) - _ metric.Int64ObservableGauge = (*aiGauge)(nil) -) - -func (i *aiGauge) setDelegate(m metric.Meter) { - ctr, err := m.Int64ObservableGauge(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *aiGauge) unwrap() metric.Observable { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64ObservableGauge) - } - return nil -} - -// Sync Instruments. -type sfCounter struct { - embedded.Float64Counter - - name string - opts []metric.Float64CounterOption - - delegate atomic.Value // metric.Float64Counter -} - -var _ metric.Float64Counter = (*sfCounter)(nil) - -func (i *sfCounter) setDelegate(m metric.Meter) { - ctr, err := m.Float64Counter(i.name, i.opts...) 
- if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Float64Counter).Add(ctx, incr, opts...) - } -} - -func (i *sfCounter) Enabled(ctx context.Context) bool { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64Counter).Enabled(ctx) - } - return false -} - -type sfUpDownCounter struct { - embedded.Float64UpDownCounter - - name string - opts []metric.Float64UpDownCounterOption - - delegate atomic.Value // metric.Float64UpDownCounter -} - -var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) - -func (i *sfUpDownCounter) setDelegate(m metric.Meter) { - ctr, err := m.Float64UpDownCounter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) - } -} - -func (i *sfUpDownCounter) Enabled(ctx context.Context) bool { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64UpDownCounter).Enabled(ctx) - } - return false -} - -type sfHistogram struct { - embedded.Float64Histogram - - name string - opts []metric.Float64HistogramOption - - delegate atomic.Value // metric.Float64Histogram -} - -var _ metric.Float64Histogram = (*sfHistogram)(nil) - -func (i *sfHistogram) setDelegate(m metric.Meter) { - ctr, err := m.Float64Histogram(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Float64Histogram).Record(ctx, x, opts...) 
- } -} - -func (i *sfHistogram) Enabled(ctx context.Context) bool { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64Histogram).Enabled(ctx) - } - return false -} - -type sfGauge struct { - embedded.Float64Gauge - - name string - opts []metric.Float64GaugeOption - - delegate atomic.Value // metric.Float64Gauge -} - -var _ metric.Float64Gauge = (*sfGauge)(nil) - -func (i *sfGauge) setDelegate(m metric.Meter) { - ctr, err := m.Float64Gauge(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Float64Gauge).Record(ctx, x, opts...) - } -} - -func (i *sfGauge) Enabled(ctx context.Context) bool { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Float64Gauge).Enabled(ctx) - } - return false -} - -type siCounter struct { - embedded.Int64Counter - - name string - opts []metric.Int64CounterOption - - delegate atomic.Value // metric.Int64Counter -} - -var _ metric.Int64Counter = (*siCounter)(nil) - -func (i *siCounter) setDelegate(m metric.Meter) { - ctr, err := m.Int64Counter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Int64Counter).Add(ctx, x, opts...) 
- } -} - -func (i *siCounter) Enabled(ctx context.Context) bool { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64Counter).Enabled(ctx) - } - return false -} - -type siUpDownCounter struct { - embedded.Int64UpDownCounter - - name string - opts []metric.Int64UpDownCounterOption - - delegate atomic.Value // metric.Int64UpDownCounter -} - -var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) - -func (i *siUpDownCounter) setDelegate(m metric.Meter) { - ctr, err := m.Int64UpDownCounter(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) - } -} - -func (i *siUpDownCounter) Enabled(ctx context.Context) bool { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64UpDownCounter).Enabled(ctx) - } - return false -} - -type siHistogram struct { - embedded.Int64Histogram - - name string - opts []metric.Int64HistogramOption - - delegate atomic.Value // metric.Int64Histogram -} - -var _ metric.Int64Histogram = (*siHistogram)(nil) - -func (i *siHistogram) setDelegate(m metric.Meter) { - ctr, err := m.Int64Histogram(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Int64Histogram).Record(ctx, x, opts...) 
- } -} - -func (i *siHistogram) Enabled(ctx context.Context) bool { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64Histogram).Enabled(ctx) - } - return false -} - -type siGauge struct { - embedded.Int64Gauge - - name string - opts []metric.Int64GaugeOption - - delegate atomic.Value // metric.Int64Gauge -} - -var _ metric.Int64Gauge = (*siGauge)(nil) - -func (i *siGauge) setDelegate(m metric.Meter) { - ctr, err := m.Int64Gauge(i.name, i.opts...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - i.delegate.Store(ctr) -} - -func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { - if ctr := i.delegate.Load(); ctr != nil { - ctr.(metric.Int64Gauge).Record(ctx, x, opts...) - } -} - -func (i *siGauge) Enabled(ctx context.Context) bool { - if ctr := i.delegate.Load(); ctr != nil { - return ctr.(metric.Int64Gauge).Enabled(ctx) - } - return false -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go deleted file mode 100644 index 86d7f4ba08..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "log" - "os" - "sync/atomic" - - "github.com/go-logr/logr" - "github.com/go-logr/stdr" -) - -// globalLogger holds a reference to the [logr.Logger] used within -// go.opentelemetry.io/otel. -// -// The default logger uses stdr which is backed by the standard `log.Logger` -// interface. This logger will only show messages at the Error Level. -var globalLogger = func() *atomic.Pointer[logr.Logger] { - l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) - - p := new(atomic.Pointer[logr.Logger]) - p.Store(&l) - return p -}() - -// SetLogger sets the global Logger to l. 
-// -// To see Warn messages use a logger with `l.V(1).Enabled() == true` -// To see Info messages use a logger with `l.V(4).Enabled() == true` -// To see Debug messages use a logger with `l.V(8).Enabled() == true`. -func SetLogger(l logr.Logger) { - globalLogger.Store(&l) -} - -// GetLogger returns the global logger. -func GetLogger() logr.Logger { - return *globalLogger.Load() -} - -// Info prints messages about the general state of the API or SDK. -// This should usually be less than 5 messages a minute. -func Info(msg string, keysAndValues ...any) { - GetLogger().V(4).Info(msg, keysAndValues...) -} - -// Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...any) { - GetLogger().Error(err, msg, keysAndValues...) -} - -// Debug prints messages about all internal changes in the API or SDK. -func Debug(msg string, keysAndValues ...any) { - GetLogger().V(8).Info(msg, keysAndValues...) -} - -// Warn prints messages about warnings in the API or SDK. -// Not an error but is likely more important than an informational event. -func Warn(msg string, keysAndValues ...any) { - GetLogger().V(1).Info(msg, keysAndValues...) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go deleted file mode 100644 index 50043d669b..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ /dev/null @@ -1,625 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "container/list" - "context" - "reflect" - "sync" - - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/embedded" -) - -// meterProvider is a placeholder for a configured SDK MeterProvider. -// -// All MeterProvider functionality is forwarded to a delegate once -// configured. 
-type meterProvider struct { - embedded.MeterProvider - - mtx sync.Mutex - meters map[il]*meter - - delegate metric.MeterProvider -} - -// setDelegate configures p to delegate all MeterProvider functionality to -// provider. -// -// All Meters provided prior to this function call are switched out to be -// Meters provided by provider. All instruments and callbacks are recreated and -// delegated. -// -// It is guaranteed by the caller that this happens only once. -func (p *meterProvider) setDelegate(provider metric.MeterProvider) { - p.mtx.Lock() - defer p.mtx.Unlock() - - p.delegate = provider - - if len(p.meters) == 0 { - return - } - - for _, meter := range p.meters { - meter.setDelegate(provider) - } - - p.meters = nil -} - -// Meter implements MeterProvider. -func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { - p.mtx.Lock() - defer p.mtx.Unlock() - - if p.delegate != nil { - return p.delegate.Meter(name, opts...) - } - - // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. - - c := metric.NewMeterConfig(opts...) - key := il{ - name: name, - version: c.InstrumentationVersion(), - schema: c.SchemaURL(), - attrs: c.InstrumentationAttributes(), - } - - if p.meters == nil { - p.meters = make(map[il]*meter) - } - - if val, ok := p.meters[key]; ok { - return val - } - - t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} - p.meters[key] = t - return t -} - -// meter is a placeholder for a metric.Meter. -// -// All Meter functionality is forwarded to a delegate once configured. -// Otherwise, all functionality is forwarded to a NoopMeter. 
-type meter struct { - embedded.Meter - - name string - opts []metric.MeterOption - - mtx sync.Mutex - instruments map[instID]delegatedInstrument - - registry list.List - - delegate metric.Meter -} - -type delegatedInstrument interface { - setDelegate(metric.Meter) -} - -// instID are the identifying properties of an instrument. -type instID struct { - // name is the name of the stream. - name string - // description is the description of the stream. - description string - // kind defines the functional group of the instrument. - kind reflect.Type - // unit is the unit of the stream. - unit string -} - -// setDelegate configures m to delegate all Meter functionality to Meters -// created by provider. -// -// All subsequent calls to the Meter methods will be passed to the delegate. -// -// It is guaranteed by the caller that this happens only once. -func (m *meter) setDelegate(provider metric.MeterProvider) { - m.mtx.Lock() - defer m.mtx.Unlock() - - meter := provider.Meter(m.name, m.opts...) - m.delegate = meter - - for _, inst := range m.instruments { - inst.setDelegate(meter) - } - - var n *list.Element - for e := m.registry.Front(); e != nil; e = n { - r := e.Value.(*registration) - r.setDelegate(meter) - n = e.Next() - m.registry.Remove(e) - } - - m.instruments = nil - m.registry.Init() -} - -func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Int64Counter(name, options...) - } - - cfg := metric.NewInt64CounterConfig(options...) 
- id := instID{ - name: name, - kind: reflect.TypeFor[*siCounter](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Int64Counter), nil - } - i := &siCounter{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Int64UpDownCounter( - name string, - options ...metric.Int64UpDownCounterOption, -) (metric.Int64UpDownCounter, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Int64UpDownCounter(name, options...) - } - - cfg := metric.NewInt64UpDownCounterConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*siUpDownCounter](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Int64UpDownCounter), nil - } - i := &siUpDownCounter{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Int64Histogram(name, options...) - } - - cfg := metric.NewInt64HistogramConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*siHistogram](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Int64Histogram), nil - } - i := &siHistogram{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Int64Gauge(name, options...) - } - - cfg := metric.NewInt64GaugeConfig(options...) 
- id := instID{ - name: name, - kind: reflect.TypeFor[*siGauge](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Int64Gauge), nil - } - i := &siGauge{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Int64ObservableCounter( - name string, - options ...metric.Int64ObservableCounterOption, -) (metric.Int64ObservableCounter, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Int64ObservableCounter(name, options...) - } - - cfg := metric.NewInt64ObservableCounterConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*aiCounter](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Int64ObservableCounter), nil - } - i := &aiCounter{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Int64ObservableUpDownCounter( - name string, - options ...metric.Int64ObservableUpDownCounterOption, -) (metric.Int64ObservableUpDownCounter, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Int64ObservableUpDownCounter(name, options...) - } - - cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*aiUpDownCounter](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Int64ObservableUpDownCounter), nil - } - i := &aiUpDownCounter{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Int64ObservableGauge( - name string, - options ...metric.Int64ObservableGaugeOption, -) (metric.Int64ObservableGauge, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Int64ObservableGauge(name, options...) - } - - cfg := metric.NewInt64ObservableGaugeConfig(options...) 
- id := instID{ - name: name, - kind: reflect.TypeFor[*aiGauge](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Int64ObservableGauge), nil - } - i := &aiGauge{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Float64Counter(name, options...) - } - - cfg := metric.NewFloat64CounterConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*sfCounter](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Float64Counter), nil - } - i := &sfCounter{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Float64UpDownCounter( - name string, - options ...metric.Float64UpDownCounterOption, -) (metric.Float64UpDownCounter, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Float64UpDownCounter(name, options...) - } - - cfg := metric.NewFloat64UpDownCounterConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*sfUpDownCounter](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Float64UpDownCounter), nil - } - i := &sfUpDownCounter{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Float64Histogram( - name string, - options ...metric.Float64HistogramOption, -) (metric.Float64Histogram, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Float64Histogram(name, options...) - } - - cfg := metric.NewFloat64HistogramConfig(options...) 
- id := instID{ - name: name, - kind: reflect.TypeFor[*sfHistogram](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Float64Histogram), nil - } - i := &sfHistogram{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Float64Gauge(name, options...) - } - - cfg := metric.NewFloat64GaugeConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*sfGauge](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Float64Gauge), nil - } - i := &sfGauge{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Float64ObservableCounter( - name string, - options ...metric.Float64ObservableCounterOption, -) (metric.Float64ObservableCounter, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Float64ObservableCounter(name, options...) - } - - cfg := metric.NewFloat64ObservableCounterConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*afCounter](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Float64ObservableCounter), nil - } - i := &afCounter{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Float64ObservableUpDownCounter( - name string, - options ...metric.Float64ObservableUpDownCounterOption, -) (metric.Float64ObservableUpDownCounter, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Float64ObservableUpDownCounter(name, options...) - } - - cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) 
- id := instID{ - name: name, - kind: reflect.TypeFor[*afUpDownCounter](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Float64ObservableUpDownCounter), nil - } - i := &afUpDownCounter{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -func (m *meter) Float64ObservableGauge( - name string, - options ...metric.Float64ObservableGaugeOption, -) (metric.Float64ObservableGauge, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.Float64ObservableGauge(name, options...) - } - - cfg := metric.NewFloat64ObservableGaugeConfig(options...) - id := instID{ - name: name, - kind: reflect.TypeFor[*afGauge](), - description: cfg.Description(), - unit: cfg.Unit(), - } - if f, ok := m.instruments[id]; ok { - return f.(metric.Float64ObservableGauge), nil - } - i := &afGauge{name: name, opts: options} - m.instruments[id] = i - return i, nil -} - -// RegisterCallback captures the function that will be called during Collect. -func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - if m.delegate != nil { - return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) 
- } - - reg := ®istration{instruments: insts, function: f} - e := m.registry.PushBack(reg) - reg.unreg = func() error { - m.mtx.Lock() - _ = m.registry.Remove(e) - m.mtx.Unlock() - return nil - } - return reg, nil -} - -func unwrapInstruments(instruments []metric.Observable) []metric.Observable { - out := make([]metric.Observable, 0, len(instruments)) - - for _, inst := range instruments { - if in, ok := inst.(unwrapper); ok { - out = append(out, in.unwrap()) - } else { - out = append(out, inst) - } - } - - return out -} - -type registration struct { - embedded.Registration - - instruments []metric.Observable - function metric.Callback - - unreg func() error - unregMu sync.Mutex -} - -type unwrapObs struct { - embedded.Observer - obs metric.Observer -} - -// unwrapFloat64Observable returns an expected metric.Float64Observable after -// unwrapping the global object. -func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { - if unwrapped, ok := inst.(unwrapper); ok { - if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { - // Note: if the unwrapped object does not - // unwrap as an observable for either of the - // predicates here, it means an internal bug in - // this package. We avoid logging an error in - // this case, because the SDK has to try its - // own type conversion on the object. The SDK - // will see this and be forced to respond with - // its own error. - // - // This code uses a double-nested if statement - // to avoid creating a branch that is - // impossible to cover. - inst = floatObs - } - } - return inst -} - -// unwrapInt64Observable returns an expected metric.Int64Observable after -// unwrapping the global object. -func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { - if unwrapped, ok := inst.(unwrapper); ok { - if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { - // See the comment in unwrapFloat64Observable(). 
- inst = unint - } - } - return inst -} - -func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { - uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) -} - -func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { - uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) -} - -func unwrapCallback(f metric.Callback) metric.Callback { - return func(ctx context.Context, obs metric.Observer) error { - return f(ctx, &unwrapObs{obs: obs}) - } -} - -func (c *registration) setDelegate(m metric.Meter) { - c.unregMu.Lock() - defer c.unregMu.Unlock() - - if c.unreg == nil { - // Unregister already called. - return - } - - reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) - if err != nil { - GetErrorHandler().Handle(err) - return - } - - c.unreg = reg.Unregister -} - -func (c *registration) Unregister() error { - c.unregMu.Lock() - defer c.unregMu.Unlock() - if c.unreg == nil { - // Unregister already called. - return nil - } - - var err error - err, c.unreg = c.unreg(), nil - return err -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go deleted file mode 100644 index 38560ff991..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "context" - "sync" - - "go.opentelemetry.io/otel/propagation" -) - -// textMapPropagator is a default TextMapPropagator that delegates calls to a -// registered delegate if one is set, otherwise it defaults to delegating the -// calls to a the default no-op propagation.TextMapPropagator. 
-type textMapPropagator struct { - mtx sync.Mutex - once sync.Once - delegate propagation.TextMapPropagator - noop propagation.TextMapPropagator -} - -// Compile-time guarantee that textMapPropagator implements the -// propagation.TextMapPropagator interface. -var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) - -func newTextMapPropagator() *textMapPropagator { - return &textMapPropagator{ - noop: propagation.NewCompositeTextMapPropagator(), - } -} - -// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are -// forwarded to. Delegation can only be performed once, all subsequent calls -// perform no delegation. -func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { - if delegate == nil { - return - } - - p.mtx.Lock() - p.once.Do(func() { p.delegate = delegate }) - p.mtx.Unlock() -} - -// effectiveDelegate returns the current delegate of p if one is set, -// otherwise the default noop TextMapPropagator is returned. This method -// can be called concurrently. -func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { - p.mtx.Lock() - defer p.mtx.Unlock() - if p.delegate != nil { - return p.delegate - } - return p.noop -} - -// Inject set cross-cutting concerns from the Context into the carrier. -func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { - p.effectiveDelegate().Inject(ctx, carrier) -} - -// Extract reads cross-cutting concerns from the carrier into a Context. -func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { - return p.effectiveDelegate().Extract(ctx, carrier) -} - -// Fields returns the keys whose values are set with Inject. 
-func (p *textMapPropagator) Fields() []string { - return p.effectiveDelegate().Fields() -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go deleted file mode 100644 index 225c9e5015..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "errors" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/internal/errorhandler" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" -) - -type ( - tracerProviderHolder struct { - tp trace.TracerProvider - } - - propagatorsHolder struct { - tm propagation.TextMapPropagator - } - - meterProviderHolder struct { - mp metric.MeterProvider - } -) - -var ( - globalTracer = defaultTracerValue() - globalPropagators = defaultPropagatorsValue() - globalMeterProvider = defaultMeterProvider() - - delegateTraceOnce sync.Once - delegateTextMapPropagatorOnce sync.Once - delegateMeterOnce sync.Once -) - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return errorhandler.GetErrorHandler() -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. 
Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - errorhandler.SetErrorHandler(h) -} - -// TracerProvider is the internal implementation for global.TracerProvider. -func TracerProvider() trace.TracerProvider { - return globalTracer.Load().(tracerProviderHolder).tp -} - -// SetTracerProvider is the internal implementation for global.SetTracerProvider. -func SetTracerProvider(tp trace.TracerProvider) { - current := TracerProvider() - - if _, cOk := current.(*tracerProvider); cOk { - if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { - // Do not assign the default delegating TracerProvider to delegate - // to itself. - Error( - errors.New("no delegate configured in tracer provider"), - "Setting tracer provider to its current value. No delegate will be configured", - ) - return - } - } - - delegateTraceOnce.Do(func() { - if def, ok := current.(*tracerProvider); ok { - def.setDelegate(tp) - } - }) - globalTracer.Store(tracerProviderHolder{tp: tp}) -} - -// TextMapPropagator is the internal implementation for global.TextMapPropagator. -func TextMapPropagator() propagation.TextMapPropagator { - return globalPropagators.Load().(propagatorsHolder).tm -} - -// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. -func SetTextMapPropagator(p propagation.TextMapPropagator) { - current := TextMapPropagator() - - if _, cOk := current.(*textMapPropagator); cOk { - if _, pOk := p.(*textMapPropagator); pOk && current == p { - // Do not assign the default delegating TextMapPropagator to - // delegate to itself. - Error( - errors.New("no delegate configured in text map propagator"), - "Setting text map propagator to its current value. No delegate will be configured", - ) - return - } - } - - // For the textMapPropagator already returned by TextMapPropagator - // delegate to p. 
- delegateTextMapPropagatorOnce.Do(func() { - if def, ok := current.(*textMapPropagator); ok { - def.SetDelegate(p) - } - }) - // Return p when subsequent calls to TextMapPropagator are made. - globalPropagators.Store(propagatorsHolder{tm: p}) -} - -// MeterProvider is the internal implementation for global.MeterProvider. -func MeterProvider() metric.MeterProvider { - return globalMeterProvider.Load().(meterProviderHolder).mp -} - -// SetMeterProvider is the internal implementation for global.SetMeterProvider. -func SetMeterProvider(mp metric.MeterProvider) { - current := MeterProvider() - if _, cOk := current.(*meterProvider); cOk { - if _, mpOk := mp.(*meterProvider); mpOk && current == mp { - // Do not assign the default delegating MeterProvider to delegate - // to itself. - Error( - errors.New("no delegate configured in meter provider"), - "Setting meter provider to its current value. No delegate will be configured", - ) - return - } - } - - delegateMeterOnce.Do(func() { - if def, ok := current.(*meterProvider); ok { - def.setDelegate(mp) - } - }) - globalMeterProvider.Store(meterProviderHolder{mp: mp}) -} - -func defaultTracerValue() *atomic.Value { - v := &atomic.Value{} - v.Store(tracerProviderHolder{tp: &tracerProvider{}}) - return v -} - -func defaultPropagatorsValue() *atomic.Value { - v := &atomic.Value{} - v.Store(propagatorsHolder{tm: newTextMapPropagator()}) - return v -} - -func defaultMeterProvider() *atomic.Value { - v := &atomic.Value{} - v.Store(meterProviderHolder{mp: &meterProvider{}}) - return v -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go deleted file mode 100644 index bf5cf3119b..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package global // import "go.opentelemetry.io/otel/internal/global" - -/* -This file 
contains the forwarding implementation of the TracerProvider used as -the default global instance. Prior to initialization of an SDK, Tracers -returned by the global TracerProvider will provide no-op functionality. This -means that all Span created prior to initialization are no-op Spans. - -Once an SDK has been initialized, all provided no-op Tracers are swapped for -Tracers provided by the SDK defined TracerProvider. However, any Span started -prior to this initialization does not change its behavior. Meaning, the Span -remains a no-op Span. - -The implementation to track and swap Tracers locks all new Tracer creation -until the swap is complete. This assumes that this operation is not -performance-critical. If that assumption is incorrect, be sure to configure an -SDK prior to any Tracer creation. -*/ - -import ( - "context" - "sync" - "sync/atomic" - - "go.opentelemetry.io/auto/sdk" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/embedded" -) - -// tracerProvider is a placeholder for a configured SDK TracerProvider. -// -// All TracerProvider functionality is forwarded to a delegate once -// configured. -type tracerProvider struct { - embedded.TracerProvider - - mtx sync.Mutex - tracers map[il]*tracer - delegate trace.TracerProvider -} - -// Compile-time guarantee that tracerProvider implements the TracerProvider -// interface. -var _ trace.TracerProvider = &tracerProvider{} - -// setDelegate configures p to delegate all TracerProvider functionality to -// provider. -// -// All Tracers provided prior to this function call are switched out to be -// Tracers provided by provider. -// -// It is guaranteed by the caller that this happens only once. 
-func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { - p.mtx.Lock() - defer p.mtx.Unlock() - - p.delegate = provider - - if len(p.tracers) == 0 { - return - } - - for _, t := range p.tracers { - t.setDelegate(provider) - } - - p.tracers = nil -} - -// Tracer implements TracerProvider. -func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - p.mtx.Lock() - defer p.mtx.Unlock() - - if p.delegate != nil { - return p.delegate.Tracer(name, opts...) - } - - // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. - - c := trace.NewTracerConfig(opts...) - key := il{ - name: name, - version: c.InstrumentationVersion(), - schema: c.SchemaURL(), - attrs: c.InstrumentationAttributes(), - } - - if p.tracers == nil { - p.tracers = make(map[il]*tracer) - } - - if val, ok := p.tracers[key]; ok { - return val - } - - t := &tracer{name: name, opts: opts, provider: p} - p.tracers[key] = t - return t -} - -type il struct { - name string - version string - schema string - attrs attribute.Set -} - -// tracer is a placeholder for a trace.Tracer. -// -// All Tracer functionality is forwarded to a delegate once configured. -// Otherwise, all functionality is forwarded to a NoopTracer. -type tracer struct { - embedded.Tracer - - name string - opts []trace.TracerOption - provider *tracerProvider - - delegate atomic.Value -} - -// Compile-time guarantee that tracer implements the trace.Tracer interface. -var _ trace.Tracer = &tracer{} - -// setDelegate configures t to delegate all Tracer functionality to Tracers -// created by provider. -// -// All subsequent calls to the Tracer methods will be passed to the delegate. -// -// It is guaranteed by the caller that this happens only once. 
-func (t *tracer) setDelegate(provider trace.TracerProvider) { - t.delegate.Store(provider.Tracer(t.name, t.opts...)) -} - -// Start implements trace.Tracer by forwarding the call to t.delegate if -// set, otherwise it forwards the call to a NoopTracer. -func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - delegate := t.delegate.Load() - if delegate != nil { - return delegate.(trace.Tracer).Start(ctx, name, opts...) - } - - return t.newSpan(ctx, autoInstEnabled, name, opts) -} - -// autoInstEnabled determines if the auto-instrumentation SDK span is returned -// from the tracer when not backed by a delegate and auto-instrumentation has -// attached to this process. -// -// The auto-instrumentation is expected to overwrite this value to true when it -// attaches. By default, this will point to false and mean a tracer will return -// a nonRecordingSpan by default. -var autoInstEnabled = new(bool) - -// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF -// uprobe to this code. -// -// "noinline" pragma prevents the method from ever being inlined. -// -//go:noinline -func (t *tracer) newSpan( - ctx context.Context, - autoSpan *bool, - name string, - opts []trace.SpanStartOption, -) (context.Context, trace.Span) { - // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is - // so the auto-instrumentation can define a uprobe for (*t).newSpan and be - // provided with the address of the bool autoInstEnabled points to. It - // needs to be a parameter so that pointer can be reliably determined, it - // should not be read from the global. - - if *autoSpan { - tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) - return tracer.Start(ctx, name, opts...) 
- } - - s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} - ctx = trace.ContextWithSpan(ctx, s) - return ctx, s -} - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. -type nonRecordingSpan struct { - embedded.Span - - sc trace.SpanContext - tracer *tracer -} - -var _ trace.Span = nonRecordingSpan{} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } - -// IsRecording always returns false. -func (nonRecordingSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (nonRecordingSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (nonRecordingSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (nonRecordingSpan) End(...trace.SpanEndOption) {} - -// RecordError does nothing. -func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} - -// AddEvent does nothing. -func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} - -// AddLink does nothing. -func (nonRecordingSpan) AddLink(trace.Link) {} - -// SetName does nothing. 
-func (nonRecordingSpan) SetName(string) {} - -func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go deleted file mode 100644 index 6de7f2e4d8..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otel // import "go.opentelemetry.io/otel" - -import ( - "github.com/go-logr/logr" - - "go.opentelemetry.io/otel/internal/global" -) - -// SetLogger configures the logger used internally to opentelemetry. -func SetLogger(logger logr.Logger) { - global.SetLogger(logger) -} diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go deleted file mode 100644 index 527d9aec86..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" -) - -// Meter returns a Meter from the global MeterProvider. The name must be the -// name of the library providing instrumentation. This name may be the same as -// the instrumented code only if that code provides built-in instrumentation. -// If the name is empty, then an implementation defined default name will be -// used instead. -// -// If this is called before a global MeterProvider is registered the returned -// Meter will be a No-op implementation of a Meter. When a global MeterProvider -// is registered for the first time, the returned Meter, and all the -// instruments it has created or will create, are recreated automatically from -// the new MeterProvider. -// -// This is short for GetMeterProvider().Meter(name). 
-func Meter(name string, opts ...metric.MeterOption) metric.Meter { - return GetMeterProvider().Meter(name, opts...) -} - -// GetMeterProvider returns the registered global meter provider. -// -// If no global GetMeterProvider has been registered, a No-op GetMeterProvider -// implementation is returned. When a global GetMeterProvider is registered for -// the first time, the returned GetMeterProvider, and all the Meters it has -// created or will create, are recreated automatically from the new -// GetMeterProvider. -func GetMeterProvider() metric.MeterProvider { - return global.MeterProvider() -} - -// SetMeterProvider registers mp as the global MeterProvider. -func SetMeterProvider(mp metric.MeterProvider) { - global.SetMeterProvider(mp) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE deleted file mode 100644 index f1aee0f110..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/LICENSE +++ /dev/null @@ -1,231 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- - -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md deleted file mode 100644 index 0cf902e01f..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Metric API - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go deleted file mode 100644 index 466812d343..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// Float64Observable describes a set of instruments used asynchronously to -// record float64 measurements once per collection cycle. Observations of -// these instruments are only made within a callback. -// -// Warning: Methods may be added to this interface in minor releases. -type Float64Observable interface { - Observable - - float64Observable() -} - -// Float64ObservableCounter is an instrument used to asynchronously record -// increasing float64 measurements once per collection cycle. Observations are -// only made within a callback for this instrument. 
The value observed is -// assumed the to be the cumulative sum of the count. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for -// unimplemented methods. -type Float64ObservableCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64ObservableCounter - - Float64Observable -} - -// Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record float64 values. -type Float64ObservableCounterConfig struct { - description string - unit string - callbacks []Float64Callback -} - -// NewFloat64ObservableCounterConfig returns a new -// [Float64ObservableCounterConfig] with all opts applied. -func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { - var config Float64ObservableCounterConfig - for _, o := range opts { - config = o.applyFloat64ObservableCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Float64ObservableCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64ObservableCounterConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { - return c.callbacks -} - -// Float64ObservableCounterOption applies options to a -// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and -// [InstrumentOption] for other options that can be used as a -// Float64ObservableCounterOption. 
-type Float64ObservableCounterOption interface { - applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig -} - -// Float64ObservableUpDownCounter is an instrument used to asynchronously -// record float64 measurements once per collection cycle. Observations are only -// made within a callback for this instrument. The value observed is assumed -// the to be the cumulative sum of the count. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64ObservableUpDownCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64ObservableUpDownCounter - - Float64Observable -} - -// Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record float64 values. -type Float64ObservableUpDownCounterConfig struct { - description string - unit string - callbacks []Float64Callback -} - -// NewFloat64ObservableUpDownCounterConfig returns a new -// [Float64ObservableUpDownCounterConfig] with all opts applied. -func NewFloat64ObservableUpDownCounterConfig( - opts ...Float64ObservableUpDownCounterOption, -) Float64ObservableUpDownCounterConfig { - var config Float64ObservableUpDownCounterConfig - for _, o := range opts { - config = o.applyFloat64ObservableUpDownCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Float64ObservableUpDownCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64ObservableUpDownCounterConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. 
-func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { - return c.callbacks -} - -// Float64ObservableUpDownCounterOption applies options to a -// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and -// [InstrumentOption] for other options that can be used as a -// Float64ObservableUpDownCounterOption. -type Float64ObservableUpDownCounterOption interface { - applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig -} - -// Float64ObservableGauge is an instrument used to asynchronously record -// instantaneous float64 measurements once per collection cycle. Observations -// are only made within a callback for this instrument. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64ObservableGauge interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64ObservableGauge - - Float64Observable -} - -// Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record float64 values. -type Float64ObservableGaugeConfig struct { - description string - unit string - callbacks []Float64Callback -} - -// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] -// with all opts applied. -func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { - var config Float64ObservableGaugeConfig - for _, o := range opts { - config = o.applyFloat64ObservableGauge(config) - } - return config -} - -// Description returns the configured description. 
-func (c Float64ObservableGaugeConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64ObservableGaugeConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { - return c.callbacks -} - -// Float64ObservableGaugeOption applies options to a -// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and -// [InstrumentOption] for other options that can be used as a -// Float64ObservableGaugeOption. -type Float64ObservableGaugeOption interface { - applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig -} - -// Float64Observer is a recorder of float64 measurements. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64Observer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64Observer - - // Observe records the float64 value. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Observe(value float64, options ...ObserveOption) -} - -// Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observable instrument it is registered with. -// Calls to the Float64Observer record measurement values for the -// Float64Observable. -// -// The function needs to complete in a finite amount of time and the deadline -// of the passed context is expected to be honored. 
-// -// The function needs to make unique observations across all registered -// Float64Callbacks. Meaning, it should not report measurements with the same -// attributes as another Float64Callbacks also registered for the same -// instrument. -// -// The function needs to be reentrant and concurrent safe. -// -// Note that Go's mutexes are not reentrant, and locking a mutex takes -// an indefinite amount of time. It is therefore advised to avoid -// using mutexes inside callbacks. -type Float64Callback func(context.Context, Float64Observer) error - -// Float64ObservableOption applies options to float64 Observer instruments. -type Float64ObservableOption interface { - Float64ObservableCounterOption - Float64ObservableUpDownCounterOption - Float64ObservableGaugeOption -} - -type float64CallbackOpt struct { - cback Float64Callback -} - -func (o float64CallbackOpt) applyFloat64ObservableCounter( - cfg Float64ObservableCounterConfig, -) Float64ObservableCounterConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter( - cfg Float64ObservableUpDownCounterConfig, -) Float64ObservableUpDownCounterConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -// WithFloat64Callback adds callback to be called for an instrument. 
-func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { - return float64CallbackOpt{callback} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go deleted file mode 100644 index 66c971bd8a..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// Int64Observable describes a set of instruments used asynchronously to record -// int64 measurements once per collection cycle. Observations of these -// instruments are only made within a callback. -// -// Warning: Methods may be added to this interface in minor releases. -type Int64Observable interface { - Observable - - int64Observable() -} - -// Int64ObservableCounter is an instrument used to asynchronously record -// increasing int64 measurements once per collection cycle. Observations are -// only made within a callback for this instrument. The value observed is -// assumed the to be the cumulative sum of the count. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64ObservableCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64ObservableCounter - - Int64Observable -} - -// Int64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. 
-type Int64ObservableCounterConfig struct { - description string - unit string - callbacks []Int64Callback -} - -// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] -// with all opts applied. -func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { - var config Int64ObservableCounterConfig - for _, o := range opts { - config = o.applyInt64ObservableCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Int64ObservableCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64ObservableCounterConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { - return c.callbacks -} - -// Int64ObservableCounterOption applies options to a -// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and -// [InstrumentOption] for other options that can be used as an -// Int64ObservableCounterOption. -type Int64ObservableCounterOption interface { - applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig -} - -// Int64ObservableUpDownCounter is an instrument used to asynchronously record -// int64 measurements once per collection cycle. Observations are only made -// within a callback for this instrument. The value observed is assumed the to -// be the cumulative sum of the count. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64ObservableUpDownCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. 
- embedded.Int64ObservableUpDownCounter - - Int64Observable -} - -// Int64ObservableUpDownCounterConfig contains options for asynchronous counter -// instruments that record int64 values. -type Int64ObservableUpDownCounterConfig struct { - description string - unit string - callbacks []Int64Callback -} - -// NewInt64ObservableUpDownCounterConfig returns a new -// [Int64ObservableUpDownCounterConfig] with all opts applied. -func NewInt64ObservableUpDownCounterConfig( - opts ...Int64ObservableUpDownCounterOption, -) Int64ObservableUpDownCounterConfig { - var config Int64ObservableUpDownCounterConfig - for _, o := range opts { - config = o.applyInt64ObservableUpDownCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Int64ObservableUpDownCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64ObservableUpDownCounterConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { - return c.callbacks -} - -// Int64ObservableUpDownCounterOption applies options to a -// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and -// [InstrumentOption] for other options that can be used as an -// Int64ObservableUpDownCounterOption. -type Int64ObservableUpDownCounterOption interface { - applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig -} - -// Int64ObservableGauge is an instrument used to asynchronously record -// instantaneous int64 measurements once per collection cycle. Observations are -// only made within a callback for this instrument. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. 
-type Int64ObservableGauge interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64ObservableGauge - - Int64Observable -} - -// Int64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. -type Int64ObservableGaugeConfig struct { - description string - unit string - callbacks []Int64Callback -} - -// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] -// with all opts applied. -func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { - var config Int64ObservableGaugeConfig - for _, o := range opts { - config = o.applyInt64ObservableGauge(config) - } - return config -} - -// Description returns the configured description. -func (c Int64ObservableGaugeConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64ObservableGaugeConfig) Unit() string { - return c.unit -} - -// Callbacks returns the configured callbacks. -func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { - return c.callbacks -} - -// Int64ObservableGaugeOption applies options to a -// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and -// [InstrumentOption] for other options that can be used as an -// Int64ObservableGaugeOption. -type Int64ObservableGaugeOption interface { - applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig -} - -// Int64Observer is a recorder of int64 measurements. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64Observer interface { - // Users of the interface can ignore this. 
This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64Observer - - // Observe records the int64 value. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Observe(value int64, options ...ObserveOption) -} - -// Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observable instrument it is registered with. Calls to the -// Int64Observer record measurement values for the Int64Observable. -// -// The function needs to complete in a finite amount of time and the deadline -// of the passed context is expected to be honored. -// -// The function needs to make unique observations across all registered -// Int64Callbacks. Meaning, it should not report measurements with the same -// attributes as another Int64Callbacks also registered for the same -// instrument. -// -// The function needs to be reentrant and concurrent safe. -// -// Note that Go's mutexes are not reentrant, and locking a mutex takes -// an indefinite amount of time. It is therefore advised to avoid -// using mutexes inside callbacks. -type Int64Callback func(context.Context, Int64Observer) error - -// Int64ObservableOption applies options to int64 Observer instruments. 
-type Int64ObservableOption interface { - Int64ObservableCounterOption - Int64ObservableUpDownCounterOption - Int64ObservableGaugeOption -} - -type int64CallbackOpt struct { - cback Int64Callback -} - -func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -func (o int64CallbackOpt) applyInt64ObservableUpDownCounter( - cfg Int64ObservableUpDownCounterConfig, -) Int64ObservableUpDownCounterConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { - cfg.callbacks = append(cfg.callbacks, o.cback) - return cfg -} - -// WithInt64Callback adds callback to be called for an instrument. -func WithInt64Callback(callback Int64Callback) Int64ObservableOption { - return int64CallbackOpt{callback} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go deleted file mode 100644 index e42dd6e70a..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "slices" - - "go.opentelemetry.io/otel/attribute" -) - -// MeterConfig contains options for Meters. -type MeterConfig struct { - instrumentationVersion string - schemaURL string - attrs attribute.Set - - // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. -} - -// InstrumentationVersion returns the version of the library providing -// instrumentation. 
-func (cfg MeterConfig) InstrumentationVersion() string { - return cfg.instrumentationVersion -} - -// InstrumentationAttributes returns the attributes associated with the library -// providing instrumentation. -func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { - return cfg.attrs -} - -// SchemaURL is the schema_url of the library providing instrumentation. -func (cfg MeterConfig) SchemaURL() string { - return cfg.schemaURL -} - -// MeterOption is an interface for applying Meter options. -type MeterOption interface { - // applyMeter is used to set a MeterOption value of a MeterConfig. - applyMeter(MeterConfig) MeterConfig -} - -// NewMeterConfig creates a new MeterConfig and applies -// all the given options. -func NewMeterConfig(opts ...MeterOption) MeterConfig { - var config MeterConfig - for _, o := range opts { - config = o.applyMeter(config) - } - return config -} - -type meterOptionFunc func(MeterConfig) MeterConfig - -func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { - return fn(cfg) -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) MeterOption { - return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.instrumentationVersion = version - return config - }) -} - -// WithInstrumentationAttributes adds the instrumentation attributes. -// -// This is equivalent to calling [WithInstrumentationAttributeSet] with an -// [attribute.Set] created from a clone of the passed attributes. -// [WithInstrumentationAttributeSet] is recommended for more control. -// -// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] -// options are passed, the attributes will be merged together in the order -// they are passed. Attributes with duplicate keys will use the last value passed. -func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { - set := attribute.NewSet(slices.Clone(attr)...) 
- return WithInstrumentationAttributeSet(set) -} - -// WithInstrumentationAttributeSet adds the instrumentation attributes. -// -// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] -// options are passed, the attributes will be merged together in the order -// they are passed. Attributes with duplicate keys will use the last value passed. -func WithInstrumentationAttributeSet(set attribute.Set) MeterOption { - if set.Len() == 0 { - return meterOptionFunc(func(config MeterConfig) MeterConfig { - return config - }) - } - - return meterOptionFunc(func(config MeterConfig) MeterConfig { - if config.attrs.Len() == 0 { - config.attrs = set - } else { - config.attrs = mergeSets(config.attrs, set) - } - return config - }) -} - -// WithSchemaURL sets the schema URL. -func WithSchemaURL(schemaURL string) MeterOption { - return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.schemaURL = schemaURL - return config - }) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go deleted file mode 100644 index f153745b00..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package metric provides the OpenTelemetry API used to measure metrics about -source code operation. - -This API is separate from its implementation so the instrumentation built from -it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official -OpenTelemetry implementation of this API. - -All measurements made with this package are made via instruments. These -instruments are created by a [Meter] which itself is created by a -[MeterProvider]. Applications need to accept a [MeterProvider] implementation -as a starting point when instrumenting. This can be done directly, or by using -the OpenTelemetry global MeterProvider via [GetMeterProvider]. 
Using an -appropriately named [Meter] from the accepted [MeterProvider], instrumentation -can then be built from the [Meter]'s instruments. - -# Instruments - -Each instrument is designed to make measurements of a particular type. Broadly, -all instruments fall into two overlapping logical categories: asynchronous or -synchronous, and int64 or float64. - -All synchronous instruments ([Int64Counter], [Int64UpDownCounter], -[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and -[Float64Histogram]) are used to measure the operation and performance of source -code during the source code execution. These instruments only make measurements -when the source code they instrument is run. - -All asynchronous instruments ([Int64ObservableCounter], -[Int64ObservableUpDownCounter], [Int64ObservableGauge], -[Float64ObservableCounter], [Float64ObservableUpDownCounter], and -[Float64ObservableGauge]) are used to measure metrics outside of the execution -of source code. They are said to make "observations" via a callback function -called once every measurement collection cycle. - -Each instrument is also grouped by the value type it measures. Either int64 or -float64. The value being measured will dictate which instrument in these -categories to use. - -Outside of these two broad categories, instruments are described by the -function they are designed to serve. All Counters ([Int64Counter], -[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are -designed to measure values that never decrease in value, but instead only -incrementally increase in value. UpDownCounters ([Int64UpDownCounter], -[Float64UpDownCounter], [Int64ObservableUpDownCounter], and -[Float64ObservableUpDownCounter]) on the other hand, are designed to measure -values that can increase and decrease. When more information needs to be -conveyed about all the synchronous measurements made during a collection cycle, -a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. 
Finally, -when just the most recent measurement needs to be conveyed about an -asynchronous measurement, a Gauge ([Int64ObservableGauge] and -[Float64ObservableGauge]) should be used. - -See the [OpenTelemetry documentation] for more information about instruments -and their intended use. - -# Instrument Name - -OpenTelemetry defines an [instrument name syntax] that restricts what -instrument names are allowed. - -Instrument names should ... - - - Not be empty. - - Have an alphabetic character as their first letter. - - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, - ‘-’, or ‘/’. - - Have a maximum length of 255 letters. - -To ensure compatibility with observability platforms, all instruments created -need to conform to this syntax. Not all implementations of the API will validate -these names, it is the callers responsibility to ensure compliance. - -# Measurements - -Measurements are made by recording values and information about the values with -an instrument. How these measurements are recorded depends on the instrument. - -Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], -[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and -[Float64Histogram]) are recorded using the instrument methods directly. All -counter instruments have an Add method that is used to measure an increment -value, and all histogram instruments have a Record method to measure a data -point. - -Asynchronous instruments ([Int64ObservableCounter], -[Int64ObservableUpDownCounter], [Int64ObservableGauge], -[Float64ObservableCounter], [Float64ObservableUpDownCounter], and -[Float64ObservableGauge]) record measurements within a callback function. The -callback is registered with the Meter which ensures the callback is called once -per collection cycle. A callback can be registered two ways: during the -instrument's creation using an option, or later using the RegisterCallback -method of the [Meter] that created the instrument. 
- -If the following criteria are met, an option ([WithInt64Callback] or -[WithFloat64Callback]) can be used during the asynchronous instrument's -creation to register a callback ([Int64Callback] or [Float64Callback], -respectively): - - - The measurement process is known when the instrument is created - - Only that instrument will make a measurement within the callback - - The callback never needs to be unregistered - -If the criteria are not met, use the RegisterCallback method of the [Meter] that -created the instrument to register a [Callback]. - -# API Implementations - -This package does not conform to the standard Go versioning policy, all of its -interfaces may have methods added to them without a package major version bump. -This non-standard API evolution could surprise an uninformed implementation -author. They could unknowingly build their implementation in a way that would -result in a runtime panic for their users that update to the new API. - -The API is designed to help inform an instrumentation author about this -non-standard API evolution. It requires them to choose a default behavior for -unimplemented interface methods. There are three behavior choices they can -make: - - - Compilation failure - - Panic - - Default to another implementation - -All interfaces in this API embed a corresponding interface from -[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default -behavior of their implementations to be a compilation failure, signaling to -their users they need to update to the latest version of that implementation, -they need to embed the corresponding interface from -[go.opentelemetry.io/otel/metric/embedded] in their implementation. For -example, - - import "go.opentelemetry.io/otel/metric/embedded" - - type MeterProvider struct { - embedded.MeterProvider - // ... - } - -If an author wants the default behavior of their implementations to a panic, -they need to embed the API interface directly. 
- - import "go.opentelemetry.io/otel/metric" - - type MeterProvider struct { - metric.MeterProvider - // ... - } - -This is not a recommended behavior as it could lead to publishing packages that -contain runtime panics when users update other package that use newer versions -of [go.opentelemetry.io/otel/metric]. - -Finally, an author can embed another implementation in theirs. The embedded -implementation will be used for methods not defined by the author. For example, -an author who wants to default to silently dropping the call can use -[go.opentelemetry.io/otel/metric/noop]: - - import "go.opentelemetry.io/otel/metric/noop" - - type MeterProvider struct { - noop.MeterProvider - // ... - } - -It is strongly recommended that authors only embed -[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior. -That implementation is the only one OpenTelemetry authors can guarantee will -fully implement all the API interfaces when a user updates their API. - -[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax -[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ -[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider -*/ -package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md deleted file mode 100644 index 1f6e0efa73..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Metric Embedded - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go deleted file mode 100644 index 1a9dc68093..0000000000 --- 
a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package embedded provides interfaces embedded within the [OpenTelemetry -// metric API]. -// -// Implementers of the [OpenTelemetry metric API] can embed the relevant type -// from this package into their implementation directly. Doing so will result -// in a compilation error for users when the [OpenTelemetry metric API] is -// extended (which is something that can happen without a major version bump of -// the API package). -// -// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric -package embedded // import "go.opentelemetry.io/otel/metric/embedded" - -// MeterProvider is embedded in -// [go.opentelemetry.io/otel/metric.MeterProvider]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to -// experience a compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type MeterProvider interface{ meterProvider() } - -// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a -// compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface -// is extended (which is something that can happen without a major version bump -// of the API package). -type Meter interface{ meter() } - -// Float64Observer is embedded in -// [go.opentelemetry.io/otel/metric.Float64Observer]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64Observer] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64Observer] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Float64Observer interface{ float64Observer() } - -// Int64Observer is embedded in -// [go.opentelemetry.io/otel/metric.Int64Observer]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users -// to experience a compilation error, signaling they need to update to your -// latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64Observer] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Int64Observer interface{ int64Observer() } - -// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a -// compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.Observer] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Observer interface{ observer() } - -// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Registration] if you want users to -// experience a compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.Registration] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Registration interface{ registration() } - -// Float64Counter is embedded in -// [go.opentelemetry.io/otel/metric.Float64Counter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64Counter] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64Counter] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Float64Counter interface{ float64Counter() } - -// Float64Histogram is embedded in -// [go.opentelemetry.io/otel/metric.Float64Histogram]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Float64Histogram interface{ float64Histogram() } - -// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to -// experience a compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Float64Gauge interface{ float64Gauge() } - -// Float64ObservableCounter is embedded in -// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Float64ObservableCounter interface{ float64ObservableCounter() } - -// Float64ObservableGauge is embedded in -// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Float64ObservableGauge interface{ float64ObservableGauge() } - -// Float64ObservableUpDownCounter is embedded in -// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] -// if you want users to experience a compilation error, signaling they need to -// update to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } - -// Float64UpDownCounter is embedded in -// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface -// is extended (which is something that can happen without a major version bump -// of the API package). -type Float64UpDownCounter interface{ float64UpDownCounter() } - -// Int64Counter is embedded in -// [go.opentelemetry.io/otel/metric.Int64Counter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users -// to experience a compilation error, signaling they need to update to your -// latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64Counter] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Int64Counter interface{ int64Counter() } - -// Int64Histogram is embedded in -// [go.opentelemetry.io/otel/metric.Int64Histogram]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Int64Histogram interface{ int64Histogram() } - -// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience -// a compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Int64Gauge interface{ int64Gauge() } - -// Int64ObservableCounter is embedded in -// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Int64ObservableCounter interface{ int64ObservableCounter() } - -// Int64ObservableGauge is embedded in -// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. 
-// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you -// want users to experience a compilation error, signaling they need to update -// to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface -// is extended (which is something that can happen without a major version bump -// of the API package). -type Int64ObservableGauge interface{ int64ObservableGauge() } - -// Int64ObservableUpDownCounter is embedded in -// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if -// you want users to experience a compilation error, signaling they need to -// update to your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] -// interface is extended (which is something that can happen without a major -// version bump of the API package). -type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } - -// Int64UpDownCounter is embedded in -// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want -// users to experience a compilation error, signaling they need to update to -// your latest implementation, when the -// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is -// extended (which is something that can happen without a major version bump of -// the API package). 
-type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go deleted file mode 100644 index 9f48d5f117..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metric // import "go.opentelemetry.io/otel/metric" - -import "go.opentelemetry.io/otel/attribute" - -// Observable is used as a grouping mechanism for all instruments that are -// updated within a Callback. -type Observable interface { - observable() -} - -// InstrumentOption applies options to all instruments. -type InstrumentOption interface { - Int64CounterOption - Int64UpDownCounterOption - Int64HistogramOption - Int64GaugeOption - Int64ObservableCounterOption - Int64ObservableUpDownCounterOption - Int64ObservableGaugeOption - - Float64CounterOption - Float64UpDownCounterOption - Float64HistogramOption - Float64GaugeOption - Float64ObservableCounterOption - Float64ObservableUpDownCounterOption - Float64ObservableGaugeOption -} - -// HistogramOption applies options to histogram instruments. 
-type HistogramOption interface { - Int64HistogramOption - Float64HistogramOption -} - -type descOpt string - -func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64ObservableUpDownCounter( - c Float64ObservableUpDownCounterConfig, -) Float64ObservableUpDownCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) applyInt64ObservableUpDownCounter( - c Int64ObservableUpDownCounterConfig, -) Int64ObservableUpDownCounterConfig { - c.description = string(o) - return c -} - -func (o descOpt) 
applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { - c.description = string(o) - return c -} - -// WithDescription sets the instrument description. -func WithDescription(desc string) InstrumentOption { return descOpt(desc) } - -type unitOpt string - -func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64ObservableUpDownCounter( - c Float64ObservableUpDownCounterConfig, -) Float64ObservableUpDownCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64ObservableUpDownCounter( - c Int64ObservableUpDownCounterConfig, -) 
Int64ObservableUpDownCounterConfig { - c.unit = string(o) - return c -} - -func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { - c.unit = string(o) - return c -} - -// WithUnit sets the instrument unit. -// -// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. -func WithUnit(u string) InstrumentOption { return unitOpt(u) } - -// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. -// -// This option is considered "advisory", and may be ignored by API implementations. -func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } - -type bucketOpt []float64 - -func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { - c.explicitBucketBoundaries = o - return c -} - -func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { - c.explicitBucketBoundaries = o - return c -} - -// AddOption applies options to an addition measurement. See -// [MeasurementOption] for other options that can be used as an AddOption. -type AddOption interface { - applyAdd(AddConfig) AddConfig -} - -// AddConfig contains options for an addition measurement. -type AddConfig struct { - attrs attribute.Set -} - -// NewAddConfig returns a new [AddConfig] with all opts applied. -func NewAddConfig(opts []AddOption) AddConfig { - config := AddConfig{attrs: *attribute.EmptySet()} - for _, o := range opts { - config = o.applyAdd(config) - } - return config -} - -// Attributes returns the configured attribute set. -func (c AddConfig) Attributes() attribute.Set { - return c.attrs -} - -// RecordOption applies options to an addition measurement. See -// [MeasurementOption] for other options that can be used as a RecordOption. -type RecordOption interface { - applyRecord(RecordConfig) RecordConfig -} - -// RecordConfig contains options for a recorded measurement. 
-type RecordConfig struct { - attrs attribute.Set -} - -// NewRecordConfig returns a new [RecordConfig] with all opts applied. -func NewRecordConfig(opts []RecordOption) RecordConfig { - config := RecordConfig{attrs: *attribute.EmptySet()} - for _, o := range opts { - config = o.applyRecord(config) - } - return config -} - -// Attributes returns the configured attribute set. -func (c RecordConfig) Attributes() attribute.Set { - return c.attrs -} - -// ObserveOption applies options to an addition measurement. See -// [MeasurementOption] for other options that can be used as a ObserveOption. -type ObserveOption interface { - applyObserve(ObserveConfig) ObserveConfig -} - -// ObserveConfig contains options for an observed measurement. -type ObserveConfig struct { - attrs attribute.Set -} - -// NewObserveConfig returns a new [ObserveConfig] with all opts applied. -func NewObserveConfig(opts []ObserveOption) ObserveConfig { - config := ObserveConfig{attrs: *attribute.EmptySet()} - for _, o := range opts { - config = o.applyObserve(config) - } - return config -} - -// Attributes returns the configured attribute set. -func (c ObserveConfig) Attributes() attribute.Set { - return c.attrs -} - -// MeasurementOption applies options to all instrument measurement. -type MeasurementOption interface { - AddOption - RecordOption - ObserveOption -} - -type attrOpt struct { - set attribute.Set -} - -// mergeSets returns the union of keys between a and b. Any duplicate keys will -// use the value associated with b. -func mergeSets(a, b attribute.Set) attribute.Set { - // NewMergeIterator uses the first value for any duplicates. - iter := attribute.NewMergeIterator(&b, &a) - merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) - for iter.Next() { - merged = append(merged, iter.Attribute()) - } - return attribute.NewSet(merged...) 
-} - -func (o attrOpt) applyAdd(c AddConfig) AddConfig { - switch { - case o.set.Len() == 0: - case c.attrs.Len() == 0: - c.attrs = o.set - default: - c.attrs = mergeSets(c.attrs, o.set) - } - return c -} - -func (o attrOpt) applyRecord(c RecordConfig) RecordConfig { - switch { - case o.set.Len() == 0: - case c.attrs.Len() == 0: - c.attrs = o.set - default: - c.attrs = mergeSets(c.attrs, o.set) - } - return c -} - -func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig { - switch { - case o.set.Len() == 0: - case c.attrs.Len() == 0: - c.attrs = o.set - default: - c.attrs = mergeSets(c.attrs, o.set) - } - return c -} - -// WithAttributeSet sets the attribute Set associated with a measurement is -// made with. -// -// If multiple WithAttributeSet or WithAttributes options are passed the -// attributes will be merged together in the order they are passed. Attributes -// with duplicate keys will use the last value passed. -func WithAttributeSet(attributes attribute.Set) MeasurementOption { - return attrOpt{set: attributes} -} - -// WithAttributes converts attributes into an attribute Set and sets the Set to -// be associated with a measurement. This is shorthand for: -// -// cp := make([]attribute.KeyValue, len(attributes)) -// copy(cp, attributes) -// WithAttributeSet(attribute.NewSet(cp...)) -// -// [attribute.NewSet] may modify the passed attributes so this will make a copy -// of attributes before creating a set in order to ensure this function is -// concurrent safe. This makes this option function less optimized in -// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be -// preferred for performance sensitive code. -// -// See [WithAttributeSet] for information about how multiple WithAttributes are -// merged. 
-func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { - cp := make([]attribute.KeyValue, len(attributes)) - copy(cp, attributes) - return attrOpt{set: attribute.NewSet(cp...)} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go deleted file mode 100644 index 5606ec4bd9..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// MeterProvider provides access to named Meter instances, for instrumenting -// an application or package. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type MeterProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.MeterProvider - - // Meter returns a new Meter with the provided name and configuration. - // - // A Meter should be scoped at most to a single package. The name needs to - // be unique so it does not collide with other names used by - // an application, nor other applications. To achieve this, the import path - // of the instrumentation package is recommended to be used as name. - // - // If the name is empty, then an implementation defined default name will - // be used instead. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Meter(name string, opts ...MeterOption) Meter -} - -// Meter provides access to instrument instances for recording metrics. 
-// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Meter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Meter - - // Int64Counter returns a new Int64Counter instrument identified by name - // and configured with options. The instrument is used to synchronously - // record increasing int64 measurements during a computational operation. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) - - // Int64UpDownCounter returns a new Int64UpDownCounter instrument - // identified by name and configured with options. The instrument is used - // to synchronously record int64 measurements during a computational - // operation. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) - - // Int64Histogram returns a new Int64Histogram instrument identified by - // name and configured with options. The instrument is used to - // synchronously record the distribution of int64 measurements during a - // computational operation. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. 
- // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) - - // Int64Gauge returns a new Int64Gauge instrument identified by name and - // configured with options. The instrument is used to synchronously record - // instantaneous int64 measurements during a computational operation. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) - - // Int64ObservableCounter returns a new Int64ObservableCounter identified - // by name and configured with options. The instrument is used to - // asynchronously record increasing int64 measurements once per a - // measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithInt64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) - - // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter - // instrument identified by name and configured with options. 
The - // instrument is used to asynchronously record int64 measurements once per - // a measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithInt64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Int64ObservableUpDownCounter( - name string, - options ...Int64ObservableUpDownCounterOption, - ) (Int64ObservableUpDownCounter, error) - - // Int64ObservableGauge returns a new Int64ObservableGauge instrument - // identified by name and configured with options. The instrument is used - // to asynchronously record instantaneous int64 measurements once per a - // measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithInt64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) - - // Float64Counter returns a new Float64Counter instrument identified by - // name and configured with options. 
The instrument is used to - // synchronously record increasing float64 measurements during a - // computational operation. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) - - // Float64UpDownCounter returns a new Float64UpDownCounter instrument - // identified by name and configured with options. The instrument is used - // to synchronously record float64 measurements during a computational - // operation. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) - - // Float64Histogram returns a new Float64Histogram instrument identified by - // name and configured with options. The instrument is used to - // synchronously record the distribution of float64 measurements during a - // computational operation. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) - - // Float64Gauge returns a new Float64Gauge instrument identified by name and - // configured with options. The instrument is used to synchronously record - // instantaneous float64 measurements during a computational operation. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. 
- // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) - - // Float64ObservableCounter returns a new Float64ObservableCounter - // instrument identified by name and configured with options. The - // instrument is used to asynchronously record increasing float64 - // measurements once per a measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithFloat64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) - - // Float64ObservableUpDownCounter returns a new - // Float64ObservableUpDownCounter instrument identified by name and - // configured with options. The instrument is used to asynchronously record - // float64 measurements once per a measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithFloat64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. 
- // - // Implementations of this method need to be safe for a user to call - // concurrently. - Float64ObservableUpDownCounter( - name string, - options ...Float64ObservableUpDownCounterOption, - ) (Float64ObservableUpDownCounter, error) - - // Float64ObservableGauge returns a new Float64ObservableGauge instrument - // identified by name and configured with options. The instrument is used - // to asynchronously record instantaneous float64 measurements once per a - // measurement collection cycle. - // - // Measurements for the returned instrument are made via a callback. Use - // the WithFloat64Callback option to register the callback here, or use the - // RegisterCallback method of this Meter to register one later. See the - // Measurements section of the package documentation for more information. - // - // The name needs to conform to the OpenTelemetry instrument name syntax. - // See the Instrument Name section of the package documentation for more - // information. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) - - // RegisterCallback registers f to be called during the collection of a - // measurement cycle. - // - // If Unregister of the returned Registration is called, f needs to be - // unregistered and not called during collection. - // - // The instruments f is registered with are the only instruments that f may - // observe values for. - // - // If no instruments are passed, f should not be registered nor called - // during collection. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - // - // The function f needs to be concurrent safe. - RegisterCallback(f Callback, instruments ...Observable) (Registration, error) -} - -// Callback is a function registered with a Meter that makes observations for -// the set of instruments it is registered with. 
The Observer parameter is used -// to record measurement observations for these instruments. -// -// The function needs to complete in a finite amount of time and the deadline -// of the passed context is expected to be honored. -// -// The function needs to make unique observations across all registered -// Callbacks. Meaning, it should not report measurements for an instrument with -// the same attributes as another Callback will report. -// -// The function needs to be reentrant and concurrent safe. -// -// Note that Go's mutexes are not reentrant, and locking a mutex takes -// an indefinite amount of time. It is therefore advised to avoid -// using mutexes inside callbacks. -type Callback func(context.Context, Observer) error - -// Observer records measurements for multiple instruments in a Callback. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Observer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Observer - - // ObserveFloat64 records the float64 value for obsrv. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) - - // ObserveInt64 records the int64 value for obsrv. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) -} - -// Registration is an token representing the unique registration of a callback -// for a set of instruments with a Meter. -// -// Warning: Methods may be added to this interface in minor releases. 
See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Registration interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Registration - - // Unregister removes the callback registration from a Meter. - // - // Implementations of this method need to be idempotent and safe for a user - // to call concurrently. - Unregister() error -} diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md deleted file mode 100644 index bb89694356..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/noop/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Metric Noop - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go deleted file mode 100644 index 634e73aee0..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package noop provides an implementation of the OpenTelemetry metric API that -// produces no telemetry and minimizes used computation resources. -// -// Using this package to implement the OpenTelemetry metric API will -// effectively disable OpenTelemetry. -// -// This implementation can be embedded in other implementations of the -// OpenTelemetry metric API. Doing so will mean the implementation defaults to -// no operation for methods it does not implement. 
-package noop // import "go.opentelemetry.io/otel/metric/noop" - -import ( - "context" - - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/embedded" -) - -var ( - // Compile-time check this implements the OpenTelemetry API. - - _ metric.MeterProvider = MeterProvider{} - _ metric.Meter = Meter{} - _ metric.Observer = Observer{} - _ metric.Registration = Registration{} - _ metric.Int64Counter = Int64Counter{} - _ metric.Float64Counter = Float64Counter{} - _ metric.Int64UpDownCounter = Int64UpDownCounter{} - _ metric.Float64UpDownCounter = Float64UpDownCounter{} - _ metric.Int64Histogram = Int64Histogram{} - _ metric.Float64Histogram = Float64Histogram{} - _ metric.Int64Gauge = Int64Gauge{} - _ metric.Float64Gauge = Float64Gauge{} - _ metric.Int64ObservableCounter = Int64ObservableCounter{} - _ metric.Float64ObservableCounter = Float64ObservableCounter{} - _ metric.Int64ObservableGauge = Int64ObservableGauge{} - _ metric.Float64ObservableGauge = Float64ObservableGauge{} - _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{} - _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{} - _ metric.Int64Observer = Int64Observer{} - _ metric.Float64Observer = Float64Observer{} -) - -// MeterProvider is an OpenTelemetry No-Op MeterProvider. -type MeterProvider struct{ embedded.MeterProvider } - -// NewMeterProvider returns a MeterProvider that does not record any telemetry. -func NewMeterProvider() MeterProvider { - return MeterProvider{} -} - -// Meter returns an OpenTelemetry Meter that does not record any telemetry. -func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { - return Meter{} -} - -// Meter is an OpenTelemetry No-Op Meter. -type Meter struct{ embedded.Meter } - -// Int64Counter returns a Counter used to record int64 measurements that -// produces no telemetry. 
-func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { - return Int64Counter{}, nil -} - -// Int64UpDownCounter returns an UpDownCounter used to record int64 -// measurements that produces no telemetry. -func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - return Int64UpDownCounter{}, nil -} - -// Int64Histogram returns a Histogram used to record int64 measurements that -// produces no telemetry. -func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - return Int64Histogram{}, nil -} - -// Int64Gauge returns a Gauge used to record int64 measurements that -// produces no telemetry. -func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - return Int64Gauge{}, nil -} - -// Int64ObservableCounter returns an ObservableCounter used to record int64 -// measurements that produces no telemetry. -func (Meter) Int64ObservableCounter( - string, - ...metric.Int64ObservableCounterOption, -) (metric.Int64ObservableCounter, error) { - return Int64ObservableCounter{}, nil -} - -// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to -// record int64 measurements that produces no telemetry. -func (Meter) Int64ObservableUpDownCounter( - string, - ...metric.Int64ObservableUpDownCounterOption, -) (metric.Int64ObservableUpDownCounter, error) { - return Int64ObservableUpDownCounter{}, nil -} - -// Int64ObservableGauge returns an ObservableGauge used to record int64 -// measurements that produces no telemetry. -func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - return Int64ObservableGauge{}, nil -} - -// Float64Counter returns a Counter used to record int64 measurements that -// produces no telemetry. 
-func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) { - return Float64Counter{}, nil -} - -// Float64UpDownCounter returns an UpDownCounter used to record int64 -// measurements that produces no telemetry. -func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - return Float64UpDownCounter{}, nil -} - -// Float64Histogram returns a Histogram used to record int64 measurements that -// produces no telemetry. -func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - return Float64Histogram{}, nil -} - -// Float64Gauge returns a Gauge used to record float64 measurements that -// produces no telemetry. -func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - return Float64Gauge{}, nil -} - -// Float64ObservableCounter returns an ObservableCounter used to record int64 -// measurements that produces no telemetry. -func (Meter) Float64ObservableCounter( - string, - ...metric.Float64ObservableCounterOption, -) (metric.Float64ObservableCounter, error) { - return Float64ObservableCounter{}, nil -} - -// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to -// record int64 measurements that produces no telemetry. -func (Meter) Float64ObservableUpDownCounter( - string, - ...metric.Float64ObservableUpDownCounterOption, -) (metric.Float64ObservableUpDownCounter, error) { - return Float64ObservableUpDownCounter{}, nil -} - -// Float64ObservableGauge returns an ObservableGauge used to record int64 -// measurements that produces no telemetry. -func (Meter) Float64ObservableGauge( - string, - ...metric.Float64ObservableGaugeOption, -) (metric.Float64ObservableGauge, error) { - return Float64ObservableGauge{}, nil -} - -// RegisterCallback performs no operation. 
-func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) { - return Registration{}, nil -} - -// Observer acts as a recorder of measurements for multiple instruments in a -// Callback, it performing no operation. -type Observer struct{ embedded.Observer } - -// ObserveFloat64 performs no operation. -func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) { -} - -// ObserveInt64 performs no operation. -func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) { -} - -// Registration is the registration of a Callback with a No-Op Meter. -type Registration struct{ embedded.Registration } - -// Unregister unregisters the Callback the Registration represents with the -// No-Op Meter. This will always return nil because the No-Op Meter performs no -// operation, including hold any record of registrations. -func (Registration) Unregister() error { return nil } - -// Int64Counter is an OpenTelemetry Counter used to record int64 measurements. -// It produces no telemetry. -type Int64Counter struct{ embedded.Int64Counter } - -// Add performs no operation. -func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {} - -// Enabled performs no operation. -func (Int64Counter) Enabled(context.Context) bool { return false } - -// Float64Counter is an OpenTelemetry Counter used to record float64 -// measurements. It produces no telemetry. -type Float64Counter struct{ embedded.Float64Counter } - -// Add performs no operation. -func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {} - -// Enabled performs no operation. -func (Float64Counter) Enabled(context.Context) bool { return false } - -// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64 -// measurements. It produces no telemetry. -type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } - -// Add performs no operation. 
-func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} - -// Enabled performs no operation. -func (Int64UpDownCounter) Enabled(context.Context) bool { return false } - -// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record -// float64 measurements. It produces no telemetry. -type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } - -// Add performs no operation. -func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} - -// Enabled performs no operation. -func (Float64UpDownCounter) Enabled(context.Context) bool { return false } - -// Int64Histogram is an OpenTelemetry Histogram used to record int64 -// measurements. It produces no telemetry. -type Int64Histogram struct{ embedded.Int64Histogram } - -// Record performs no operation. -func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} - -// Enabled performs no operation. -func (Int64Histogram) Enabled(context.Context) bool { return false } - -// Float64Histogram is an OpenTelemetry Histogram used to record float64 -// measurements. It produces no telemetry. -type Float64Histogram struct{ embedded.Float64Histogram } - -// Record performs no operation. -func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} - -// Enabled performs no operation. -func (Float64Histogram) Enabled(context.Context) bool { return false } - -// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 -// measurements. It produces no telemetry. -type Int64Gauge struct{ embedded.Int64Gauge } - -// Record performs no operation. -func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} - -// Enabled performs no operation. -func (Int64Gauge) Enabled(context.Context) bool { return false } - -// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 -// measurements. It produces no telemetry. 
-type Float64Gauge struct{ embedded.Float64Gauge } - -// Record performs no operation. -func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} - -// Enabled performs no operation. -func (Float64Gauge) Enabled(context.Context) bool { return false } - -// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record -// int64 measurements. It produces no telemetry. -type Int64ObservableCounter struct { - metric.Int64Observable - embedded.Int64ObservableCounter -} - -// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record -// float64 measurements. It produces no telemetry. -type Float64ObservableCounter struct { - metric.Float64Observable - embedded.Float64ObservableCounter -} - -// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record -// int64 measurements. It produces no telemetry. -type Int64ObservableGauge struct { - metric.Int64Observable - embedded.Int64ObservableGauge -} - -// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record -// float64 measurements. It produces no telemetry. -type Float64ObservableGauge struct { - metric.Float64Observable - embedded.Float64ObservableGauge -} - -// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter -// used to record int64 measurements. It produces no telemetry. -type Int64ObservableUpDownCounter struct { - metric.Int64Observable - embedded.Int64ObservableUpDownCounter -} - -// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter -// used to record float64 measurements. It produces no telemetry. -type Float64ObservableUpDownCounter struct { - metric.Float64Observable - embedded.Float64ObservableUpDownCounter -} - -// Int64Observer is a recorder of int64 measurements that performs no operation. -type Int64Observer struct{ embedded.Int64Observer } - -// Observe performs no operation. 
-func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} - -// Float64Observer is a recorder of float64 measurements that performs no -// operation. -type Float64Observer struct{ embedded.Float64Observer } - -// Observe performs no operation. -func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go deleted file mode 100644 index abb3051d7f..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// Float64Counter is an instrument that records increasing float64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64Counter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64Counter - - // Add records a change to the counter. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Add(ctx context.Context, incr float64, options ...AddOption) - - // Enabled reports whether the instrument will process measurements for the given context. - // - // This function can be used in places where measuring an instrument - // would result in computationally expensive operations. 
- // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(context.Context) bool -} - -// Float64CounterConfig contains options for synchronous counter instruments that -// record float64 values. -type Float64CounterConfig struct { - description string - unit string -} - -// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts -// applied. -func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { - var config Float64CounterConfig - for _, o := range opts { - config = o.applyFloat64Counter(config) - } - return config -} - -// Description returns the configured description. -func (c Float64CounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64CounterConfig) Unit() string { - return c.unit -} - -// Float64CounterOption applies options to a [Float64CounterConfig]. See -// [InstrumentOption] for other options that can be used as a -// Float64CounterOption. -type Float64CounterOption interface { - applyFloat64Counter(Float64CounterConfig) Float64CounterConfig -} - -// Float64UpDownCounter is an instrument that records increasing or decreasing -// float64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64UpDownCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64UpDownCounter - - // Add records a change to the counter. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. 
- // - // Implementations of this method need to be safe for a user to call - // concurrently. - Add(ctx context.Context, incr float64, options ...AddOption) - - // Enabled reports whether the instrument will process measurements for the given context. - // - // This function can be used in places where measuring an instrument - // would result in computationally expensive operations. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(context.Context) bool -} - -// Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record float64 values. -type Float64UpDownCounterConfig struct { - description string - unit string -} - -// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] -// with all opts applied. -func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { - var config Float64UpDownCounterConfig - for _, o := range opts { - config = o.applyFloat64UpDownCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Float64UpDownCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64UpDownCounterConfig) Unit() string { - return c.unit -} - -// Float64UpDownCounterOption applies options to a -// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that -// can be used as a Float64UpDownCounterOption. -type Float64UpDownCounterOption interface { - applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig -} - -// Float64Histogram is an instrument that records a distribution of float64 -// values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. 
-type Float64Histogram interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64Histogram - - // Record adds an additional value to the distribution. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Record(ctx context.Context, incr float64, options ...RecordOption) - - // Enabled reports whether the instrument will process measurements for the given context. - // - // This function can be used in places where measuring an instrument - // would result in computationally expensive operations. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(context.Context) bool -} - -// Float64HistogramConfig contains options for synchronous histogram -// instruments that record float64 values. -type Float64HistogramConfig struct { - description string - unit string - explicitBucketBoundaries []float64 -} - -// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all -// opts applied. -func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { - var config Float64HistogramConfig - for _, o := range opts { - config = o.applyFloat64Histogram(config) - } - return config -} - -// Description returns the configured description. -func (c Float64HistogramConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64HistogramConfig) Unit() string { - return c.unit -} - -// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
-func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { - return c.explicitBucketBoundaries -} - -// Float64HistogramOption applies options to a [Float64HistogramConfig]. See -// [InstrumentOption] for other options that can be used as a -// Float64HistogramOption. -type Float64HistogramOption interface { - applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig -} - -// Float64Gauge is an instrument that records instantaneous float64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Float64Gauge interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Float64Gauge - - // Record records the instantaneous value. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Record(ctx context.Context, value float64, options ...RecordOption) - - // Enabled reports whether the instrument will process measurements for the given context. - // - // This function can be used in places where measuring an instrument - // would result in computationally expensive operations. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(context.Context) bool -} - -// Float64GaugeConfig contains options for synchronous gauge instruments that -// record float64 values. -type Float64GaugeConfig struct { - description string - unit string -} - -// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts -// applied. 
-func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { - var config Float64GaugeConfig - for _, o := range opts { - config = o.applyFloat64Gauge(config) - } - return config -} - -// Description returns the configured description. -func (c Float64GaugeConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Float64GaugeConfig) Unit() string { - return c.unit -} - -// Float64GaugeOption applies options to a [Float64GaugeConfig]. See -// [InstrumentOption] for other options that can be used as a -// Float64GaugeOption. -type Float64GaugeOption interface { - applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig -} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go deleted file mode 100644 index 5bbfaf0397..0000000000 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/metric/embedded" -) - -// Int64Counter is an instrument that records increasing int64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64Counter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64Counter - - // Add records a change to the counter. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. 
- Add(ctx context.Context, incr int64, options ...AddOption) - - // Enabled reports whether the instrument will process measurements for the given context. - // - // This function can be used in places where measuring an instrument - // would result in computationally expensive operations. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(context.Context) bool -} - -// Int64CounterConfig contains options for synchronous counter instruments that -// record int64 values. -type Int64CounterConfig struct { - description string - unit string -} - -// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts -// applied. -func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { - var config Int64CounterConfig - for _, o := range opts { - config = o.applyInt64Counter(config) - } - return config -} - -// Description returns the configured description. -func (c Int64CounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64CounterConfig) Unit() string { - return c.unit -} - -// Int64CounterOption applies options to a [Int64CounterConfig]. See -// [InstrumentOption] for other options that can be used as an -// Int64CounterOption. -type Int64CounterOption interface { - applyInt64Counter(Int64CounterConfig) Int64CounterConfig -} - -// Int64UpDownCounter is an instrument that records increasing or decreasing -// int64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64UpDownCounter interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. 
- embedded.Int64UpDownCounter - - // Add records a change to the counter. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Add(ctx context.Context, incr int64, options ...AddOption) - - // Enabled reports whether the instrument will process measurements for the given context. - // - // This function can be used in places where measuring an instrument - // would result in computationally expensive operations. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(context.Context) bool -} - -// Int64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. -type Int64UpDownCounterConfig struct { - description string - unit string -} - -// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with -// all opts applied. -func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { - var config Int64UpDownCounterConfig - for _, o := range opts { - config = o.applyInt64UpDownCounter(config) - } - return config -} - -// Description returns the configured description. -func (c Int64UpDownCounterConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64UpDownCounterConfig) Unit() string { - return c.unit -} - -// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. -// See [InstrumentOption] for other options that can be used as an -// Int64UpDownCounterOption. -type Int64UpDownCounterOption interface { - applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig -} - -// Int64Histogram is an instrument that records a distribution of int64 -// values. -// -// Warning: Methods may be added to this interface in minor releases. 
See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64Histogram interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64Histogram - - // Record adds an additional value to the distribution. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Record(ctx context.Context, incr int64, options ...RecordOption) - - // Enabled reports whether the instrument will process measurements for the given context. - // - // This function can be used in places where measuring an instrument - // would result in computationally expensive operations. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(context.Context) bool -} - -// Int64HistogramConfig contains options for synchronous histogram instruments -// that record int64 values. -type Int64HistogramConfig struct { - description string - unit string - explicitBucketBoundaries []float64 -} - -// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts -// applied. -func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { - var config Int64HistogramConfig - for _, o := range opts { - config = o.applyInt64Histogram(config) - } - return config -} - -// Description returns the configured description. -func (c Int64HistogramConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64HistogramConfig) Unit() string { - return c.unit -} - -// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
-func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { - return c.explicitBucketBoundaries -} - -// Int64HistogramOption applies options to a [Int64HistogramConfig]. See -// [InstrumentOption] for other options that can be used as an -// Int64HistogramOption. -type Int64HistogramOption interface { - applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig -} - -// Int64Gauge is an instrument that records instantaneous int64 values. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Int64Gauge interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Int64Gauge - - // Record records the instantaneous value. - // - // Use the WithAttributeSet (or, if performance is not a concern, - // the WithAttributes) option to include measurement attributes. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Record(ctx context.Context, value int64, options ...RecordOption) - - // Enabled reports whether the instrument will process measurements for the given context. - // - // This function can be used in places where measuring an instrument - // would result in computationally expensive operations. - // - // Implementations of this method need to be safe for a user to call - // concurrently. - Enabled(context.Context) bool -} - -// Int64GaugeConfig contains options for synchronous gauge instruments that -// record int64 values. -type Int64GaugeConfig struct { - description string - unit string -} - -// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts -// applied. 
-func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { - var config Int64GaugeConfig - for _, o := range opts { - config = o.applyInt64Gauge(config) - } - return config -} - -// Description returns the configured description. -func (c Int64GaugeConfig) Description() string { - return c.description -} - -// Unit returns the configured unit. -func (c Int64GaugeConfig) Unit() string { - return c.unit -} - -// Int64GaugeOption applies options to a [Int64GaugeConfig]. See -// [InstrumentOption] for other options that can be used as a -// Int64GaugeOption. -type Int64GaugeOption interface { - applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig -} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go deleted file mode 100644 index 2fd9497338..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/propagation" -) - -// GetTextMapPropagator returns the global TextMapPropagator. If none has been -// set, a No-Op TextMapPropagator is returned. -func GetTextMapPropagator() propagation.TextMapPropagator { - return global.TextMapPropagator() -} - -// SetTextMapPropagator sets propagator as the global TextMapPropagator. 
-func SetTextMapPropagator(propagator propagation.TextMapPropagator) { - global.SetTextMapPropagator(propagator) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md deleted file mode 100644 index e2959ac747..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Propagation - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go deleted file mode 100644 index 2ecca3fed1..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - - "go.opentelemetry.io/otel/baggage" - "go.opentelemetry.io/otel/internal/errorhandler" -) - -const ( - baggageHeader = "baggage" - - // W3C Baggage specification limits. - // https://www.w3.org/TR/baggage/#limits - maxMembers = 64 -) - -// Baggage is a propagator that supports the W3C Baggage format. -// -// This propagates user-defined baggage associated with a trace. The complete -// specification is defined at https://www.w3.org/TR/baggage/. -type Baggage struct{} - -var _ TextMapPropagator = Baggage{} - -// Inject sets baggage key-values from ctx into the carrier. -func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { - bStr := baggage.FromContext(ctx).String() - if bStr != "" { - carrier.Set(baggageHeader, bStr) - } -} - -// Extract returns a copy of parent with the baggage from the carrier added. -// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked -// for multiple values extraction. Otherwise, Get is called. 
-func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { - if multiCarrier, ok := carrier.(ValuesGetter); ok { - return extractMultiBaggage(parent, multiCarrier) - } - return extractSingleBaggage(parent, carrier) -} - -// Fields returns the keys who's values are set with Inject. -func (Baggage) Fields() []string { - return []string{baggageHeader} -} - -func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context { - bStr := carrier.Get(baggageHeader) - if bStr == "" { - return parent - } - - bag, err := baggage.Parse(bStr) - if err != nil { - errorhandler.GetErrorHandler().Handle(err) - } - if bag.Len() == 0 { - return parent - } - return baggage.ContextWithBaggage(parent, bag) -} - -func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context { - bVals := carrier.Values(baggageHeader) - if len(bVals) == 0 { - return parent - } - - var members []baggage.Member - for _, bStr := range bVals { - currBag, err := baggage.Parse(bStr) - if err != nil { - errorhandler.GetErrorHandler().Handle(err) - } - if currBag.Len() == 0 { - continue - } - members = append(members, currBag.Members()...) - if len(members) >= maxMembers { - break - } - } - - b, err := baggage.New(members...) - if err != nil { - errorhandler.GetErrorHandler().Handle(err) - } - if b.Len() == 0 { - return parent - } - return baggage.ContextWithBaggage(parent, b) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go deleted file mode 100644 index 33a3baf15f..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package propagation contains OpenTelemetry context propagators. - -OpenTelemetry propagators are used to extract and inject context data from and -into messages exchanged by applications. 
The propagator supported by this -package is the W3C Trace Context encoding -(https://www.w3.org/TR/trace-context/), and W3C Baggage -(https://www.w3.org/TR/baggage/). -*/ -package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go deleted file mode 100644 index 0a32c59aa3..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - "net/http" -) - -// TextMapCarrier is the storage medium used by a TextMapPropagator. -// See ValuesGetter for how a TextMapCarrier can get multiple values for a key. -type TextMapCarrier interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Get returns the value associated with the passed key. - Get(key string) string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Set stores the key-value pair. - Set(key, value string) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Keys lists the keys stored in this carrier. - Keys() []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// ValuesGetter can return multiple values for a single key, -// with contrast to TextMapCarrier.Get which returns a single value. -type ValuesGetter interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. 
- - // Values returns all values associated with the passed key. - Values(key string) []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage -// medium for propagated key-value pairs. -type MapCarrier map[string]string - -// Compile time check that MapCarrier implements the TextMapCarrier. -var _ TextMapCarrier = MapCarrier{} - -// Get returns the value associated with the passed key. -func (c MapCarrier) Get(key string) string { - return c[key] -} - -// Set stores the key-value pair. -func (c MapCarrier) Set(key, value string) { - c[key] = value -} - -// Keys lists the keys stored in this carrier. -func (c MapCarrier) Keys() []string { - keys := make([]string, 0, len(c)) - for k := range c { - keys = append(keys, k) - } - return keys -} - -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces. -type HeaderCarrier http.Header - -// Compile time check that HeaderCarrier implements ValuesGetter. -var _ TextMapCarrier = HeaderCarrier{} - -// Compile time check that HeaderCarrier implements TextMapCarrier. -var _ ValuesGetter = HeaderCarrier{} - -// Get returns the first value associated with the passed key. -func (hc HeaderCarrier) Get(key string) string { - return http.Header(hc).Get(key) -} - -// Values returns all values associated with the passed key. -func (hc HeaderCarrier) Values(key string) []string { - return http.Header(hc).Values(key) -} - -// Set stores the key-value pair. -func (hc HeaderCarrier) Set(key, value string) { - http.Header(hc).Set(key, value) -} - -// Keys lists the keys stored in this carrier. 
-func (hc HeaderCarrier) Keys() []string { - keys := make([]string, 0, len(hc)) - for k := range hc { - keys = append(keys, k) - } - return keys -} - -// TextMapPropagator propagates cross-cutting concerns as key-value text -// pairs within a carrier that travels in-band across process boundaries. -type TextMapPropagator interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Inject set cross-cutting concerns from the Context into the carrier. - Inject(ctx context.Context, carrier TextMapCarrier) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Extract reads cross-cutting concerns from the carrier into a Context. - // Implementations may check if the carrier implements ValuesGetter, - // to support extraction of multiple values per key. - Extract(ctx context.Context, carrier TextMapCarrier) context.Context - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Fields returns the keys whose values are set with Inject. - Fields() []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. 
-} - -type compositeTextMapPropagator []TextMapPropagator - -func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) { - for _, i := range p { - i.Inject(ctx, carrier) - } -} - -func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - for _, i := range p { - ctx = i.Extract(ctx, carrier) - } - return ctx -} - -func (p compositeTextMapPropagator) Fields() []string { - unique := make(map[string]struct{}) - for _, i := range p { - for _, k := range i.Fields() { - unique[k] = struct{}{} - } - } - - fields := make([]string, 0, len(unique)) - for k := range unique { - fields = append(fields, k) - } - return fields -} - -// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the -// group of passed TextMapPropagator. This allows different cross-cutting -// concerns to be propagates in a unified manner. -// -// The returned TextMapPropagator will inject and extract cross-cutting -// concerns in the order the TextMapPropagators were provided. Additionally, -// the Fields method will return a de-duplicated slice of the keys that are -// set with the Inject method. 
-func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator { - return compositeTextMapPropagator(p) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go deleted file mode 100644 index 11f404deb7..0000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - "encoding/hex" - "fmt" - "strings" - - "go.opentelemetry.io/otel/trace" -) - -const ( - supportedVersion = 0 - maxVersion = 254 - traceparentHeader = "traceparent" - tracestateHeader = "tracestate" - delimiter = "-" -) - -// TraceContext is a propagator that supports the W3C Trace Context format -// (https://www.w3.org/TR/trace-context/) -// -// This propagator will propagate the traceparent and tracestate headers to -// guarantee traces are not broken. It is up to the users of this propagator -// to choose if they want to participate in a trace by modifying the -// traceparent header and relevant parts of the tracestate header containing -// their proprietary information. -type TraceContext struct{} - -var ( - _ TextMapPropagator = TraceContext{} - versionPart = fmt.Sprintf("%.2X", supportedVersion) -) - -// Inject injects the trace context from ctx into carrier. -func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { - sc := trace.SpanContextFromContext(ctx) - if !sc.IsValid() { - return - } - - if ts := sc.TraceState().String(); ts != "" { - carrier.Set(tracestateHeader, ts) - } - - // Preserve only the spec-defined flags: sampled (0x01) and random (0x02). 
- flags := sc.TraceFlags() & (trace.FlagsSampled | trace.FlagsRandom) - - var sb strings.Builder - sb.Grow(2 + 32 + 16 + 2 + 3) - _, _ = sb.WriteString(versionPart) - traceID := sc.TraceID() - spanID := sc.SpanID() - flagByte := [1]byte{byte(flags)} - var buf [32]byte - for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} { - _ = sb.WriteByte(delimiter[0]) - n := hex.Encode(buf[:], src) - _, _ = sb.Write(buf[:n]) - } - carrier.Set(traceparentHeader, sb.String()) -} - -// Extract reads tracecontext from the carrier into a returned Context. -// -// The returned Context will be a copy of ctx and contain the extracted -// tracecontext as the remote SpanContext. If the extracted tracecontext is -// invalid, the passed ctx will be returned directly instead. -func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - sc := tc.extract(carrier) - if !sc.IsValid() { - return ctx - } - return trace.ContextWithRemoteSpanContext(ctx, sc) -} - -func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { - h := carrier.Get(traceparentHeader) - if h == "" { - return trace.SpanContext{} - } - - var ver [1]byte - if !extractPart(ver[:], &h, 2) { - return trace.SpanContext{} - } - version := int(ver[0]) - if version > maxVersion { - return trace.SpanContext{} - } - - var scc trace.SpanContextConfig - if !extractPart(scc.TraceID[:], &h, 32) { - return trace.SpanContext{} - } - if !extractPart(scc.SpanID[:], &h, 16) { - return trace.SpanContext{} - } - - var opts [1]byte - if !extractPart(opts[:], &h, 2) { - return trace.SpanContext{} - } - if version == 0 && (h != "" || opts[0] > 3) { - // version 0 does not allow extra fields or reserved flag bits. - return trace.SpanContext{} - } - - scc.TraceFlags = trace.TraceFlags(opts[0]) & //nolint:gosec // slice size already checked. - (trace.FlagsSampled | trace.FlagsRandom) - - // Ignore the error returned here. 
Failure to parse tracestate MUST NOT - // affect the parsing of traceparent according to the W3C tracecontext - // specification. - scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) - scc.Remote = true - - sc := trace.NewSpanContext(scc) - if !sc.IsValid() { - return trace.SpanContext{} - } - - return sc -} - -// upperHex detect hex is upper case Unicode characters. -func upperHex(v string) bool { - for _, c := range v { - if c >= 'A' && c <= 'F' { - return true - } - } - return false -} - -func extractPart(dst []byte, h *string, n int) bool { - part, left, _ := strings.Cut(*h, delimiter) - *h = left - // hex.Decode decodes unsupported upper-case characters, so exclude explicitly. - if len(part) != n || upperHex(part) { - return false - } - if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 { - return false - } - return true -} - -// Fields returns the keys who's values are set with Inject. -func (TraceContext) Fields() []string { - return []string{traceparentHeader, tracestateHeader} -} diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json deleted file mode 100644 index fa5acf2d3b..0000000000 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "extends": [ - "config:best-practices", - "helpers:pinGitHubActionDigestsToSemver" - ], - "ignorePaths": [], - "labels": ["Skip Changelog", "dependencies"], - "postUpdateOptions" : [ - "gomodTidy" - ], - "packageRules": [ - { - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": true - }, - { - "matchPackageNames": ["go.opentelemetry.io/build-tools/**"], - "groupName": "build-tools" - }, - { - "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], - "groupName": "googleapis" - }, - { - "matchPackageNames": ["golang.org/x/**"], - "groupName": "golang.org/x" - }, - { - "matchPackageNames": 
["go.opentelemetry.io/otel/sdk/log/logtest"], - "enabled": false - } - ] -} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt deleted file mode 100644 index 7c541dee79..0000000000 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -codespell==2.4.2 diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE deleted file mode 100644 index f1aee0f110..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE +++ /dev/null @@ -1,231 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- - -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md deleted file mode 100644 index f81b1576ad..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# SDK - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md deleted file mode 100644 index 06e6d86854..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# SDK Instrumentation - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go deleted file mode 100644 index a4faa6a03d..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package instrumentation provides types to represent the code libraries that -// provide OpenTelemetry instrumentation. These types are used in the -// OpenTelemetry signal pipelines to identify the source of telemetry. 
-// -// See -// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md -// and -// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md -// for more information. -package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go deleted file mode 100644 index f2cdf3c651..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" - -// Library represents the instrumentation library. -// -// Deprecated: use [Scope] instead. -type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go deleted file mode 100644 index 34852a47b2..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" - -import "go.opentelemetry.io/otel/attribute" - -// Scope represents the instrumentation scope. -type Scope struct { - // Name is the name of the instrumentation scope. This should be the - // Go package name of that scope. - Name string - // Version is the version of the instrumentation scope. - Version string - // SchemaURL of the telemetry emitted by the scope. - SchemaURL string - // Attributes of the telemetry emitted by the scope. 
- Attributes attribute.Set -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md deleted file mode 100644 index fab61647c2..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Experimental Features - -The SDK contains features that have not yet stabilized in the OpenTelemetry specification. -These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. - -These feature may change in backwards incompatible ways as feedback is applied. -See the [Compatibility and Stability](#compatibility-and-stability) section for more information. - -## Features - -- [Resource](#resource) - -### Resource - -[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental. -To have experimental semantic conventions be added by [resource detectors] set the `OTEL_GO_X_RESOURCE` environment variable. -The value set must be the case-insensitive string of `"true"` to enable the feature. -All other values are ignored. - - - -[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/ -[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector - -#### Examples - -Enable experimental resource semantic conventions. - -```console -export OTEL_GO_X_RESOURCE=true -``` - -Disable experimental resource semantic conventions. - -```console -unset OTEL_GO_X_RESOURCE -``` - -## Compatibility and Stability - -Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md). -These features may be removed or modified in successive version releases, including patch versions. 
- -When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. -There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. -If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go deleted file mode 100644 index 694b64a318..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. -package x // import "go.opentelemetry.io/otel/sdk/internal/x" - -import "strings" - -// Resource is an experimental feature flag that defines if resource detectors -// should be included experimental semantic conventions. -// -// To enable this feature set the OTEL_GO_X_RESOURCE environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). -var Resource = newFeature( - []string{"RESOURCE"}, - func(v string) (string, bool) { - if strings.EqualFold(v, "true") { - return v, true - } - return "", false - }, -) - -// Observability is an experimental feature flag that determines if SDK -// observability metrics are enabled. -// -// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). 
-var Observability = newFeature( - []string{"OBSERVABILITY", "SELF_OBSERVABILITY"}, - func(v string) (string, bool) { - if strings.EqualFold(v, "true") { - return v, true - } - return "", false - }, -) - -// PerSeriesStartTimestamps is an experimental feature flag that determines if the SDK -// uses the new Start Timestamps specification. -// -// To enable this feature set the OTEL_GO_X_PER_SERIES_START_TIMESTAMPS environment variable -// to the case-insensitive string value of "true". -var PerSeriesStartTimestamps = newFeature( - []string{"PER_SERIES_START_TIMESTAMPS"}, - func(v string) (bool, bool) { - if strings.EqualFold(v, "true") { - return true, true - } - return false, false - }, -) diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go deleted file mode 100644 index 13347e5605..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go +++ /dev/null @@ -1,58 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/x/x.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. -package x // import "go.opentelemetry.io/otel/sdk/internal/x" - -import ( - "os" -) - -// Feature is an experimental feature control flag. It provides a uniform way -// to interact with these feature flags and parse their values. -type Feature[T any] struct { - keys []string - parse func(v string) (T, bool) -} - -func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { - const envKeyRoot = "OTEL_GO_X_" - keys := make([]string, 0, len(suffix)) - for _, s := range suffix { - keys = append(keys, envKeyRoot+s) - } - return Feature[T]{ - keys: keys, - parse: parse, - } -} - -// Keys returns the environment variable keys that can be set to enable the -// feature. 
-func (f Feature[T]) Keys() []string { return f.keys } - -// Lookup returns the user configured value for the feature and true if the -// user has enabled the feature. Otherwise, if the feature is not enabled, a -// zero-value and false are returned. -func (f Feature[T]) Lookup() (v T, ok bool) { - // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value - // - // > The SDK MUST interpret an empty value of an environment variable the - // > same way as when the variable is unset. - for _, key := range f.keys { - vRaw := os.Getenv(key) - if vRaw != "" { - return f.parse(vRaw) - } - } - return v, ok -} - -// Enabled reports whether the feature is enabled. -func (f Feature[T]) Enabled() bool { - _, ok := f.Lookup() - return ok -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md deleted file mode 100644 index 4ad864d716..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# SDK Resource - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go deleted file mode 100644 index c02aeefdde..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "errors" - "fmt" -) - -// ErrPartialResource is returned by a detector when complete source -// information for a Resource is unavailable or the source information -// contains invalid values that are omitted from the returned Resource. 
-var ErrPartialResource = errors.New("partial resource") - -// Detector detects OpenTelemetry resource information. -type Detector interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Detect returns an initialized Resource based on gathered information. - // If the source information to construct a Resource contains invalid - // values, a Resource is returned with the valid parts of the source - // information used for initialization along with an appropriately - // wrapped ErrPartialResource error. - Detect(ctx context.Context) (*Resource, error) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// Detect returns a new [Resource] merged from all the Resources each of the -// detectors produces. Each of the detectors are called sequentially, in the -// order they are passed, merging the produced resource into the previous. -// -// This may return a partial Resource along with an error containing -// [ErrPartialResource] if that error is returned from a detector. It may also -// return a merge-conflicting Resource along with an error containing -// [ErrSchemaURLConflict] if merging Resources from different detectors results -// in a schema URL conflict. It is up to the caller to determine if this -// returned Resource should be used or not. -// -// If one of the detectors returns an error that is not [ErrPartialResource], -// the resource produced by the detector will not be merged and the returned -// error will wrap that detector's error. -func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { - r := new(Resource) - return r, detect(ctx, r, detectors) -} - -// detect runs all detectors using ctx and merges the result into res. This -// assumes res is allocated and not nil, it will panic otherwise. 
-// -// If the detectors or merging resources produces any errors (i.e. -// [ErrPartialResource] [ErrSchemaURLConflict]), a single error wrapping all of -// these errors will be returned. Otherwise, nil is returned. -func detect(ctx context.Context, res *Resource, detectors []Detector) error { - var ( - r *Resource - err error - e error - ) - - for _, detector := range detectors { - if detector == nil { - continue - } - r, e = detector.Detect(ctx) - if e != nil { - err = errors.Join(err, e) - if !errors.Is(e, ErrPartialResource) { - continue - } - } - r, e = Merge(res, r) - if e != nil { - err = errors.Join(err, e) - } - *res = *r - } - - if err != nil { - if errors.Is(err, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. - res.schemaURL = "" - } - - err = fmt.Errorf("error detecting resource: %w", err) - } - return err -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go deleted file mode 100644 index 04f15fcd21..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "github.com/google/uuid" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" -) - -type ( - // telemetrySDK is a Detector that provides information about - // the OpenTelemetry SDK used. This Detector is included as a - // builtin. If these resource attributes are not wanted, use - // resource.New() to explicitly disable them. - telemetrySDK struct{} - - // host is a Detector that provides information about the host - // being run on. This Detector is included as a builtin. 
If - // these resource attributes are not wanted, use the - // resource.New() to explicitly disable them. - host struct{} - - stringDetector struct { - schemaURL string - K attribute.Key - F func() (string, error) - } - - defaultServiceNameDetector struct{} - - defaultServiceInstanceIDDetector struct{} -) - -var ( - _ Detector = telemetrySDK{} - _ Detector = host{} - _ Detector = stringDetector{} - _ Detector = defaultServiceNameDetector{} - _ Detector = defaultServiceInstanceIDDetector{} -) - -// Detect returns a *Resource that describes the OpenTelemetry SDK used. -func (telemetrySDK) Detect(context.Context) (*Resource, error) { - return NewWithAttributes( - semconv.SchemaURL, - semconv.TelemetrySDKName("opentelemetry"), - semconv.TelemetrySDKLanguageGo, - semconv.TelemetrySDKVersion(sdk.Version()), - ), nil -} - -// Detect returns a *Resource that describes the host being run on. -func (host) Detect(ctx context.Context) (*Resource, error) { - return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) -} - -// StringDetector returns a Detector that will produce a *Resource -// containing the string as a value corresponding to k. The resulting Resource -// will have the specified schemaURL. -func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { - return stringDetector{schemaURL: schemaURL, K: k, F: f} -} - -// Detect returns a *Resource that describes the string as a value -// corresponding to attribute.Key as well as the specific schemaURL. -func (sd stringDetector) Detect(context.Context) (*Resource, error) { - value, err := sd.F() - if err != nil { - return nil, fmt.Errorf("%s: %w", string(sd.K), err) - } - a := sd.K.String(value) - if !a.Valid() { - return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) - } - return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil -} - -// Detect implements Detector. 
-func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { - return StringDetector( - semconv.SchemaURL, - semconv.ServiceNameKey, - func() (string, error) { - executable, err := os.Executable() - if err != nil { - return "unknown_service:go", nil - } - return "unknown_service:" + filepath.Base(executable), nil - }, - ).Detect(ctx) -} - -// Detect implements Detector. -func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { - return StringDetector( - semconv.SchemaURL, - semconv.ServiceInstanceIDKey, - func() (string, error) { - version4Uuid, err := uuid.NewRandom() - if err != nil { - return "", err - } - - return version4Uuid.String(), nil - }, - ).Detect(ctx) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go deleted file mode 100644 index a3d647d92c..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" -) - -// config contains configuration for Resource creation. -type config struct { - // detectors that will be evaluated. - detectors []Detector - // SchemaURL to associate with the Resource. - schemaURL string -} - -// Option is the interface that applies a configuration option. -type Option interface { - // apply sets the Option value of a config. - apply(config) config -} - -// WithAttributes adds attributes to the configured Resource. 
-func WithAttributes(attributes ...attribute.KeyValue) Option { - return WithDetectors(detectAttributes{attributes}) -} - -type detectAttributes struct { - attributes []attribute.KeyValue -} - -func (d detectAttributes) Detect(context.Context) (*Resource, error) { - return NewSchemaless(d.attributes...), nil -} - -// WithDetectors adds detectors to be evaluated for the configured resource. -func WithDetectors(detectors ...Detector) Option { - return detectorsOption{detectors: detectors} -} - -type detectorsOption struct { - detectors []Detector -} - -func (o detectorsOption) apply(cfg config) config { - cfg.detectors = append(cfg.detectors, o.detectors...) - return cfg -} - -// WithFromEnv adds attributes from environment variables to the configured resource. -func WithFromEnv() Option { - return WithDetectors(fromEnv{}) -} - -// WithHost adds attributes from the host to the configured resource. -func WithHost() Option { - return WithDetectors(host{}) -} - -// WithHostID adds host ID information to the configured resource. -func WithHostID() Option { - return WithDetectors(hostIDDetector{}) -} - -// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. -func WithTelemetrySDK() Option { - return WithDetectors(telemetrySDK{}) -} - -// WithSchemaURL sets the schema URL for the configured resource. -func WithSchemaURL(schemaURL string) Option { - return schemaURLOption(schemaURL) -} - -type schemaURLOption string - -func (o schemaURLOption) apply(cfg config) config { - cfg.schemaURL = string(o) - return cfg -} - -// WithOS adds all the OS attributes to the configured Resource. -// See individual WithOS* functions to configure specific attributes. -func WithOS() Option { - return WithDetectors( - osTypeDetector{}, - osDescriptionDetector{}, - ) -} - -// WithOSType adds an attribute with the operating system type to the configured Resource. 
-func WithOSType() Option { - return WithDetectors(osTypeDetector{}) -} - -// WithOSDescription adds an attribute with the operating system description to the -// configured Resource. The formatted string is equivalent to the output of the -// `uname -snrvm` command. -func WithOSDescription() Option { - return WithDetectors(osDescriptionDetector{}) -} - -// WithProcess adds all the Process attributes to the configured Resource. -// -// Warning! This option will include process command line arguments. If these -// contain sensitive information it will be included in the exported resource. -// -// This option is equivalent to calling WithProcessPID, -// WithProcessExecutableName, WithProcessExecutablePath, -// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, -// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each -// option function for information about what resource attributes each -// includes. -func WithProcess() Option { - return WithDetectors( - processPIDDetector{}, - processExecutableNameDetector{}, - processExecutablePathDetector{}, - processCommandArgsDetector{}, - processOwnerDetector{}, - processRuntimeNameDetector{}, - processRuntimeVersionDetector{}, - processRuntimeDescriptionDetector{}, - ) -} - -// WithProcessPID adds an attribute with the process identifier (PID) to the -// configured Resource. -func WithProcessPID() Option { - return WithDetectors(processPIDDetector{}) -} - -// WithProcessExecutableName adds an attribute with the name of the process -// executable to the configured Resource. -func WithProcessExecutableName() Option { - return WithDetectors(processExecutableNameDetector{}) -} - -// WithProcessExecutablePath adds an attribute with the full path to the process -// executable to the configured Resource. 
-func WithProcessExecutablePath() Option { - return WithDetectors(processExecutablePathDetector{}) -} - -// WithProcessCommandArgs adds an attribute with all the command arguments (including -// the command/executable itself) as received by the process to the configured -// Resource. -// -// Warning! This option will include process command line arguments. If these -// contain sensitive information it will be included in the exported resource. -func WithProcessCommandArgs() Option { - return WithDetectors(processCommandArgsDetector{}) -} - -// WithProcessOwner adds an attribute with the username of the user that owns the process -// to the configured Resource. -func WithProcessOwner() Option { - return WithDetectors(processOwnerDetector{}) -} - -// WithProcessRuntimeName adds an attribute with the name of the runtime of this -// process to the configured Resource. -func WithProcessRuntimeName() Option { - return WithDetectors(processRuntimeNameDetector{}) -} - -// WithProcessRuntimeVersion adds an attribute with the version of the runtime of -// this process to the configured Resource. -func WithProcessRuntimeVersion() Option { - return WithDetectors(processRuntimeVersionDetector{}) -} - -// WithProcessRuntimeDescription adds an attribute with an additional description -// about the runtime of the process to the configured Resource. -func WithProcessRuntimeDescription() Option { - return WithDetectors(processRuntimeDescriptionDetector{}) -} - -// WithContainer adds all the Container attributes to the configured Resource. -// See individual WithContainer* functions to configure specific attributes. -func WithContainer() Option { - return WithDetectors( - cgroupContainerIDDetector{}, - ) -} - -// WithContainerID adds an attribute with the id of the container to the configured Resource. -// Note: WithContainerID will not extract the correct container ID in an ECS environment. 
-// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). -func WithContainerID() Option { - return WithDetectors(cgroupContainerIDDetector{}) -} - -// WithService adds all the Service attributes to the configured Resource. -func WithService() Option { - return WithDetectors( - defaultServiceInstanceIDDetector{}, - defaultServiceNameDetector{}, - ) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go deleted file mode 100644 index e977ff1c48..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "bufio" - "context" - "errors" - "io" - "os" - "regexp" - - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" -) - -type containerIDProvider func() (string, error) - -var ( - containerID containerIDProvider = getContainerIDFromCGroup - cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`) -) - -type cgroupContainerIDDetector struct{} - -const cgroupPath = "/proc/self/cgroup" - -// Detect returns a *Resource that describes the id of the container. -// If no container id found, an empty resource will be returned. -func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) { - containerID, err := containerID() - if err != nil { - return nil, err - } - - if containerID == "" { - return Empty(), nil - } - return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil -} - -var ( - defaultOSStat = os.Stat - osStat = defaultOSStat - - defaultOSOpen = func(name string) (io.ReadCloser, error) { - return os.Open(name) - } - osOpen = defaultOSOpen -) - -// getContainerIDFromCGroup returns the id of the container from the cgroup file. 
-// If no container id found, an empty string will be returned. -func getContainerIDFromCGroup() (string, error) { - if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { - // File does not exist, skip - return "", nil - } - - file, err := osOpen(cgroupPath) - if err != nil { - return "", err - } - defer file.Close() - - return getContainerIDFromReader(file), nil -} - -// getContainerIDFromReader returns the id of the container from reader. -func getContainerIDFromReader(reader io.Reader) string { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - line := scanner.Text() - - if id := getContainerIDFromLine(line); id != "" { - return id - } - } - return "" -} - -// getContainerIDFromLine returns the id of the container from one string line. -func getContainerIDFromLine(line string) string { - matches := cgroupContainerIDRe.FindStringSubmatch(line) - if len(matches) <= 1 { - return "" - } - return matches[1] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go deleted file mode 100644 index 64939a2713..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package resource provides detecting and representing resources. -// -// The fundamental struct is a Resource which holds identifying information -// about the entities for which telemetry is exported. -// -// To automatically construct Resources from an environment a Detector -// interface is defined. Implementations of this interface can be passed to -// the Detect function to generate a Resource from the merged information. -// -// To load a user defined Resource from the environment variable -// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret -// the value as a list of comma delimited key/value pairs -// (e.g. `=,=,...`). 
-// -// While this package provides a stable API, -// the attributes added by resource detectors may change. -package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go deleted file mode 100644 index bc0e5c19e3..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "fmt" - "net/url" - "os" - "strings" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" -) - -const ( - // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. - resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials - - // svcNameKey is the environment variable name that Service Name information will be read from. - svcNameKey = "OTEL_SERVICE_NAME" -) - -// errMissingValue is returned when a resource value is missing. -var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) - -// fromEnv is a Detector that implements the Detector and collects -// resources from environment. This Detector is included as a -// builtin. -type fromEnv struct{} - -// compile time assertion that FromEnv implements Detector interface. -var _ Detector = fromEnv{} - -// Detect collects resources from environment. 
-func (fromEnv) Detect(context.Context) (*Resource, error) { - attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) - svcName := strings.TrimSpace(os.Getenv(svcNameKey)) - - if attrs == "" && svcName == "" { - return Empty(), nil - } - - var res *Resource - - if svcName != "" { - res = NewSchemaless(semconv.ServiceName(svcName)) - } - - r2, err := constructOTResources(attrs) - - // Ensure that the resource with the service name from OTEL_SERVICE_NAME - // takes precedence, if it was defined. - res, err2 := Merge(r2, res) - - if err == nil { - err = err2 - } else if err2 != nil { - err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) - } - - return res, err -} - -func constructOTResources(s string) (*Resource, error) { - if s == "" { - return Empty(), nil - } - pairs := strings.Split(s, ",") - var attrs []attribute.KeyValue - var invalid []string - for _, p := range pairs { - k, v, found := strings.Cut(p, "=") - if !found { - invalid = append(invalid, p) - continue - } - key := strings.TrimSpace(k) - val, err := url.PathUnescape(strings.TrimSpace(v)) - if err != nil { - // Retain original value if decoding fails, otherwise it will be - // an empty string. 
- val = v - otel.Handle(err) - } - attrs = append(attrs, attribute.String(key, val)) - } - var err error - if len(invalid) > 0 { - err = fmt.Errorf("%w: %v", errMissingValue, invalid) - } - return NewSchemaless(attrs...), err -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go deleted file mode 100644 index 755c082427..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "errors" - "strings" - - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" -) - -type hostIDProvider func() (string, error) - -var defaultHostIDProvider hostIDProvider = platformHostIDReader.read - -var hostID = defaultHostIDProvider - -type hostIDReader interface { - read() (string, error) -} - -type fileReader func(string) (string, error) - -type commandExecutor func(string, ...string) (string, error) - -// hostIDReaderBSD implements hostIDReader. -type hostIDReaderBSD struct { - execCommand commandExecutor - readFile fileReader -} - -// read attempts to read the machine-id from /etc/hostid. -// If not found it will execute: /bin/kenv -q smbios.system.uuid. -// If neither location yields an id an error will be returned. -func (r *hostIDReaderBSD) read() (string, error) { - if result, err := r.readFile("/etc/hostid"); err == nil { - return strings.TrimSpace(result), nil - } - - if result, err := r.execCommand("/bin/kenv", "-q", "smbios.system.uuid"); err == nil { - return strings.TrimSpace(result), nil - } - - return "", errors.New("host id not found in: /etc/hostid or /bin/kenv") -} - -// hostIDReaderDarwin implements hostIDReader. 
-type hostIDReaderDarwin struct { - execCommand commandExecutor -} - -// read executes `/usr/sbin/ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id -// from the IOPlatformUUID line. If the command fails or the uuid cannot be -// parsed an error will be returned. -func (r *hostIDReaderDarwin) read() (string, error) { - result, err := r.execCommand("/usr/sbin/ioreg", "-rd1", "-c", "IOPlatformExpertDevice") - if err != nil { - return "", err - } - - for line := range strings.SplitSeq(result, "\n") { - if strings.Contains(line, "IOPlatformUUID") { - parts := strings.Split(line, " = ") - if len(parts) == 2 { - return strings.Trim(parts[1], "\""), nil - } - break - } - } - - return "", errors.New("could not parse IOPlatformUUID") -} - -type hostIDReaderLinux struct { - readFile fileReader -} - -// read attempts to read the machine-id from /etc/machine-id followed by -// /var/lib/dbus/machine-id. If neither location yields an ID an error will -// be returned. -func (r *hostIDReaderLinux) read() (string, error) { - if result, err := r.readFile("/etc/machine-id"); err == nil { - return strings.TrimSpace(result), nil - } - - if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { - return strings.TrimSpace(result), nil - } - - return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") -} - -type hostIDDetector struct{} - -// Detect returns a *Resource containing the platform specific host id. 
-func (hostIDDetector) Detect(context.Context) (*Resource, error) { - hostID, err := hostID() - if err != nil { - return nil, err - } - - return NewWithAttributes( - semconv.SchemaURL, - semconv.HostID(hostID), - ), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go deleted file mode 100644 index 4c1c30f256..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build dragonfly || freebsd || netbsd || openbsd || solaris - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -var platformHostIDReader hostIDReader = &hostIDReaderBSD{ - execCommand: execCommand, - readFile: readFile, -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go deleted file mode 100644 index b09fde3b73..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ - execCommand: execCommand, -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go deleted file mode 100644 index d9e5d1a8ff..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import "os/exec" - -func execCommand(name string, arg ...string) (string, error) { - cmd := 
exec.Command(name, arg...) - b, err := cmd.Output() - if err != nil { - return "", err - } - - return string(b), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go deleted file mode 100644 index 4a26096c8d..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build linux - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -var platformHostIDReader hostIDReader = &hostIDReaderLinux{ - readFile: readFile, -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go deleted file mode 100644 index c95d87685c..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import "os" - -func readFile(filename string) (string, error) { - b, err := os.ReadFile(filename) // nolint:gosec // false positive - if err != nil { - return "", err - } - - return string(b), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go deleted file mode 100644 index 63ad2fa4e0..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -// hostIDReaderUnsupported is a 
placeholder implementation for operating systems -// for which this project currently doesn't support host.id -// attribute detection. See build tags declaration early on this file -// for a list of unsupported OSes. -type hostIDReaderUnsupported struct{} - -func (*hostIDReaderUnsupported) read() (string, error) { - return "", nil -} - -var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go deleted file mode 100644 index 2b8ca20b38..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build windows - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "golang.org/x/sys/windows/registry" -) - -// implements hostIDReader. -type hostIDReaderWindows struct{} - -// read reads MachineGuid from the Windows registry key: -// SOFTWARE\Microsoft\Cryptography. 
-func (*hostIDReaderWindows) read() (string, error) { - k, err := registry.OpenKey( - registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, - registry.QUERY_VALUE|registry.WOW64_64KEY, - ) - if err != nil { - return "", err - } - defer k.Close() - - guid, _, err := k.GetStringValue("MachineGuid") - if err != nil { - return "", err - } - - return guid, nil -} - -var platformHostIDReader hostIDReader = &hostIDReaderWindows{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go deleted file mode 100644 index f5682cad41..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "strings" - - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" -) - -type osDescriptionProvider func() (string, error) - -var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription - -var osDescription = defaultOSDescriptionProvider - -func setDefaultOSDescriptionProvider() { - setOSDescriptionProvider(defaultOSDescriptionProvider) -} - -func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { - osDescription = osDescriptionProvider -} - -type ( - osTypeDetector struct{} - osDescriptionDetector struct{} -) - -// Detect returns a *Resource that describes the operating system type the -// service is running on. -func (osTypeDetector) Detect(context.Context) (*Resource, error) { - osType := runtimeOS() - - osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) - - return NewWithAttributes( - semconv.SchemaURL, - osTypeAttribute, - ), nil -} - -// Detect returns a *Resource that describes the operating system the -// service is running on. 
-func (osDescriptionDetector) Detect(context.Context) (*Resource, error) { - description, err := osDescription() - if err != nil { - return nil, err - } - - return NewWithAttributes( - semconv.SchemaURL, - semconv.OSDescription(description), - ), nil -} - -// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime -// into an OS type attribute with the corresponding value defined by the semantic -// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase -// and used as the value for the returned OS type attribute. -func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { - // the elements in this map are the intersection between - // available GOOS values and defined semconv OS types - osTypeAttributeMap := map[string]attribute.KeyValue{ - "aix": semconv.OSTypeAIX, - "darwin": semconv.OSTypeDarwin, - "dragonfly": semconv.OSTypeDragonflyBSD, - "freebsd": semconv.OSTypeFreeBSD, - "linux": semconv.OSTypeLinux, - "netbsd": semconv.OSTypeNetBSD, - "openbsd": semconv.OSTypeOpenBSD, - "solaris": semconv.OSTypeSolaris, - "windows": semconv.OSTypeWindows, - "zos": semconv.OSTypeZOS, - } - - var osTypeAttribute attribute.KeyValue - - if attr, ok := osTypeAttributeMap[osType]; ok { - osTypeAttribute = attr - } else { - osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) - } - - return osTypeAttribute -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go deleted file mode 100644 index 3d703c5d98..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "os" -) - -type plist struct { - XMLName xml.Name `xml:"plist"` - Dict dict `xml:"dict"` 
-} - -type dict struct { - Key []string `xml:"key"` - String []string `xml:"string"` -} - -// osRelease builds a string describing the operating system release based on the -// contents of the property list (.plist) system files. If no .plist files are found, -// or if the required properties to build the release description string are missing, -// an empty string is returned instead. The generated string resembles the output of -// the `sw_vers` commandline program, but in a single-line string. For more information -// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS. -func osRelease() string { - file, err := getPlistFile() - if err != nil { - return "" - } - - defer file.Close() - - values, err := parsePlistFile(file) - if err != nil { - return "" - } - - return buildOSRelease(values) -} - -// getPlistFile returns a *os.File pointing to one of the well-known .plist files -// available on macOS. If no file can be opened, it returns an error. -func getPlistFile() (*os.File, error) { - return getFirstAvailableFile([]string{ - "/System/Library/CoreServices/SystemVersion.plist", - "/System/Library/CoreServices/ServerVersion.plist", - }) -} - -// parsePlistFile process the file pointed by `file` as a .plist file and returns -// a map with the key-values for each pair of correlated and elements -// contained in it. -func parsePlistFile(file io.Reader) (map[string]string, error) { - var v plist - - err := xml.NewDecoder(file).Decode(&v) - if err != nil { - return nil, err - } - - if len(v.Dict.Key) != len(v.Dict.String) { - return nil, errors.New("the number of and elements doesn't match") - } - - properties := make(map[string]string, len(v.Dict.Key)) - for i, key := range v.Dict.Key { - properties[key] = v.Dict.String[i] - } - - return properties, nil -} - -// buildOSRelease builds a string describing the OS release based on the properties -// available on the provided map. 
It tries to find the `ProductName`, `ProductVersion` -// and `ProductBuildVersion` properties. If some of these properties are not found, -// it returns an empty string. -func buildOSRelease(properties map[string]string) string { - productName := properties["ProductName"] - productVersion := properties["ProductVersion"] - productBuildVersion := properties["ProductBuildVersion"] - - if productName == "" || productVersion == "" || productBuildVersion == "" { - return "" - } - - return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go deleted file mode 100644 index a1763267c2..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -// osRelease builds a string describing the operating system release based on the -// properties of the os-release file. If no os-release file is found, or if the -// required properties to build the release description string are missing, an empty -// string is returned instead. For more information about os-release files, see: -// https://www.freedesktop.org/software/systemd/man/os-release.html -func osRelease() string { - file, err := getOSReleaseFile() - if err != nil { - return "" - } - - defer file.Close() - - values := parseOSReleaseFile(file) - - return buildOSRelease(values) -} - -// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release -// files, according to their order of preference. If no file can be opened, it -// returns an error. 
-func getOSReleaseFile() (*os.File, error) { - return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) -} - -// parseOSReleaseFile process the file pointed by `file` as an os-release file and -// returns a map with the key-values contained in it. Empty lines or lines starting -// with a '#' character are ignored, as well as lines with the missing key=value -// separator. Values are unquoted and unescaped. -func parseOSReleaseFile(file io.Reader) map[string]string { - values := make(map[string]string) - scanner := bufio.NewScanner(file) - - for scanner.Scan() { - line := scanner.Text() - - if skip(line) { - continue - } - - key, value, ok := parse(line) - if ok { - values[key] = value - } - } - - return values -} - -// skip reports whether the line is blank or starts with a '#' character, and -// therefore should be skipped from processing. -func skip(line string) bool { - line = strings.TrimSpace(line) - - return line == "" || strings.HasPrefix(line, "#") -} - -// parse attempts to split the provided line on the first '=' character, and then -// sanitize each side of the split before returning them as a key-value pair. -func parse(line string) (string, string, bool) { - k, v, found := strings.Cut(line, "=") - - if !found || k == "" { - return "", "", false - } - - key := strings.TrimSpace(k) - value := unescape(unquote(strings.TrimSpace(v))) - - return key, value, true -} - -// unquote checks whether the string `s` is quoted with double or single quotes -// and, if so, returns a version of the string without them. Otherwise it returns -// the provided string unchanged. -func unquote(s string) string { - if len(s) < 2 { - return s - } - - if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { - return s[1 : len(s)-1] - } - - return s -} - -// unescape removes the `\` prefix from some characters that are expected -// to have it added in front of them for escaping purposes. 
-func unescape(s string) string { - return strings.NewReplacer( - `\$`, `$`, - `\"`, `"`, - `\'`, `'`, - `\\`, `\`, - "\\`", "`", - ).Replace(s) -} - -// buildOSRelease builds a string describing the OS release based on the properties -// available on the provided map. It favors a combination of the `NAME` and `VERSION` -// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't -// found), and using `PRETTY_NAME` alone if some of the previous are not present. If -// none of these properties are found, it returns an empty string. -// -// The rationale behind not using `PRETTY_NAME` as first choice was that, for some -// Linux distributions, it doesn't include the same detail that can be found on the -// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with -// other properties can produce "pretty" redundant strings in some cases. -func buildOSRelease(values map[string]string) string { - var osRelease string - - name := values["NAME"] - version := values["VERSION"] - - if version == "" { - version = values["VERSION_ID"] - } - - if name != "" && version != "" { - osRelease = fmt.Sprintf("%s %s", name, version) - } else { - osRelease = values["PRETTY_NAME"] - } - - return osRelease -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go deleted file mode 100644 index 6c50ab6867..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -type unameProvider func(buf *unix.Utsname) (err error) - -var defaultUnameProvider unameProvider = unix.Uname - -var currentUnameProvider = defaultUnameProvider - -func 
setDefaultUnameProvider() { - setUnameProvider(defaultUnameProvider) -} - -func setUnameProvider(unameProvider unameProvider) { - currentUnameProvider = unameProvider -} - -// platformOSDescription returns a human readable OS version information string. -// The final string combines OS release information (where available) and the -// result of the `uname` system call. -func platformOSDescription() (string, error) { - uname, err := uname() - if err != nil { - return "", err - } - - osRelease := osRelease() - if osRelease != "" { - return fmt.Sprintf("%s (%s)", osRelease, uname), nil - } - - return uname, nil -} - -// uname issues a uname(2) system call (or equivalent on systems which doesn't -// have one) and formats the output in a single string, similar to the output -// of the `uname` commandline program. The final string resembles the one -// obtained with a call to `uname -snrvm`. -func uname() (string, error) { - var utsName unix.Utsname - - err := currentUnameProvider(&utsName) - if err != nil { - return "", err - } - - return fmt.Sprintf("%s %s %s %s %s", - unix.ByteSliceToString(utsName.Sysname[:]), - unix.ByteSliceToString(utsName.Nodename[:]), - unix.ByteSliceToString(utsName.Release[:]), - unix.ByteSliceToString(utsName.Version[:]), - unix.ByteSliceToString(utsName.Machine[:]), - ), nil -} - -// getFirstAvailableFile returns an *os.File of the first available -// file from a list of candidate file paths. 
-func getFirstAvailableFile(candidates []string) (*os.File, error) { - for _, c := range candidates { - file, err := os.Open(c) - if err == nil { - return file, nil - } - } - - return nil, fmt.Errorf("no candidate file available: %v", candidates) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go deleted file mode 100644 index 25f629532a..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -// platformOSDescription is a placeholder implementation for OSes -// for which this project currently doesn't support os.description -// attribute detection. See build tags declaration early on this file -// for a list of unsupported OSes. -func platformOSDescription() (string, error) { - return "", nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go deleted file mode 100644 index a6a5a53c0e..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "fmt" - "strconv" - - "golang.org/x/sys/windows/registry" -) - -// platformOSDescription returns a human readable OS version information string. -// It does so by querying registry values under the -// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string -// resembles the one displayed by the Version Reporter Applet (winver.exe). 
-func platformOSDescription() (string, error) { - k, err := registry.OpenKey( - registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { - return "", err - } - - defer k.Close() - - var ( - productName = readProductName(k) - displayVersion = readDisplayVersion(k) - releaseID = readReleaseID(k) - currentMajorVersionNumber = readCurrentMajorVersionNumber(k) - currentMinorVersionNumber = readCurrentMinorVersionNumber(k) - currentBuildNumber = readCurrentBuildNumber(k) - ubr = readUBR(k) - ) - - if displayVersion != "" { - displayVersion += " " - } - - return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", - productName, - displayVersion, - releaseID, - currentMajorVersionNumber, - currentMinorVersionNumber, - currentBuildNumber, - ubr, - ), nil -} - -func getStringValue(name string, k registry.Key) string { - value, _, _ := k.GetStringValue(name) - - return value -} - -func getIntegerValue(name string, k registry.Key) uint64 { - value, _, _ := k.GetIntegerValue(name) - - return value -} - -func readProductName(k registry.Key) string { - return getStringValue("ProductName", k) -} - -func readDisplayVersion(k registry.Key) string { - return getStringValue("DisplayVersion", k) -} - -func readReleaseID(k registry.Key) string { - return getStringValue("ReleaseID", k) -} - -func readCurrentMajorVersionNumber(k registry.Key) string { - return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) -} - -func readCurrentMinorVersionNumber(k registry.Key) string { - return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) -} - -func readCurrentBuildNumber(k registry.Key) string { - return getStringValue("CurrentBuildNumber", k) -} - -func readUBR(k registry.Key) string { - return strconv.FormatUint(getIntegerValue("UBR", k), 10) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go deleted file mode 100644 index 
99dce64f6d..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "fmt" - "os" - "os/user" - "path/filepath" - "runtime" - - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" -) - -type ( - pidProvider func() int - executablePathProvider func() (string, error) - commandArgsProvider func() []string - ownerProvider func() (*user.User, error) - runtimeNameProvider func() string - runtimeVersionProvider func() string - runtimeOSProvider func() string - runtimeArchProvider func() string -) - -var ( - defaultPidProvider pidProvider = os.Getpid - defaultExecutablePathProvider executablePathProvider = os.Executable - defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } - defaultOwnerProvider ownerProvider = user.Current - defaultRuntimeNameProvider runtimeNameProvider = func() string { - if runtime.Compiler == "gc" { - return "go" - } - return runtime.Compiler - } - defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version - defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } - defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } -) - -var ( - pid = defaultPidProvider - executablePath = defaultExecutablePathProvider - commandArgs = defaultCommandArgsProvider - owner = defaultOwnerProvider - runtimeName = defaultRuntimeNameProvider - runtimeVersion = defaultRuntimeVersionProvider - runtimeOS = defaultRuntimeOSProvider - runtimeArch = defaultRuntimeArchProvider -) - -func setDefaultOSProviders() { - setOSProviders( - defaultPidProvider, - defaultExecutablePathProvider, - defaultCommandArgsProvider, - ) -} - -func setOSProviders( - pidProvider pidProvider, - executablePathProvider executablePathProvider, - commandArgsProvider 
commandArgsProvider, -) { - pid = pidProvider - executablePath = executablePathProvider - commandArgs = commandArgsProvider -} - -func setDefaultRuntimeProviders() { - setRuntimeProviders( - defaultRuntimeNameProvider, - defaultRuntimeVersionProvider, - defaultRuntimeOSProvider, - defaultRuntimeArchProvider, - ) -} - -func setRuntimeProviders( - runtimeNameProvider runtimeNameProvider, - runtimeVersionProvider runtimeVersionProvider, - runtimeOSProvider runtimeOSProvider, - runtimeArchProvider runtimeArchProvider, -) { - runtimeName = runtimeNameProvider - runtimeVersion = runtimeVersionProvider - runtimeOS = runtimeOSProvider - runtimeArch = runtimeArchProvider -} - -func setDefaultUserProviders() { - setUserProviders(defaultOwnerProvider) -} - -func setUserProviders(ownerProvider ownerProvider) { - owner = ownerProvider -} - -type ( - processPIDDetector struct{} - processExecutableNameDetector struct{} - processExecutablePathDetector struct{} - processCommandArgsDetector struct{} - processOwnerDetector struct{} - processRuntimeNameDetector struct{} - processRuntimeVersionDetector struct{} - processRuntimeDescriptionDetector struct{} -) - -// Detect returns a *Resource that describes the process identifier (PID) of the -// executing process. -func (processPIDDetector) Detect(context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil -} - -// Detect returns a *Resource that describes the name of the process executable. -func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) { - executableName := filepath.Base(commandArgs()[0]) - - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil -} - -// Detect returns a *Resource that describes the full path of the process executable. 
-func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) { - executablePath, err := executablePath() - if err != nil { - return nil, err - } - - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil -} - -// Detect returns a *Resource that describes all the command arguments as received -// by the process. -func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil -} - -// Detect returns a *Resource that describes the username of the user that owns the -// process. -func (processOwnerDetector) Detect(context.Context) (*Resource, error) { - owner, err := owner() - if err != nil { - return nil, err - } - - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil -} - -// Detect returns a *Resource that describes the name of the compiler used to compile -// this process image. -func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil -} - -// Detect returns a *Resource that describes the version of the runtime of this process. -func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) { - return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil -} - -// Detect returns a *Resource that describes the runtime of this process. 
-func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) { - runtimeDescription := fmt.Sprintf( - "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) - - return NewWithAttributes( - semconv.SchemaURL, - semconv.ProcessRuntimeDescription(runtimeDescription), - ), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go deleted file mode 100644 index f715be53ed..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package resource // import "go.opentelemetry.io/otel/sdk/resource" - -import ( - "context" - "errors" - "fmt" - "sync" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/internal/x" -) - -// Resource describes an entity about which identifying information -// and metadata is exposed. Resource is an immutable object, -// equivalent to a map from key to unique value. -// -// Resources should be passed and stored as pointers -// (`*resource.Resource`). The `nil` value is equivalent to an empty -// Resource. -// -// Note that the Go == operator compares not just the resource attributes but -// also all other internals of the Resource type. Therefore, Resource values -// should not be used as map or database keys. In general, the [Resource.Equal] -// method should be used instead of direct comparison with ==, since that -// method ensures the correct comparison of resource attributes, and the -// [attribute.Distinct] returned from [Resource.Equivalent] should be used for -// map and database keys instead. -type Resource struct { - attrs attribute.Set - schemaURL string -} - -// Compile-time check that the Resource remains comparable. 
-var _ map[Resource]struct{} = nil - -var ( - defaultResource *Resource - defaultResourceOnce sync.Once -) - -// ErrSchemaURLConflict is an error returned when two Resources are merged -// together that contain different, non-empty, schema URLs. -var ErrSchemaURLConflict = errors.New("conflicting Schema URL") - -// New returns a [Resource] built using opts. -// -// This may return a partial Resource along with an error containing -// [ErrPartialResource] if options that provide a [Detector] are used and that -// error is returned from one or more of the Detectors. It may also return a -// merge-conflict Resource along with an error containing -// [ErrSchemaURLConflict] if merging Resources from the opts results in a -// schema URL conflict (see [Resource.Merge] for more information). It is up to -// the caller to determine if this returned Resource should be used or not -// based on these errors. -func New(ctx context.Context, opts ...Option) (*Resource, error) { - cfg := config{} - for _, opt := range opts { - cfg = opt.apply(cfg) - } - - r := &Resource{schemaURL: cfg.schemaURL} - return r, detect(ctx, r, cfg.detectors) -} - -// NewWithAttributes creates a resource from attrs and associates the resource with a -// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs -// contains any invalid items those items will be dropped. The attrs are assumed to be -// in a schema identified by schemaURL. -func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { - resource := NewSchemaless(attrs...) - resource.schemaURL = schemaURL - return resource -} - -// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, -// the last value will be used. If attrs contains any invalid items those items will -// be dropped. The resource will not be associated with a schema URL. If the schema -// of the attrs is known use NewWithAttributes instead. 
-func NewSchemaless(attrs ...attribute.KeyValue) *Resource { - if len(attrs) == 0 { - return &Resource{} - } - - // Ensure attributes comply with the specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute - s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { - return kv.Valid() - }) - - // If attrs only contains invalid entries do not allocate a new resource. - if s.Len() == 0 { - return &Resource{} - } - - return &Resource{attrs: s} //nolint -} - -// String implements the Stringer interface and provides a -// human-readable form of the resource. -// -// Avoid using this representation as the key in a map of resources, -// use Equivalent() as the key instead. -func (r *Resource) String() string { - if r == nil { - return "" - } - return r.attrs.Encoded(attribute.DefaultEncoder()) -} - -// MarshalLog is the marshaling function used by the logging system to represent this Resource. -func (r *Resource) MarshalLog() any { - return struct { - Attributes attribute.Set - SchemaURL string - }{ - Attributes: r.attrs, - SchemaURL: r.schemaURL, - } -} - -// Attributes returns a copy of attributes from the resource in a sorted order. -// To avoid allocating a new slice, use an iterator. -func (r *Resource) Attributes() []attribute.KeyValue { - if r == nil { - r = Empty() - } - return r.attrs.ToSlice() -} - -// SchemaURL returns the schema URL associated with Resource r. -func (r *Resource) SchemaURL() string { - if r == nil { - return "" - } - return r.schemaURL -} - -// Iter returns an iterator of the Resource attributes. -// This is ideal to use if you do not want a copy of the attributes. -func (r *Resource) Iter() attribute.Iterator { - if r == nil { - r = Empty() - } - return r.attrs.Iter() -} - -// Equal reports whether r and o represent the same resource. Two resources can -// be equal even if they have different schema URLs. 
-// -// See the documentation on the [Resource] type for the pitfalls of using == -// with Resource values; most code should use Equal instead. -func (r *Resource) Equal(o *Resource) bool { - if r == nil { - r = Empty() - } - if o == nil { - o = Empty() - } - return r.Equivalent() == o.Equivalent() -} - -// Merge creates a new [Resource] by merging a and b. -// -// If there are common keys between a and b, then the value from b will -// overwrite the value from a, even if b's value is empty. -// -// The SchemaURL of the resources will be merged according to the -// [OpenTelemetry specification rules]: -// -// - If a's schema URL is empty then the returned Resource's schema URL will -// be set to the schema URL of b, -// - Else if b's schema URL is empty then the returned Resource's schema URL -// will be set to the schema URL of a, -// - Else if the schema URLs of a and b are the same then that will be the -// schema URL of the returned Resource, -// - Else this is a merging error. If the resources have different, -// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will -// be returned with the merged Resource. The merged Resource will have an -// empty schema URL. It may be the case that some unintended attributes -// have been overwritten or old semantic conventions persisted in the -// returned Resource. It is up to the caller to determine if this returned -// Resource should be used or not. -// -// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge -func Merge(a, b *Resource) (*Resource, error) { - if a == nil && b == nil { - return Empty(), nil - } - if a == nil { - return b, nil - } - if b == nil { - return a, nil - } - - // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() - // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) 
- mi := attribute.NewMergeIterator(b.Set(), a.Set()) - combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) - for mi.Next() { - combine = append(combine, mi.Attribute()) - } - - switch { - case a.schemaURL == "": - return NewWithAttributes(b.schemaURL, combine...), nil - case b.schemaURL == "": - return NewWithAttributes(a.schemaURL, combine...), nil - case a.schemaURL == b.schemaURL: - return NewWithAttributes(a.schemaURL, combine...), nil - } - // Return the merged resource with an appropriate error. It is up to - // the user to decide if the returned resource can be used or not. - return NewSchemaless(combine...), fmt.Errorf( - "%w: %s and %s", - ErrSchemaURLConflict, - a.schemaURL, - b.schemaURL, - ) -} - -// Empty returns an instance of Resource with no attributes. It is -// equivalent to a `nil` Resource. -func Empty() *Resource { - return &Resource{} -} - -// Default returns an instance of Resource with a default -// "service.name" and OpenTelemetrySDK attributes. -func Default() *Resource { - return DefaultWithContext(context.Background()) -} - -// DefaultWithContext returns an instance of Resource with a default -// "service.name" and OpenTelemetrySDK attributes. -// -// If the default resource has already been initialized, the provided ctx -// is ignored and the cached resource is returned. -func DefaultWithContext(ctx context.Context) *Resource { - defaultResourceOnce.Do(func() { - var err error - defaultDetectors := []Detector{ - defaultServiceNameDetector{}, - fromEnv{}, - telemetrySDK{}, - } - if x.Resource.Enabled() { - defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) - } - defaultResource, err = Detect( - ctx, - defaultDetectors..., - ) - if err != nil { - otel.Handle(err) - } - // If Detect did not return a valid resource, fall back to emptyResource. 
- if defaultResource == nil { - defaultResource = &Resource{} - } - }) - return defaultResource -} - -// Environment returns an instance of Resource with attributes -// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. -func Environment() *Resource { - return EnvironmentWithContext(context.Background()) -} - -// EnvironmentWithContext returns an instance of Resource with attributes -// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. -func EnvironmentWithContext(ctx context.Context) *Resource { - detector := &fromEnv{} - resource, err := detector.Detect(ctx) - if err != nil { - otel.Handle(err) - } - return resource -} - -// Equivalent returns an object that can be compared for equality -// between two resources. This value is suitable for use as a key in -// a map. -func (r *Resource) Equivalent() attribute.Distinct { - return r.Set().Equivalent() -} - -// Set returns the equivalent *attribute.Set of this resource's attributes. -func (r *Resource) Set() *attribute.Set { - if r == nil { - r = Empty() - } - return &r.attrs -} - -// MarshalJSON encodes the resource attributes as a JSON list of { "Key": -// "...", "Value": ... } pairs in order sorted by key. -func (r *Resource) MarshalJSON() ([]byte, error) { - if r == nil { - r = Empty() - } - return r.attrs.MarshalJSON() -} - -// Len returns the number of unique key-values in this Resource. -func (r *Resource) Len() int { - if r == nil { - return 0 - } - return r.attrs.Len() -} - -// Encoded returns an encoded representation of the resource. 
-func (r *Resource) Encoded(enc attribute.Encoder) string { - if r == nil { - return "" - } - return r.attrs.Encoded(enc) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md deleted file mode 100644 index f2936e1439..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# SDK Trace - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go deleted file mode 100644 index 32854b14a3..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "errors" - "sync" - "sync/atomic" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/trace/internal/env" - "go.opentelemetry.io/otel/sdk/trace/internal/observ" - "go.opentelemetry.io/otel/trace" -) - -// Defaults for BatchSpanProcessorOptions. -const ( - DefaultMaxQueueSize = 2048 - // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds. - DefaultScheduleDelay = 5000 - // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds. - DefaultExportTimeout = 30000 - DefaultMaxExportBatchSize = 512 -) - -// BatchSpanProcessorOption configures a BatchSpanProcessor. -type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) - -// BatchSpanProcessorOptions is configuration settings for a -// BatchSpanProcessor. -type BatchSpanProcessorOptions struct { - // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. 
If the - // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. - // The default value of MaxQueueSize is 2048. - MaxQueueSize int - - // BatchTimeout is the maximum duration for constructing a batch. Processor - // forcefully sends available spans when timeout is reached. - // The default value of BatchTimeout is 5000 msec. - BatchTimeout time.Duration - - // ExportTimeout specifies the maximum duration for exporting spans. If the timeout - // is reached, the export will be cancelled. - // The default value of ExportTimeout is 30000 msec. - ExportTimeout time.Duration - - // MaxExportBatchSize is the maximum number of spans to process in a single batch. - // If there are more than one batch worth of spans then it processes multiple batches - // of spans one batch after the other without any delay. - // The default value of MaxExportBatchSize is 512. - MaxExportBatchSize int - - // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full - // AND if BlockOnQueueFull is set to true. - // Blocking option should be used carefully as it can severely affect the performance of an - // application. - BlockOnQueueFull bool -} - -// batchSpanProcessor is a SpanProcessor that batches asynchronously-received -// spans and sends them to a trace.Exporter when complete. -type batchSpanProcessor struct { - e SpanExporter - o BatchSpanProcessorOptions - - queue chan ReadOnlySpan - dropped atomic.Uint32 - - inst *observ.BSP - - batch []ReadOnlySpan - batchMutex sync.Mutex - timer *time.Timer - stopWait sync.WaitGroup - stopOnce sync.Once - stopCh chan struct{} - stopped atomic.Bool -} - -var _ SpanProcessor = (*batchSpanProcessor)(nil) - -// NewBatchSpanProcessor creates a new SpanProcessor that will send completed -// span batches to the exporter with the supplied options. -// -// If the exporter is nil, the span processor will perform no action. 
-func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { - maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) - maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) - - if maxExportBatchSize > maxQueueSize { - maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize) - } - - o := BatchSpanProcessorOptions{ - BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, - ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, - MaxQueueSize: maxQueueSize, - MaxExportBatchSize: maxExportBatchSize, - } - for _, opt := range options { - opt(&o) - } - bsp := &batchSpanProcessor{ - e: exporter, - o: o, - batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), - timer: time.NewTimer(o.BatchTimeout), - queue: make(chan ReadOnlySpan, o.MaxQueueSize), - stopCh: make(chan struct{}), - } - - var err error - bsp.inst, err = observ.NewBSP( - nextProcessorID(), - func() int64 { return int64(len(bsp.queue)) }, - int64(bsp.o.MaxQueueSize), - ) - if err != nil { - otel.Handle(err) - } - - bsp.stopWait.Go(func() { - bsp.processQueue() - bsp.drainQueue() - }) - - return bsp -} - -var processorIDCounter atomic.Int64 - -// nextProcessorID returns an identifier for this batch span processor, -// starting with 0 and incrementing by 1 each time it is called. -func nextProcessorID() int64 { - return processorIDCounter.Add(1) - 1 -} - -// OnStart method does nothing. -func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} - -// OnEnd method enqueues a ReadOnlySpan for later processing. -func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { - // Do not enqueue spans after Shutdown. - if bsp.stopped.Load() { - return - } - - // Do not enqueue spans if we are just going to drop them. 
- if bsp.e == nil { - return - } - bsp.enqueue(s) -} - -// Shutdown flushes the queue and waits until all spans are processed. -// It only executes once. Subsequent call does nothing. -func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { - var err error - bsp.stopOnce.Do(func() { - bsp.stopped.Store(true) - wait := make(chan struct{}) - go func() { - close(bsp.stopCh) - bsp.stopWait.Wait() - if bsp.e != nil { - if err := bsp.e.Shutdown(ctx); err != nil { - otel.Handle(err) - } - } - close(wait) - }() - // Wait until the wait group is done or the context is cancelled - select { - case <-wait: - case <-ctx.Done(): - err = ctx.Err() - } - if bsp.inst != nil { - err = errors.Join(err, bsp.inst.Shutdown()) - } - }) - return err -} - -type forceFlushSpan struct { - ReadOnlySpan - flushed chan struct{} -} - -func (forceFlushSpan) SpanContext() trace.SpanContext { - return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) -} - -// ForceFlush exports all ended spans that have not yet been exported. -func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { - // Interrupt if context is already canceled. - if err := ctx.Err(); err != nil { - return err - } - - // Do nothing after Shutdown. - if bsp.stopped.Load() { - return nil - } - - var err error - if bsp.e != nil { - flushCh := make(chan struct{}) - if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { - select { - case <-bsp.stopCh: - // The batchSpanProcessor is Shutdown. 
- return nil - case <-flushCh: - // Processed any items in queue prior to ForceFlush being called - case <-ctx.Done(): - return ctx.Err() - } - } - - wait := make(chan error, 1) - go func() { - wait <- bsp.exportSpans(ctx) - }() - // Wait until the export is finished or the context is cancelled/timed out - select { - case err = <-wait: - case <-ctx.Done(): - err = ctx.Err() - } - } - return err -} - -// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the -// maximum queue size allowed for a BatchSpanProcessor. -func WithMaxQueueSize(size int) BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.MaxQueueSize = size - } -} - -// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures -// the maximum export batch size allowed for a BatchSpanProcessor. -func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.MaxExportBatchSize = size - } -} - -// WithBatchTimeout returns a BatchSpanProcessorOption that configures the -// maximum delay allowed for a BatchSpanProcessor before it will export any -// held span (whether the queue is full or not). -func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.BatchTimeout = delay - } -} - -// WithExportTimeout returns a BatchSpanProcessorOption that configures the -// amount of time a BatchSpanProcessor waits for an exporter to export before -// abandoning the export. -func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.ExportTimeout = timeout - } -} - -// WithBlocking returns a BatchSpanProcessorOption that configures a -// BatchSpanProcessor to wait for enqueue operations to succeed instead of -// dropping data when the queue is full. 
-func WithBlocking() BatchSpanProcessorOption { - return func(o *BatchSpanProcessorOptions) { - o.BlockOnQueueFull = true - } -} - -// exportSpans is a subroutine of processing and draining the queue. -func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { - bsp.timer.Reset(bsp.o.BatchTimeout) - - bsp.batchMutex.Lock() - defer bsp.batchMutex.Unlock() - - if bsp.o.ExportTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout")) - defer cancel() - } - - if l := len(bsp.batch); l > 0 { - global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", bsp.dropped.Load()) - if bsp.inst != nil { - bsp.inst.Processed(ctx, int64(l)) - } - err := bsp.e.ExportSpans(ctx, bsp.batch) - - // A new batch is always created after exporting, even if the batch failed to be exported. - // - // It is up to the exporter to implement any type of retry logic if a batch is failing - // to be exported, since it is specific to the protocol and backend being sent to. - clear(bsp.batch) // Erase elements to let GC collect objects - bsp.batch = bsp.batch[:0] - - if err != nil { - return err - } - } - return nil -} - -// processQueue removes spans from the `queue` channel until processor -// is shut down. It calls the exporter in batches of up to MaxExportBatchSize -// waiting up to BatchTimeout to form a batch. 
-func (bsp *batchSpanProcessor) processQueue() { - defer bsp.timer.Stop() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - for { - select { - case <-bsp.stopCh: - return - case <-bsp.timer.C: - if err := bsp.exportSpans(ctx); err != nil { - otel.Handle(err) - } - case sd := <-bsp.queue: - if ffs, ok := sd.(forceFlushSpan); ok { - close(ffs.flushed) - continue - } - bsp.batchMutex.Lock() - bsp.batch = append(bsp.batch, sd) - shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize - bsp.batchMutex.Unlock() - if shouldExport { - if !bsp.timer.Stop() { - // Handle both GODEBUG=asynctimerchan=[0|1] properly. - select { - case <-bsp.timer.C: - default: - } - } - if err := bsp.exportSpans(ctx); err != nil { - otel.Handle(err) - } - } - } - } -} - -// drainQueue awaits the any caller that had added to bsp.stopWait -// to finish the enqueue, then exports the final batch. -func (bsp *batchSpanProcessor) drainQueue() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - for { - select { - case sd := <-bsp.queue: - if _, ok := sd.(forceFlushSpan); ok { - // Ignore flush requests as they are not valid spans. - continue - } - - bsp.batchMutex.Lock() - bsp.batch = append(bsp.batch, sd) - shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize - bsp.batchMutex.Unlock() - - if shouldExport { - if err := bsp.exportSpans(ctx); err != nil { - otel.Handle(err) - } - } - default: - // There are no more enqueued spans. Make final export. 
- if err := bsp.exportSpans(ctx); err != nil { - otel.Handle(err) - } - return - } - } -} - -func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { - ctx := context.TODO() - if bsp.o.BlockOnQueueFull { - bsp.enqueueBlockOnQueueFull(ctx, sd) - } else { - bsp.enqueueDrop(ctx, sd) - } -} - -func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { - if !sd.SpanContext().IsSampled() { - return false - } - - select { - case bsp.queue <- sd: - return true - case <-ctx.Done(): - if bsp.inst != nil { - bsp.inst.ProcessedQueueFull(ctx, 1) - } - return false - } -} - -func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { - if !sd.SpanContext().IsSampled() { - return false - } - - select { - case bsp.queue <- sd: - return true - default: - bsp.dropped.Add(1) - if bsp.inst != nil { - bsp.inst.ProcessedQueueFull(ctx, 1) - } - } - return false -} - -// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. -func (bsp *batchSpanProcessor) MarshalLog() any { - return struct { - Type string - SpanExporter SpanExporter - Config BatchSpanProcessorOptions - }{ - Type: "BatchSpanProcessor", - SpanExporter: bsp.e, - Config: bsp.o, - } -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go deleted file mode 100644 index b502c7d479..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package trace contains support for OpenTelemetry distributed tracing. - -The following assumes a basic familiarity with OpenTelemetry concepts. -See https://opentelemetry.io. - -See [go.opentelemetry.io/otel/sdk/internal/x] for information about -the experimental features. 
-*/ -package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go deleted file mode 100644 index 60a7ed1349..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// Event is a thing that happened during a Span's lifetime. -type Event struct { - // Name is the name of this event - Name string - - // Attributes describe the aspects of the event. - Attributes []attribute.KeyValue - - // DroppedAttributeCount is the number of attributes that were not - // recorded due to configured limits being reached. - DroppedAttributeCount int - - // Time at which this event was recorded. - Time time.Time -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go deleted file mode 100644 index 8c308dd60a..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "slices" - "sync" - - "go.opentelemetry.io/otel/internal/global" -) - -// evictedQueue is a FIFO queue with a configurable capacity. -type evictedQueue[T any] struct { - queue []T - capacity int - droppedCount int - logDroppedMsg string - logDroppedOnce sync.Once -} - -func newEvictedQueueEvent(capacity int) evictedQueue[Event] { - // Do not pre-allocate queue, do this lazily. 
- return evictedQueue[Event]{ - capacity: capacity, - logDroppedMsg: "limit reached: dropping trace trace.Event", - } -} - -func newEvictedQueueLink(capacity int) evictedQueue[Link] { - // Do not pre-allocate queue, do this lazily. - return evictedQueue[Link]{ - capacity: capacity, - logDroppedMsg: "limit reached: dropping trace trace.Link", - } -} - -// add adds value to the evictedQueue eq. If eq is at capacity, the oldest -// queued value will be discarded and the drop count incremented. -func (eq *evictedQueue[T]) add(value T) { - if eq.capacity == 0 { - eq.droppedCount++ - eq.logDropped() - return - } - - if eq.capacity > 0 && len(eq.queue) == eq.capacity { - // Drop first-in while avoiding allocating more capacity to eq.queue. - copy(eq.queue[:eq.capacity-1], eq.queue[1:]) - eq.queue = eq.queue[:eq.capacity-1] - eq.droppedCount++ - eq.logDropped() - } - eq.queue = append(eq.queue, value) -} - -func (eq *evictedQueue[T]) logDropped() { - eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) }) -} - -// copy returns a copy of the evictedQueue. -func (eq *evictedQueue[T]) copy() []T { - return slices.Clone(eq.queue) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go deleted file mode 100644 index 3649322a6e..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "encoding/binary" - "math/rand/v2" - - "go.opentelemetry.io/otel/trace" -) - -// IDGenerator allows custom generators for TraceID and SpanID. -type IDGenerator interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // NewIDs returns a new trace and span ID. 
- NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // NewSpanID returns a ID for a new span in the trace with traceID. - NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -type randomIDGenerator struct{} - -var _ IDGenerator = &randomIDGenerator{} - -// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID { - sid := trace.SpanID{} - for { - binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) - if sid.IsValid() { - break - } - } - return sid -} - -// NewIDs returns a non-zero trace ID and a non-zero span ID from a -// randomly-chosen sequence. -func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) { - tid := trace.TraceID{} - sid := trace.SpanID{} - for { - binary.NativeEndian.PutUint64(tid[:8], rand.Uint64()) - binary.NativeEndian.PutUint64(tid[8:], rand.Uint64()) - if tid.IsValid() { - break - } - } - for { - binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) - if sid.IsValid() { - break - } - } - return tid, sid -} - -func defaultIDGenerator() IDGenerator { - return &randomIDGenerator{} -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go deleted file mode 100644 index 58f68df441..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package env provides types and functionality for environment variable support -// in the OpenTelemetry SDK. 
-package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env" - -import ( - "os" - "strconv" - - "go.opentelemetry.io/otel/internal/global" -) - -// Environment variable names. -const ( - // BatchSpanProcessorScheduleDelayKey is the delay interval between two - // consecutive exports (i.e. 5000). - BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" - // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to - // export data (i.e. 3000). - BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" - // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). - BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" - // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. - // 512). Note: it must be less than or equal to - // BatchSpanProcessorMaxQueueSize. - BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" - - // AttributeValueLengthKey is the maximum allowed attribute value size. - AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" - - // AttributeCountKey is the maximum allowed span attribute count. - AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" - - // SpanAttributeValueLengthKey is the maximum allowed attribute value size - // for a span. - SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" - - // SpanAttributeCountKey is the maximum allowed span attribute count for a - // span. - SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" - - // SpanEventCountKey is the maximum allowed span event count. - SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" - - // SpanEventAttributeCountKey is the maximum allowed attribute per span - // event count. - SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" - - // SpanLinkCountKey is the maximum allowed span link count. - SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" - - // SpanLinkAttributeCountKey is the maximum allowed attribute per span - // link count. 
- SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" -) - -// firstInt returns the value of the first matching environment variable from -// keys. If the value is not an integer or no match is found, defaultValue is -// returned. -func firstInt(defaultValue int, keys ...string) int { - for _, key := range keys { - value := os.Getenv(key) - if value == "" { - continue - } - - intValue, err := strconv.Atoi(value) - if err != nil { - global.Info("Got invalid value, number value expected.", key, value) - return defaultValue - } - - return intValue - } - - return defaultValue -} - -// IntEnvOr returns the int value of the environment variable with name key if -// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned. -func IntEnvOr(key string, defaultValue int) int { - value := os.Getenv(key) - if value == "" { - return defaultValue - } - - intValue, err := strconv.Atoi(value) - if err != nil { - global.Info("Got invalid value, number value expected.", key, value) - return defaultValue - } - - return intValue -} - -// BatchSpanProcessorScheduleDelay returns the environment variable value for -// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is -// returned. -func BatchSpanProcessorScheduleDelay(defaultValue int) int { - return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) -} - -// BatchSpanProcessorExportTimeout returns the environment variable value for -// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is -// returned. -func BatchSpanProcessorExportTimeout(defaultValue int) int { - return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) -} - -// BatchSpanProcessorMaxQueueSize returns the environment variable value for -// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is -// returned. 
-func BatchSpanProcessorMaxQueueSize(defaultValue int) int { - return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) -} - -// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for -// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue -// is returned. -func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { - return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) -} - -// SpanAttributeValueLength returns the environment variable value for the -// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the -// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is -// returned or defaultValue if that is not set. -func SpanAttributeValueLength(defaultValue int) int { - return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) -} - -// SpanAttributeCount returns the environment variable value for the -// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the -// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or -// defaultValue if that is not set. -func SpanAttributeCount(defaultValue int) int { - return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) -} - -// SpanEventCount returns the environment variable value for the -// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is -// returned. -func SpanEventCount(defaultValue int) int { - return IntEnvOr(SpanEventCountKey, defaultValue) -} - -// SpanEventAttributeCount returns the environment variable value for the -// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue -// is returned. -func SpanEventAttributeCount(defaultValue int) int { - return IntEnvOr(SpanEventAttributeCountKey, defaultValue) -} - -// SpanLinkCount returns the environment variable value for the -// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is -// returned. 
-func SpanLinkCount(defaultValue int) int { - return IntEnvOr(SpanLinkCountKey, defaultValue) -} - -// SpanLinkAttributeCount returns the environment variable value for the -// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is -// returned. -func SpanLinkAttributeCount(defaultValue int) int { - return IntEnvOr(SpanLinkAttributeCountKey, defaultValue) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go deleted file mode 100644 index c31e03aa0a..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" - -import ( - "context" - "errors" - "fmt" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" - "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" - "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" -) - -const ( - // ScopeName is the name of the instrumentation scope. - ScopeName = "go.opentelemetry.io/otel/sdk/trace/internal/observ" - - // SchemaURL is the schema URL of the instrumentation. - SchemaURL = semconv.SchemaURL -) - -// ErrQueueFull is the attribute value for the "queue_full" error type. -var ErrQueueFull = otelconv.SDKProcessorSpanProcessed{}.AttrErrorType( - otelconv.ErrorTypeAttr("queue_full"), -) - -// BSPComponentName returns the component name attribute for a -// BatchSpanProcessor with the given ID. 
-func BSPComponentName(id int64) attribute.KeyValue { - t := otelconv.ComponentTypeBatchingSpanProcessor - name := fmt.Sprintf("%s/%d", t, id) - return semconv.OTelComponentName(name) -} - -// BSP is the instrumentation for an OTel SDK BatchSpanProcessor. -type BSP struct { - reg metric.Registration - - processed metric.Int64Counter - processedOpts []metric.AddOption - processedQueueFullOpts []metric.AddOption -} - -func NewBSP(id int64, qLen func() int64, qMax int64) (*BSP, error) { - if !x.Observability.Enabled() { - return nil, nil - } - - meter := otel.GetMeterProvider().Meter( - ScopeName, - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(SchemaURL), - ) - - qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) - if err != nil { - err = fmt.Errorf("failed to create BSP queue capacity metric: %w", err) - } - qCapInst := qCap.Inst() - - qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) - if e != nil { - e := fmt.Errorf("failed to create BSP queue size metric: %w", e) - err = errors.Join(err, e) - } - qSizeInst := qSize.Inst() - - cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor - cmpnt := BSPComponentName(id) - set := attribute.NewSet(cmpnt, cmpntT) - - obsOpts := []metric.ObserveOption{metric.WithAttributeSet(set)} - reg, e := meter.RegisterCallback( - func(_ context.Context, o metric.Observer) error { - o.ObserveInt64(qSizeInst, qLen(), obsOpts...) - o.ObserveInt64(qCapInst, qMax, obsOpts...) 
- return nil - }, - qSizeInst, - qCapInst, - ) - if e != nil { - e := fmt.Errorf("failed to register BSP queue size/capacity callback: %w", e) - err = errors.Join(err, e) - } - - processed, e := otelconv.NewSDKProcessorSpanProcessed(meter) - if e != nil { - e := fmt.Errorf("failed to create BSP processed spans metric: %w", e) - err = errors.Join(err, e) - } - processedOpts := []metric.AddOption{metric.WithAttributeSet(set)} - - set = attribute.NewSet(cmpnt, cmpntT, ErrQueueFull) - processedQueueFullOpts := []metric.AddOption{metric.WithAttributeSet(set)} - - return &BSP{ - reg: reg, - processed: processed.Inst(), - processedOpts: processedOpts, - processedQueueFullOpts: processedQueueFullOpts, - }, err -} - -func (b *BSP) Shutdown() error { return b.reg.Unregister() } - -func (b *BSP) Processed(ctx context.Context, n int64) { - b.processed.Add(ctx, n, b.processedOpts...) -} - -func (b *BSP) ProcessedQueueFull(ctx context.Context, n int64) { - b.processed.Add(ctx, n, b.processedQueueFullOpts...) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go deleted file mode 100644 index b542121e6a..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package observ provides observability instrumentation for the OTel trace SDK -// package. 
-package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go deleted file mode 100644 index 0e77cd9537..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" - -import ( - "context" - "fmt" - "sync" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" - "go.opentelemetry.io/otel/sdk/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" - "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" -) - -var measureAttrsPool = sync.Pool{ - New: func() any { - // "component.name" + "component.type" + "error.type" - const n = 1 + 1 + 1 - s := make([]attribute.KeyValue, 0, n) - // Return a pointer to a slice instead of a slice itself - // to avoid allocations on every call. - return &s - }, -} - -// SSP is the instrumentation for an OTel SDK SimpleSpanProcessor. -type SSP struct { - spansProcessedCounter metric.Int64Counter - addOpts []metric.AddOption - attrs []attribute.KeyValue -} - -// SSPComponentName returns the component name attribute for a -// SimpleSpanProcessor with the given ID. -func SSPComponentName(id int64) attribute.KeyValue { - t := otelconv.ComponentTypeSimpleSpanProcessor - name := fmt.Sprintf("%s/%d", t, id) - return semconv.OTelComponentName(name) -} - -// NewSSP returns instrumentation for an OTel SDK SimpleSpanProcessor with the -// provided ID. -// -// If the experimental observability is disabled, nil is returned. 
-func NewSSP(id int64) (*SSP, error) { - if !x.Observability.Enabled() { - return nil, nil - } - - meter := otel.GetMeterProvider().Meter( - ScopeName, - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(SchemaURL), - ) - spansProcessedCounter, err := otelconv.NewSDKProcessorSpanProcessed(meter) - if err != nil { - err = fmt.Errorf("failed to create SSP processed spans metric: %w", err) - } - - componentName := SSPComponentName(id) - componentType := spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeSimpleSpanProcessor) - attrs := []attribute.KeyValue{componentName, componentType} - addOpts := []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(attrs...))} - - return &SSP{ - spansProcessedCounter: spansProcessedCounter.Inst(), - addOpts: addOpts, - attrs: attrs, - }, err -} - -// SpanProcessed records that a span has been processed by the SimpleSpanProcessor. -// If err is non-nil, it records the processing error as an attribute. -func (ssp *SSP) SpanProcessed(ctx context.Context, err error) { - ssp.spansProcessedCounter.Add(ctx, 1, ssp.addOption(err)...) -} - -func (ssp *SSP) addOption(err error) []metric.AddOption { - if err == nil { - return ssp.addOpts - } - attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) - defer func() { - *attrs = (*attrs)[:0] // reset the slice for reuse - measureAttrsPool.Put(attrs) - }() - *attrs = append(*attrs, ssp.attrs...) - *attrs = append(*attrs, semconv.ErrorType(err)) - // Do not inefficiently make a copy of attrs by using - // WithAttributes instead of WithAttributeSet. 
- return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*attrs...))} -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go deleted file mode 100644 index 560d316f2f..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" - -import ( - "context" - "errors" - "fmt" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" - "go.opentelemetry.io/otel/sdk/internal/x" - "go.opentelemetry.io/otel/semconv/v1.40.0/otelconv" - "go.opentelemetry.io/otel/trace" -) - -var meterOpts = []metric.MeterOption{ - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(SchemaURL), -} - -// Tracer is instrumentation for an OTel SDK Tracer. -type Tracer struct { - enabled bool - - live metric.Int64UpDownCounter - started metric.Int64Counter -} - -func NewTracer() (Tracer, error) { - if !x.Observability.Enabled() { - return Tracer{}, nil - } - meter := otel.GetMeterProvider().Meter(ScopeName, meterOpts...) 
- - var err error - l, e := otelconv.NewSDKSpanLive(meter) - if e != nil { - e = fmt.Errorf("failed to create span live metric: %w", e) - err = errors.Join(err, e) - } - - s, e := otelconv.NewSDKSpanStarted(meter) - if e != nil { - e = fmt.Errorf("failed to create span started metric: %w", e) - err = errors.Join(err, e) - } - - return Tracer{enabled: true, live: l.Inst(), started: s.Inst()}, err -} - -func (t Tracer) Enabled() bool { return t.enabled } - -func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) { - if !t.started.Enabled(ctx) { - return - } - - key := spanStartedKey{ - parent: parentStateNoParent, - sampling: samplingStateDrop, - } - - if psc.IsValid() { - if psc.IsRemote() { - key.parent = parentStateRemoteParent - } else { - key.parent = parentStateLocalParent - } - } - - if span.IsRecording() { - if span.SpanContext().IsSampled() { - key.sampling = samplingStateRecordAndSample - } else { - key.sampling = samplingStateRecordOnly - } - } - - opts := spanStartedOpts[key] - t.started.Add(ctx, 1, opts...) -} - -func (t Tracer) SpanLive(ctx context.Context, span trace.Span) { - t.spanLive(ctx, 1, span) -} - -func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { - t.spanLive(ctx, -1, span) -} - -func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { - if !t.live.Enabled(ctx) { - return - } - - key := spanLiveKey{sampled: span.SpanContext().IsSampled()} - opts := spanLiveOpts[key] - t.live.Add(ctx, value, opts...) 
-} - -type parentState int - -const ( - parentStateNoParent parentState = iota - parentStateLocalParent - parentStateRemoteParent -) - -type samplingState int - -const ( - samplingStateDrop samplingState = iota - samplingStateRecordOnly - samplingStateRecordAndSample -) - -type spanStartedKey struct { - parent parentState - sampling samplingState -} - -var spanStartedOpts = map[spanStartedKey][]metric.AddOption{ - { - parentStateNoParent, - samplingStateDrop, - }: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - )), - }, - { - parentStateLocalParent, - samplingStateDrop, - }: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - )), - }, - { - parentStateRemoteParent, - samplingStateDrop, - }: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - )), - }, - - { - parentStateNoParent, - samplingStateRecordOnly, - }: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - )), - }, - { - parentStateLocalParent, - samplingStateRecordOnly, - }: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - )), - }, - { - parentStateRemoteParent, - samplingStateRecordOnly, - }: { - metric.WithAttributeSet(attribute.NewSet( - 
otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - )), - }, - - { - parentStateNoParent, - samplingStateRecordAndSample, - }: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - )), - }, - { - parentStateLocalParent, - samplingStateRecordAndSample, - }: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - )), - }, - { - parentStateRemoteParent, - samplingStateRecordAndSample, - }: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - )), - }, -} - -type spanLiveKey struct { - sampled bool -} - -var spanLiveOpts = map[spanLiveKey][]metric.AddOption{ - {true}: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordAndSample, - ), - )), - }, - {false}: { - metric.WithAttributeSet(attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordOnly, - ), - )), - }, -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go deleted file mode 100644 index c03bdc90f6..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "go.opentelemetry.io/otel/attribute" - 
"go.opentelemetry.io/otel/trace" -) - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -type Link struct { - // SpanContext of the linked Span. - SpanContext trace.SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue - - // DroppedAttributeCount is the number of attributes that were not - // recorded due to configured limits being reached. - DroppedAttributeCount int -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go deleted file mode 100644 index cd40d299d6..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/otel/sdk/trace/internal/observ" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/embedded" - "go.opentelemetry.io/otel/trace/noop" -) - -const defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" - -// tracerProviderConfig. -type tracerProviderConfig struct { - // processors contains collection of SpanProcessors that are processing pipeline - // for spans in the trace signal. - // SpanProcessors registered with a TracerProvider and are called at the start - // and end of a Span's lifecycle, and are called in the order they are - // registered. - processors []SpanProcessor - - // sampler is the default sampler used when creating new spans. - sampler Sampler - - // idGenerator is used to generate all Span and Trace IDs when needed. 
- idGenerator IDGenerator - - // spanLimits defines the attribute, event, and link limits for spans. - spanLimits SpanLimits - - // resource contains attributes representing an entity that produces telemetry. - resource *resource.Resource -} - -// MarshalLog is the marshaling function used by the logging system to represent this Provider. -func (cfg tracerProviderConfig) MarshalLog() any { - return struct { - SpanProcessors []SpanProcessor - SamplerType string - IDGeneratorType string - SpanLimits SpanLimits - Resource *resource.Resource - }{ - SpanProcessors: cfg.processors, - SamplerType: fmt.Sprintf("%T", cfg.sampler), - IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator), - SpanLimits: cfg.spanLimits, - Resource: cfg.resource, - } -} - -// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to -// instrumentation so it can trace operational flow through a system. -type TracerProvider struct { - embedded.TracerProvider - - mu sync.Mutex - namedTracer map[instrumentation.Scope]*tracer - spanProcessors atomic.Pointer[spanProcessorStates] - - isShutdown atomic.Bool - - // These fields are not protected by the lock mu. They are assumed to be - // immutable after creation of the TracerProvider. - sampler Sampler - idGenerator IDGenerator - spanLimits SpanLimits - resource *resource.Resource -} - -var _ trace.TracerProvider = &TracerProvider{} - -// NewTracerProvider returns a new and configured TracerProvider. -// -// By default the returned TracerProvider is configured with: -// - a ParentBased(AlwaysSample) Sampler -// - a random number IDGenerator -// - the resource.Default() Resource -// - the default SpanLimits. -// -// The passed opts are used to override these default values and configure the -// returned TracerProvider appropriately. 
-func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { - o := tracerProviderConfig{ - spanLimits: NewSpanLimits(), - } - o = applyTracerProviderEnvConfigs(o) - - for _, opt := range opts { - o = opt.apply(o) - } - - o = ensureValidTracerProviderConfig(o) - - tp := &TracerProvider{ - namedTracer: make(map[instrumentation.Scope]*tracer), - sampler: o.sampler, - idGenerator: o.idGenerator, - spanLimits: o.spanLimits, - resource: o.resource, - } - global.Info("TracerProvider created", "config", o) - - spss := make(spanProcessorStates, 0, len(o.processors)) - for _, sp := range o.processors { - spss = append(spss, newSpanProcessorState(sp)) - } - tp.spanProcessors.Store(&spss) - - return tp -} - -// Tracer returns a Tracer with the given name and options. If a Tracer for -// the given name and options does not exist it is created, otherwise the -// existing Tracer is returned. -// -// If name is empty, DefaultTracerName is used instead. -// -// This method is safe to be called concurrently. -func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). - if p.isShutdown.Load() { - return noop.NewTracerProvider().Tracer(name, opts...) - } - c := trace.NewTracerConfig(opts...) - if name == "" { - name = defaultTracerName - } - is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), - Attributes: c.InstrumentationAttributes(), - } - - t, ok := func() (trace.Tracer, bool) { - p.mu.Lock() - defer p.mu.Unlock() - // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran - // after the first check above but before we acquired the mutex. 
- if p.isShutdown.Load() { - return noop.NewTracerProvider().Tracer(name, opts...), true - } - t, ok := p.namedTracer[is] - if !ok { - t = &tracer{ - provider: p, - instrumentationScope: is, - } - - var err error - t.inst, err = observ.NewTracer() - if err != nil { - otel.Handle(err) - } - - p.namedTracer[is] = t - } - return t, ok - }() - if !ok { - // This code is outside the mutex to not hold the lock while calling third party logging code: - // - That code may do slow things like I/O, which would prolong the duration the lock is held, - // slowing down all tracing consumers. - // - Logging code may be instrumented with tracing and deadlock because it could try - // acquiring the same non-reentrant mutex. - global.Info( - "Tracer created", - "name", - name, - "version", - is.Version, - "schemaURL", - is.SchemaURL, - "attributes", - is.Attributes, - ) - } - return t -} - -// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. -func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { - // This check prevents calls during a shutdown. - if p.isShutdown.Load() { - return - } - p.mu.Lock() - defer p.mu.Unlock() - // This check prevents calls after a shutdown. - if p.isShutdown.Load() { - return - } - - current := p.getSpanProcessors() - newSPS := make(spanProcessorStates, 0, len(current)+1) - newSPS = append(newSPS, current...) - newSPS = append(newSPS, newSpanProcessorState(sp)) - p.spanProcessors.Store(&newSPS) -} - -// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. -func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { - // This check prevents calls during a shutdown. - if p.isShutdown.Load() { - return - } - p.mu.Lock() - defer p.mu.Unlock() - // This check prevents calls after a shutdown. 
- if p.isShutdown.Load() { - return - } - old := p.getSpanProcessors() - if len(old) == 0 { - return - } - spss := make(spanProcessorStates, len(old)) - copy(spss, old) - - // stop the span processor if it is started and remove it from the list - var stopOnce *spanProcessorState - var idx int - for i, sps := range spss { - if sps.sp == sp { - stopOnce = sps - idx = i - } - } - if stopOnce != nil { - stopOnce.state.Do(func() { - if err := sp.Shutdown(context.Background()); err != nil { - otel.Handle(err) - } - }) - } - if len(spss) > 1 { - copy(spss[idx:], spss[idx+1:]) - } - spss[len(spss)-1] = nil - spss = spss[:len(spss)-1] - - p.spanProcessors.Store(&spss) -} - -// ForceFlush immediately exports all spans that have not yet been exported for -// all the registered span processors. -func (p *TracerProvider) ForceFlush(ctx context.Context) error { - spss := p.getSpanProcessors() - if len(spss) == 0 { - return nil - } - - var err error - for _, sps := range spss { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - err = errors.Join(err, sps.sp.ForceFlush(ctx)) - } - return err -} - -// Shutdown shuts down TracerProvider. All registered span processors are shut down -// in the order they were registered and any held computational resources are released. -// After Shutdown is called, all methods are no-ops. -func (p *TracerProvider) Shutdown(ctx context.Context) error { - // This check prevents deadlocks in case of recursive shutdown. - if p.isShutdown.Load() { - return nil - } - p.mu.Lock() - defer p.mu.Unlock() - // This check prevents calls after a shutdown has already been done concurrently. - if !p.isShutdown.CompareAndSwap(false, true) { // did toggle? 
- return nil - } - - var retErr error - for _, sps := range p.getSpanProcessors() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var err error - sps.state.Do(func() { - err = sps.sp.Shutdown(ctx) - }) - retErr = errors.Join(retErr, err) - } - p.spanProcessors.Store(&spanProcessorStates{}) - return retErr -} - -func (p *TracerProvider) getSpanProcessors() spanProcessorStates { - return *(p.spanProcessors.Load()) -} - -// TracerProviderOption configures a TracerProvider. -type TracerProviderOption interface { - apply(tracerProviderConfig) tracerProviderConfig -} - -type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig - -func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig { - return fn(cfg) -} - -// WithSyncer registers the exporter with the TracerProvider using a -// SimpleSpanProcessor. -// -// This is not recommended for production use. The synchronous nature of the -// SimpleSpanProcessor that will wrap the exporter make it good for testing, -// debugging, or showing examples of other feature, but it will be slow and -// have a high computation resource usage overhead. The WithBatcher option is -// recommended for production use instead. -func WithSyncer(e SpanExporter) TracerProviderOption { - return WithSpanProcessor(NewSimpleSpanProcessor(e)) -} - -// WithBatcher registers the exporter with the TracerProvider using a -// BatchSpanProcessor configured with the passed opts. -func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption { - return WithSpanProcessor(NewBatchSpanProcessor(e, opts...)) -} - -// WithSpanProcessor registers the SpanProcessor with a TracerProvider. 
-func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - cfg.processors = append(cfg.processors, sp) - return cfg - }) -} - -// WithResource returns a TracerProviderOption that will configure the -// Resource r as a TracerProvider's Resource. The configured Resource is -// referenced by all the Tracers the TracerProvider creates. It represents the -// entity producing telemetry. -// -// If this option is not used, the TracerProvider will use the -// resource.Default() Resource by default. -func WithResource(r *resource.Resource) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - var err error - cfg.resource, err = resource.Merge(resource.Environment(), r) - if err != nil { - otel.Handle(err) - } - return cfg - }) -} - -// WithIDGenerator returns a TracerProviderOption that will configure the -// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator -// is used by the Tracers the TracerProvider creates to generate new Span and -// Trace IDs. -// -// If this option is not used, the TracerProvider will use a random number -// IDGenerator by default. -func WithIDGenerator(g IDGenerator) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - if g != nil { - cfg.idGenerator = g - } - return cfg - }) -} - -// WithSampler returns a TracerProviderOption that will configure the Sampler -// s as a TracerProvider's Sampler. The configured Sampler is used by the -// Tracers the TracerProvider creates to make their sampling decisions for the -// Spans they create. -// -// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER -// and OTEL_TRACES_SAMPLER_ARG environment variables. 
If this option is not used -// and the sampler is not configured through environment variables or the environment -// contains invalid/unsupported configuration, the TracerProvider will use a -// ParentBased(AlwaysSample) Sampler by default. -func WithSampler(s Sampler) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - if s != nil { - cfg.sampler = s - } - return cfg - }) -} - -// WithSpanLimits returns a TracerProviderOption that configures a -// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span -// created by a Tracer from the TracerProvider. -// -// If any field of sl is zero or negative it will be replaced with the default -// value for that field. -// -// If this or WithRawSpanLimits are not provided, the TracerProvider will use -// the limits defined by environment variables, or the defaults if unset. -// Refer to the NewSpanLimits documentation for information about this -// relationship. -// -// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited -// and zero limits. This option will be kept until the next major version -// incremented release. 
-func WithSpanLimits(sl SpanLimits) TracerProviderOption { - if sl.AttributeValueLengthLimit <= 0 { - sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit - } - if sl.AttributeCountLimit <= 0 { - sl.AttributeCountLimit = DefaultAttributeCountLimit - } - if sl.EventCountLimit <= 0 { - sl.EventCountLimit = DefaultEventCountLimit - } - if sl.AttributePerEventCountLimit <= 0 { - sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit - } - if sl.LinkCountLimit <= 0 { - sl.LinkCountLimit = DefaultLinkCountLimit - } - if sl.AttributePerLinkCountLimit <= 0 { - sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit - } - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - cfg.spanLimits = sl - return cfg - }) -} - -// WithRawSpanLimits returns a TracerProviderOption that configures a -// TracerProvider to use these limits. These limits bound any Span created by -// a Tracer from the TracerProvider. -// -// The limits will be used as-is. Zero or negative values will not be changed -// to the default value like WithSpanLimits does. Setting a limit to zero will -// effectively disable the related resource it limits and setting to a -// negative value will mean that resource is unlimited. Consequentially, this -// means that the zero-value SpanLimits will disable all span resources. -// Because of this, limits should be constructed using NewSpanLimits and -// updated accordingly. -// -// If this or WithSpanLimits are not provided, the TracerProvider will use the -// limits defined by environment variables, or the defaults if unset. Refer to -// the NewSpanLimits documentation for information about this relationship. 
-func WithRawSpanLimits(limits SpanLimits) TracerProviderOption { - return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { - cfg.spanLimits = limits - return cfg - }) -} - -func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig { - for _, opt := range tracerProviderOptionsFromEnv() { - cfg = opt.apply(cfg) - } - - return cfg -} - -func tracerProviderOptionsFromEnv() []TracerProviderOption { - var opts []TracerProviderOption - - sampler, err := samplerFromEnv() - if err != nil { - otel.Handle(err) - } - - if sampler != nil { - opts = append(opts, WithSampler(sampler)) - } - - return opts -} - -// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. -func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { - if cfg.sampler == nil { - cfg.sampler = ParentBased(AlwaysSample()) - } - if cfg.idGenerator == nil { - cfg.idGenerator = defaultIDGenerator() - } - if cfg.resource == nil { - cfg.resource = resource.Default() - } - return cfg -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go deleted file mode 100644 index 9b672a1d70..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "errors" - "os" - "strconv" - "strings" -) - -const ( - tracesSamplerKey = "OTEL_TRACES_SAMPLER" - tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" - - samplerAlwaysOn = "always_on" - samplerAlwaysOff = "always_off" - samplerTraceIDRatio = "traceidratio" - samplerParentBasedAlwaysOn = "parentbased_always_on" - samplerParsedBasedAlwaysOff = "parentbased_always_off" - samplerParentBasedTraceIDRatio = "parentbased_traceidratio" -) - -type errUnsupportedSampler string - -func (e errUnsupportedSampler) 
Error() string { - return "unsupported sampler: " + string(e) -} - -var ( - errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") - errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") -) - -type samplerArgParseError struct { - parseErr error -} - -func (e samplerArgParseError) Error() string { - return "parsing sampler argument: " + e.parseErr.Error() -} - -func (e samplerArgParseError) Unwrap() error { - return e.parseErr -} - -func samplerFromEnv() (Sampler, error) { - sampler, ok := os.LookupEnv(tracesSamplerKey) - if !ok { - return nil, nil - } - - sampler = strings.ToLower(strings.TrimSpace(sampler)) - samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) - samplerArg = strings.TrimSpace(samplerArg) - - switch sampler { - case samplerAlwaysOn: - return AlwaysSample(), nil - case samplerAlwaysOff: - return NeverSample(), nil - case samplerTraceIDRatio: - if !hasSamplerArg { - return TraceIDRatioBased(1.0), nil - } - return parseTraceIDRatio(samplerArg) - case samplerParentBasedAlwaysOn: - return ParentBased(AlwaysSample()), nil - case samplerParsedBasedAlwaysOff: - return ParentBased(NeverSample()), nil - case samplerParentBasedTraceIDRatio: - if !hasSamplerArg { - return ParentBased(TraceIDRatioBased(1.0)), nil - } - ratio, err := parseTraceIDRatio(samplerArg) - return ParentBased(ratio), err - default: - return nil, errUnsupportedSampler(sampler) - } -} - -func parseTraceIDRatio(arg string) (Sampler, error) { - v, err := strconv.ParseFloat(arg, 64) - if err != nil { - return TraceIDRatioBased(1.0), samplerArgParseError{err} - } - if v < 0.0 { - return TraceIDRatioBased(1.0), errNegativeTraceIDRatio - } - if v > 1.0 { - return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio - } - - return TraceIDRatioBased(v), nil -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go deleted file mode 100644 index 
845e292c2b..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "encoding/binary" - "fmt" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// Sampler decides whether a trace should be sampled and exported. -type Sampler interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // ShouldSample returns a SamplingResult based on a decision made from the - // passed parameters. - ShouldSample(parameters SamplingParameters) SamplingResult - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Description returns information describing the Sampler. - Description() string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// SamplingParameters contains the values passed to a Sampler. -type SamplingParameters struct { - ParentContext context.Context - TraceID trace.TraceID - Name string - Kind trace.SpanKind - Attributes []attribute.KeyValue - Links []trace.Link -} - -// SamplingDecision indicates whether a span is dropped, recorded and/or sampled. -type SamplingDecision uint8 - -// Valid sampling decisions. -const ( - // Drop will not record the span and all attributes/events will be dropped. - Drop SamplingDecision = iota - - // RecordOnly indicates the span's IsRecording method returns true, but trace.FlagsSampled flag - // must not be set. - RecordOnly - - // RecordAndSample indicates the span's IsRecording method returns true and trace.FlagsSampled flag - // must be set. 
- RecordAndSample -) - -// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. -type SamplingResult struct { - Decision SamplingDecision - Attributes []attribute.KeyValue - Tracestate trace.TraceState -} - -type traceIDRatioSampler struct { - traceIDUpperBound uint64 - description string -} - -func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { - state := trace.SpanContextFromContext(p.ParentContext).TraceState() - x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 - if x < ts.traceIDUpperBound { - return SamplingResult{ - Decision: RecordAndSample, - Tracestate: state, - } - } - return SamplingResult{ - Decision: Drop, - Tracestate: state, - } -} - -func (ts traceIDRatioSampler) Description() string { - return ts.description -} - -// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will -// always sample. Fractions < 0 are treated as zero. To respect the -// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used -// as a delegate of a `Parent` sampler. -// -//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` -func TraceIDRatioBased(fraction float64) Sampler { - // Cannot use AlwaysSample() and NeverSample(), must return spec-compliant descriptions. - // See https://opentelemetry.io/docs/specs/otel/trace/sdk/#traceidratiobased. 
- if fraction >= 1 { - return predeterminedSampler{ - description: "TraceIDRatioBased{1}", - decision: RecordAndSample, - } - } - - if fraction <= 0 { - return predeterminedSampler{ - description: "TraceIDRatioBased{0}", - decision: Drop, - } - } - - return &traceIDRatioSampler{ - traceIDUpperBound: uint64(fraction * (1 << 63)), - description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction), - } -} - -type alwaysOnSampler struct{} - -func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { - return SamplingResult{ - Decision: RecordAndSample, - Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), - } -} - -func (alwaysOnSampler) Description() string { - // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwayson - return "AlwaysOnSampler" -} - -// AlwaysSample returns a Sampler that samples every trace. -// Be careful about using this sampler in a production application with -// significant traffic: a new trace will be started and exported for every -// request. -func AlwaysSample() Sampler { - return alwaysOnSampler{} -} - -type alwaysOffSampler struct{} - -func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { - return SamplingResult{ - Decision: Drop, - Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), - } -} - -func (alwaysOffSampler) Description() string { - // https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwaysoff - return "AlwaysOffSampler" -} - -// NeverSample returns a Sampler that samples no traces. 
-func NeverSample() Sampler { - return alwaysOffSampler{} -} - -type predeterminedSampler struct { - description string - decision SamplingDecision -} - -func (s predeterminedSampler) ShouldSample(p SamplingParameters) SamplingResult { - return SamplingResult{ - Decision: s.decision, - Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), - } -} - -func (s predeterminedSampler) Description() string { - return s.description -} - -// ParentBased returns a sampler decorator which behaves differently, -// based on the parent of the span. If the span has no parent, -// the decorated sampler is used to make sampling decision. If the span has -// a parent, depending on whether the parent is remote and whether it -// is sampled, one of the following samplers will apply: -// - remoteParentSampled(Sampler) (default: AlwaysOn) -// - remoteParentNotSampled(Sampler) (default: AlwaysOff) -// - localParentSampled(Sampler) (default: AlwaysOn) -// - localParentNotSampled(Sampler) (default: AlwaysOff) -func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { - return parentBased{ - root: root, - config: configureSamplersForParentBased(samplers), - } -} - -type parentBased struct { - root Sampler - config samplerConfig -} - -func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { - c := samplerConfig{ - remoteParentSampled: AlwaysSample(), - remoteParentNotSampled: NeverSample(), - localParentSampled: AlwaysSample(), - localParentNotSampled: NeverSample(), - } - - for _, so := range samplers { - c = so.apply(c) - } - - return c -} - -// samplerConfig is a group of options for parentBased sampler. -type samplerConfig struct { - remoteParentSampled, remoteParentNotSampled Sampler - localParentSampled, localParentNotSampled Sampler -} - -// ParentBasedSamplerOption configures the sampler for a particular sampling case. 
-type ParentBasedSamplerOption interface { - apply(samplerConfig) samplerConfig -} - -// WithRemoteParentSampled sets the sampler for the case of sampled remote parent. -func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption { - return remoteParentSampledOption{s} -} - -type remoteParentSampledOption struct { - s Sampler -} - -func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig { - config.remoteParentSampled = o.s - return config -} - -// WithRemoteParentNotSampled sets the sampler for the case of remote parent -// which is not sampled. -func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption { - return remoteParentNotSampledOption{s} -} - -type remoteParentNotSampledOption struct { - s Sampler -} - -func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig { - config.remoteParentNotSampled = o.s - return config -} - -// WithLocalParentSampled sets the sampler for the case of sampled local parent. -func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption { - return localParentSampledOption{s} -} - -type localParentSampledOption struct { - s Sampler -} - -func (o localParentSampledOption) apply(config samplerConfig) samplerConfig { - config.localParentSampled = o.s - return config -} - -// WithLocalParentNotSampled sets the sampler for the case of local parent -// which is not sampled. 
-func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption { - return localParentNotSampledOption{s} -} - -type localParentNotSampledOption struct { - s Sampler -} - -func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig { - config.localParentNotSampled = o.s - return config -} - -func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult { - psc := trace.SpanContextFromContext(p.ParentContext) - if psc.IsValid() { - if psc.IsRemote() { - if psc.IsSampled() { - return pb.config.remoteParentSampled.ShouldSample(p) - } - return pb.config.remoteParentNotSampled.ShouldSample(p) - } - - if psc.IsSampled() { - return pb.config.localParentSampled.ShouldSample(p) - } - return pb.config.localParentNotSampled.ShouldSample(p) - } - return pb.root.ShouldSample(p) -} - -func (pb parentBased) Description() string { - return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+ - "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}", - pb.root.Description(), - pb.config.remoteParentSampled.Description(), - pb.config.remoteParentNotSampled.Description(), - pb.config.localParentSampled.Description(), - pb.config.localParentNotSampled.Description(), - ) -} - -// AlwaysRecord returns a sampler decorator which ensures that every span -// is passed to the SpanProcessor, even those that would be normally dropped. -// It converts `Drop` decisions from the root sampler into `RecordOnly` decisions, -// allowing processors to see all spans without sending them to exporters. This is -// typically used to enable accurate span-to-metrics processing. 
-func AlwaysRecord(root Sampler) Sampler { - return alwaysRecord{root} -} - -type alwaysRecord struct { - root Sampler -} - -func (ar alwaysRecord) ShouldSample(p SamplingParameters) SamplingResult { - rootSamplerSamplingResult := ar.root.ShouldSample(p) - if rootSamplerSamplingResult.Decision == Drop { - return SamplingResult{ - Decision: RecordOnly, - Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), - } - } - return rootSamplerSamplingResult -} - -func (ar alwaysRecord) Description() string { - return "AlwaysRecord{root:" + ar.root.Description() + "}" -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go deleted file mode 100644 index 771e427a4c..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/trace/internal/observ" - "go.opentelemetry.io/otel/trace" -) - -// simpleSpanProcessor is a SpanProcessor that synchronously sends all -// completed Spans to a trace.Exporter immediately. -type simpleSpanProcessor struct { - exporterMu sync.Mutex - exporter SpanExporter - stopOnce sync.Once - - inst *observ.SSP -} - -var _ SpanProcessor = (*simpleSpanProcessor)(nil) - -// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously -// send completed spans to the exporter immediately. -// -// This SpanProcessor is not recommended for production use. The synchronous -// nature of this SpanProcessor makes it good for testing, debugging, or showing -// examples of other features, but it will be slow and have a high computation -// resource usage overhead. 
The BatchSpanProcessor is recommended for production -// use instead. -func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { - ssp := &simpleSpanProcessor{ - exporter: exporter, - } - - var err error - ssp.inst, err = observ.NewSSP(nextSimpleProcessorID()) - if err != nil { - otel.Handle(err) - } - - global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") - - return ssp -} - -var simpleProcessorIDCounter atomic.Int64 - -// nextSimpleProcessorID returns an identifier for this simple span processor, -// starting with 0 and incrementing by 1 each time it is called. -func nextSimpleProcessorID() int64 { - return simpleProcessorIDCounter.Add(1) - 1 -} - -// OnStart does nothing. -func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} - -// OnEnd immediately exports a ReadOnlySpan. -func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { - ssp.exporterMu.Lock() - defer ssp.exporterMu.Unlock() - - var err error - if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { - err = ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}) - if err != nil { - otel.Handle(err) - } - } - - if ssp.inst != nil { - // Add the span to the context to ensure the metric is recorded - // with the correct span context. - ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext()) - ssp.inst.SpanProcessed(ctx, err) - } -} - -// Shutdown shuts down the exporter this SimpleSpanProcessor exports to. -func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { - var err error - ssp.stopOnce.Do(func() { - stopFunc := func(exp SpanExporter) (<-chan error, func()) { - done := make(chan error, 1) - return done, func() { done <- exp.Shutdown(ctx) } - } - - // The exporter field of the simpleSpanProcessor needs to be zeroed to - // signal it is shut down, meaning all subsequent calls to OnEnd will - // be gracefully ignored. 
This needs to be done synchronously to avoid - // any race condition. - // - // A closure is used to keep reference to the exporter and then the - // field is zeroed. This ensures the simpleSpanProcessor is shut down - // before the exporter. This order is important as it avoids a potential - // deadlock. If the exporter shut down operation generates a span, that - // span would need to be exported. Meaning, OnEnd would be called and - // try acquiring the lock that is held here. - ssp.exporterMu.Lock() - done, shutdown := stopFunc(ssp.exporter) - ssp.exporter = nil - ssp.exporterMu.Unlock() - - go shutdown() - - // Wait for the exporter to shut down or the deadline to expire. - select { - case err = <-done: - case <-ctx.Done(): - // It is possible for the exporter to have immediately shut down and - // the context to be done simultaneously. In that case this outer - // select statement will randomly choose a case. This will result in - // a different returned error for similar scenarios. Instead, double - // check if the exporter shut down at the same time and return that - // error if so. This will ensure consistency as well as ensure - // the caller knows the exporter shut down successfully (they can - // already determine if the deadline is expired given they passed - // the context). - select { - case err = <-done: - default: - err = ctx.Err() - } - } - }) - return err -} - -// ForceFlush does nothing as there is no data to flush. -func (*simpleSpanProcessor) ForceFlush(context.Context) error { - return nil -} - -// MarshalLog is the marshaling function used by the logging system to represent -// this Span Processor. 
-func (ssp *simpleSpanProcessor) MarshalLog() any { - return struct { - Type string - Exporter SpanExporter - }{ - Type: "SimpleSpanProcessor", - Exporter: ssp.exporter, - } -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go deleted file mode 100644 index 63aa337800..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/otel/trace" -) - -// snapshot is an record of a spans state at a particular checkpointed time. -// It is used as a read-only representation of that state. -type snapshot struct { - name string - spanContext trace.SpanContext - parent trace.SpanContext - spanKind trace.SpanKind - startTime time.Time - endTime time.Time - attributes []attribute.KeyValue - events []Event - links []Link - status Status - childSpanCount int - droppedAttributeCount int - droppedEventCount int - droppedLinkCount int - resource *resource.Resource - instrumentationScope instrumentation.Scope -} - -var _ ReadOnlySpan = snapshot{} - -func (snapshot) private() {} - -// Name returns the name of the span. -func (s snapshot) Name() string { - return s.name -} - -// SpanContext returns the unique SpanContext that identifies the span. -func (s snapshot) SpanContext() trace.SpanContext { - return s.spanContext -} - -// Parent returns the unique SpanContext that identifies the parent of the -// span if one exists. If the span has no parent the returned SpanContext -// will be invalid. -func (s snapshot) Parent() trace.SpanContext { - return s.parent -} - -// SpanKind returns the role the span plays in a Trace. 
-func (s snapshot) SpanKind() trace.SpanKind { - return s.spanKind -} - -// StartTime returns the time the span started recording. -func (s snapshot) StartTime() time.Time { - return s.startTime -} - -// EndTime returns the time the span stopped recording. It will be zero if -// the span has not ended. -func (s snapshot) EndTime() time.Time { - return s.endTime -} - -// Attributes returns the defining attributes of the span. -func (s snapshot) Attributes() []attribute.KeyValue { - return s.attributes -} - -// Links returns all the links the span has to other spans. -func (s snapshot) Links() []Link { - return s.links -} - -// Events returns all the events that occurred within in the spans -// lifetime. -func (s snapshot) Events() []Event { - return s.events -} - -// Status returns the spans status. -func (s snapshot) Status() Status { - return s.status -} - -// InstrumentationScope returns information about the instrumentation -// scope that created the span. -func (s snapshot) InstrumentationScope() instrumentation.Scope { - return s.instrumentationScope -} - -// InstrumentationLibrary returns information about the instrumentation -// library that created the span. -func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility - return s.instrumentationScope -} - -// Resource returns information about the entity that produced the span. -func (s snapshot) Resource() *resource.Resource { - return s.resource -} - -// DroppedAttributes returns the number of attributes dropped by the span -// due to limits being reached. -func (s snapshot) DroppedAttributes() int { - return s.droppedAttributeCount -} - -// DroppedLinks returns the number of links dropped by the span due to limits -// being reached. -func (s snapshot) DroppedLinks() int { - return s.droppedLinkCount -} - -// DroppedEvents returns the number of events dropped by the span due to -// limits being reached. 
-func (s snapshot) DroppedEvents() int { - return s.droppedEventCount -} - -// ChildSpanCount returns the count of spans that consider the span a -// direct parent. -func (s snapshot) ChildSpanCount() int { - return s.childSpanCount -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go deleted file mode 100644 index 7d55ce1dc2..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ /dev/null @@ -1,959 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "fmt" - "reflect" - "runtime" - rt "runtime/trace" - "slices" - "strings" - "sync" - "time" - "unicode/utf8" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/embedded" -) - -// ReadOnlySpan allows reading information from the data structure underlying a -// trace.Span. It is used in places where reading information from a span is -// necessary but changing the span isn't necessary or allowed. -// -// Warning: methods may be added to this interface in minor releases. -type ReadOnlySpan interface { - // Name returns the name of the span. - Name() string - // SpanContext returns the unique SpanContext that identifies the span. - SpanContext() trace.SpanContext - // Parent returns the unique SpanContext that identifies the parent of the - // span if one exists. If the span has no parent the returned SpanContext - // will be invalid. - Parent() trace.SpanContext - // SpanKind returns the role the span plays in a Trace. - SpanKind() trace.SpanKind - // StartTime returns the time the span started recording. 
- StartTime() time.Time - // EndTime returns the time the span stopped recording. It will be zero if - // the span has not ended. - EndTime() time.Time - // Attributes returns the defining attributes of the span. - // The order of the returned attributes is not guaranteed to be stable across invocations. - Attributes() []attribute.KeyValue - // Links returns all the links the span has to other spans. - Links() []Link - // Events returns all the events that occurred within in the spans - // lifetime. - Events() []Event - // Status returns the spans status. - Status() Status - // InstrumentationScope returns information about the instrumentation - // scope that created the span. - InstrumentationScope() instrumentation.Scope - // InstrumentationLibrary returns information about the instrumentation - // library that created the span. - // - // Deprecated: please use InstrumentationScope instead. - InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility - // Resource returns information about the entity that produced the span. - Resource() *resource.Resource - // DroppedAttributes returns the number of attributes dropped by the span - // due to limits being reached. - DroppedAttributes() int - // DroppedLinks returns the number of links dropped by the span due to - // limits being reached. - DroppedLinks() int - // DroppedEvents returns the number of events dropped by the span due to - // limits being reached. - DroppedEvents() int - // ChildSpanCount returns the count of spans that consider the span a - // direct parent. - ChildSpanCount() int - - // A private method to prevent users implementing the - // interface and so future additions to it will not - // violate compatibility. - private() -} - -// ReadWriteSpan exposes the same methods as trace.Span and in addition allows -// reading information from the underlying data structure. 
-// This interface exposes the union of the methods of trace.Span (which is a -// "write-only" span) and ReadOnlySpan. New methods for writing or reading span -// information should be added under trace.Span or ReadOnlySpan, respectively. -// -// Warning: methods may be added to this interface in minor releases. -type ReadWriteSpan interface { - trace.Span - ReadOnlySpan -} - -// recordingSpan is an implementation of the OpenTelemetry Span API -// representing the individual component of a trace that is sampled. -type recordingSpan struct { - embedded.Span - - // mu protects the contents of this span. - mu sync.Mutex - - // parent holds the parent span of this span as a trace.SpanContext. - parent trace.SpanContext - - // spanKind represents the kind of this span as a trace.SpanKind. - spanKind trace.SpanKind - - // name is the name of this span. - name string - - // startTime is the time at which this span was started. - startTime time.Time - - // endTime is the time at which this span was ended. It contains the zero - // value of time.Time until the span is ended. - endTime time.Time - - // status is the status of this span. - status Status - - // childSpanCount holds the number of child spans created for this span. - childSpanCount int - - // spanContext holds the SpanContext of this span. - spanContext trace.SpanContext - - // attributes is a collection of user provided key/values. The collection - // is constrained by a configurable maximum held by the parent - // TracerProvider. When additional attributes are added after this maximum - // is reached these attributes the user is attempting to add are dropped. - // This dropped number of attributes is tracked and reported in the - // ReadOnlySpan exported when the span ends. - attributes []attribute.KeyValue - droppedAttributes int - logDropAttrsOnce sync.Once - - // events are stored in FIFO queue capped by configured limit. 
- events evictedQueue[Event] - - // links are stored in FIFO queue capped by configured limit. - links evictedQueue[Link] - - // executionTracerTaskEnd ends the execution tracer span. - executionTracerTaskEnd func() - - // tracer is the SDK tracer that created this span. - tracer *tracer - - // origCtx is the context used when starting this span that has the - // recordingSpan instance set as the active span. If not nil, it is used - // when ending the span to ensure any metrics are recorded with a context - // containing this span without requiring an additional allocation. - origCtx context.Context -} - -var ( - _ ReadWriteSpan = (*recordingSpan)(nil) - _ runtimeTracer = (*recordingSpan)(nil) -) - -func (s *recordingSpan) setOrigCtx(ctx context.Context) { - s.origCtx = ctx -} - -// SpanContext returns the SpanContext of this span. -func (s *recordingSpan) SpanContext() trace.SpanContext { - if s == nil { - return trace.SpanContext{} - } - return s.spanContext -} - -// IsRecording reports whether this span is being recorded. If this span has ended -// this will return false. -func (s *recordingSpan) IsRecording() bool { - if s == nil { - return false - } - s.mu.Lock() - defer s.mu.Unlock() - - return s.isRecording() -} - -// isRecording reports whether this span is being recorded. If this span has ended -// this will return false. -// -// This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) isRecording() bool { - if s == nil { - return false - } - return s.endTime.IsZero() -} - -// SetStatus sets the status of the Span in the form of a code and a -// description, overriding previous values set. The description is only -// included in the set status when the code is for an error. If this span is -// not being recorded than this method does nothing. 
-func (s *recordingSpan) SetStatus(code codes.Code, description string) { - if s == nil { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - if !s.isRecording() { - return - } - if s.status.Code > code { - return - } - - status := Status{Code: code} - if code == codes.Error { - status.Description = description - } - - s.status = status -} - -// SetAttributes sets attributes of this span. -// -// If a key from attributes already exists the value associated with that key -// will be overwritten with the value contained in attributes. -// -// If this span is not being recorded than this method does nothing. -// -// If adding attributes to the span would exceed the maximum amount of -// attributes the span is configured to have, the last added attributes will -// be dropped. -func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { - if s == nil || len(attributes) == 0 { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - if !s.isRecording() { - return - } - - limit := s.tracer.provider.spanLimits.AttributeCountLimit - if limit == 0 { - // No attributes allowed. - s.addDroppedAttr(len(attributes)) - return - } - - // If adding these attributes could exceed the capacity of s perform a - // de-duplication and truncation while adding to avoid over allocation. - if limit > 0 && len(s.attributes)+len(attributes) > limit { - s.addOverCapAttrs(limit, attributes) - return - } - - // Otherwise, add without deduplication. When attributes are read they - // will be deduplicated, optimizing the operation. - s.attributes = slices.Grow(s.attributes, len(attributes)) - for _, a := range attributes { - if !a.Valid() { - // Drop all invalid attributes. - s.addDroppedAttr(1) - continue - } - a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) - s.attributes = append(s.attributes, a) - } -} - -// Declared as a var so tests can override. 
-var logDropAttrs = func() { - global.Warn("limit reached: dropping trace Span attributes") -} - -// addDroppedAttr adds incr to the count of dropped attributes. -// -// The first, and only the first, time this method is called a warning will be -// logged. -// -// This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) addDroppedAttr(incr int) { - s.droppedAttributes += incr - s.logDropAttrsOnce.Do(logDropAttrs) -} - -// addOverCapAttrs adds the attributes attrs to the span s while -// de-duplicating the attributes of s and attrs and dropping attributes that -// exceed the limit. -// -// This method assumes s.mu.Lock is held by the caller. -// -// This method should only be called when there is a possibility that adding -// attrs to s will exceed the limit. Otherwise, attrs should be added to s -// without checking for duplicates and all retrieval methods of the attributes -// for s will de-duplicate as needed. -// -// This method assumes limit is a value > 0. The argument should be validated -// by the caller. -func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { - // In order to not allocate more capacity to s.attributes than needed, - // prune and truncate this addition of attributes while adding. - - // Do not set a capacity when creating this map. Benchmark testing has - // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int, len(s.attributes)) - s.dedupeAttrsFromRecord(exists) - - // Now that s.attributes is deduplicated, adding unique attributes up to - // the capacity of s will not over allocate s.attributes. - - // max size = limit - maxCap := min(len(attrs)+len(s.attributes), limit) - if cap(s.attributes) < maxCap { - s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes)) - } - for _, a := range attrs { - if !a.Valid() { - // Drop all invalid attributes. 
- s.addDroppedAttr(1) - continue - } - - if idx, ok := exists[a.Key]; ok { - // Perform all updates before dropping, even when at capacity. - a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) - s.attributes[idx] = a - continue - } - - if len(s.attributes) >= limit { - // Do not just drop all of the remaining attributes, make sure - // updates are checked and performed. - s.addDroppedAttr(1) - } else { - a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) - s.attributes = append(s.attributes, a) - exists[a.Key] = len(s.attributes) - 1 - } - } -} - -// truncateAttr returns a truncated version of attr. Only string and string -// slice attribute values are truncated. String values are truncated to at -// most a length of limit. Each string slice value is truncated in this fashion -// (the slice length itself is unaffected). -// -// No truncation is performed for a negative limit. -func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { - if limit < 0 { - return attr - } - switch attr.Value.Type() { - case attribute.STRING: - v := attr.Value.AsString() - return attr.Key.String(truncate(limit, v)) - case attribute.STRINGSLICE: - v := attr.Value.AsStringSlice() - for i := range v { - v[i] = truncate(limit, v[i]) - } - return attr.Key.StringSlice(v) - } - return attr -} - -// truncate returns a truncated version of s such that it contains less than -// the limit number of characters. Truncation is applied by returning the limit -// number of valid characters contained in s. -// -// If limit is negative, it returns the original string. -// -// UTF-8 is supported. When truncating, all invalid characters are dropped -// before applying truncation. -// -// If s already contains less than the limit number of bytes, it is returned -// unchanged. No invalid characters are removed. 
-func truncate(limit int, s string) string { - // This prioritize performance in the following order based on the most - // common expected use-cases. - // - // - Short values less than the default limit (128). - // - Strings with valid encodings that exceed the limit. - // - No limit. - // - Strings with invalid encodings that exceed the limit. - if limit < 0 || len(s) <= limit { - return s - } - - // Optimistically, assume all valid UTF-8. - var b strings.Builder - count := 0 - for i, c := range s { - if c != utf8.RuneError { - count++ - if count > limit { - return s[:i] - } - continue - } - - _, size := utf8.DecodeRuneInString(s[i:]) - if size == 1 { - // Invalid encoding. - b.Grow(len(s) - 1) - _, _ = b.WriteString(s[:i]) - s = s[i:] - break - } - } - - // Fast-path, no invalid input. - if b.Cap() == 0 { - return s - } - - // Truncate while validating UTF-8. - for i := 0; i < len(s) && count < limit; { - c := s[i] - if c < utf8.RuneSelf { - // Optimization for single byte runes (common case). - _ = b.WriteByte(c) - i++ - count++ - continue - } - - _, size := utf8.DecodeRuneInString(s[i:]) - if size == 1 { - // We checked for all 1-byte runes above, this is a RuneError. - i++ - continue - } - - _, _ = b.WriteString(s[i : i+size]) - i += size - count++ - } - - return b.String() -} - -// End ends the span. This method does nothing if the span is already ended or -// is not being recorded. -// -// The only SpanEndOption currently supported are [trace.WithTimestamp], and -// [trace.WithStackTrace]. -// -// If this method is called while panicking an error event is added to the -// Span before ending it and the panic is continued. -func (s *recordingSpan) End(options ...trace.SpanEndOption) { - // Do not start by checking if the span is being recorded which requires - // acquiring a lock. Make a minimal check that the span is not nil. 
- if s == nil { - return - } - - // Store the end time as soon as possible to avoid artificially increasing - // the span's duration in case some operation below takes a while. - et := monotonicEndTime(s.startTime) - - // Lock the span now that we have an end time and see if we need to do any more processing. - s.mu.Lock() - if !s.isRecording() { - s.mu.Unlock() - return - } - - config := trace.NewSpanEndConfig(options...) - if recovered := recover(); recovered != nil { - // Record but don't stop the panic. - defer panic(recovered) - opts := []trace.EventOption{ - trace.WithAttributes( - semconv.ExceptionType(typeStr(recovered)), - semconv.ExceptionMessage(fmt.Sprint(recovered)), - ), - } - - if config.StackTrace() { - opts = append(opts, trace.WithAttributes( - semconv.ExceptionStacktrace(recordStackTrace()), - )) - } - - s.addEvent(semconv.ExceptionEventName, opts...) - } - - if s.executionTracerTaskEnd != nil { - s.mu.Unlock() - s.executionTracerTaskEnd() - s.mu.Lock() - } - - // Setting endTime to non-zero marks the span as ended and not recording. - if config.Timestamp().IsZero() { - s.endTime = et - } else { - s.endTime = config.Timestamp() - } - s.mu.Unlock() - - if s.tracer.inst.Enabled() { - ctx := s.origCtx - if ctx == nil { - // This should not happen as the origCtx should be set, but - // ensure trace information is propagated in the case of an - // error. - ctx = trace.ContextWithSpan(context.Background(), s) - } - defer s.tracer.inst.SpanEnded(ctx, s) - } - - sps := s.tracer.provider.getSpanProcessors() - if len(sps) == 0 { - return - } - snap := s.snapshot() - for _, sp := range sps { - sp.sp.OnEnd(snap) - } -} - -// monotonicEndTime returns the end time at present but offset from start, -// monotonically. -// -// The monotonic clock is used in subtractions hence the duration since start -// added back to start gives end as a monotonic time. 
See -// https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func monotonicEndTime(start time.Time) time.Time { - return start.Add(time.Since(start)) -} - -// RecordError will record err as a span event for this span. An additional call to -// SetStatus is required if the Status of the Span should be set to Error, this method -// does not change the Span status. If this span is not being recorded or err is nil -// than this method does nothing. -func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { - if s == nil || err == nil { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - if !s.isRecording() { - return - } - - opts = append(opts, trace.WithAttributes( - semconv.ExceptionType(typeStr(err)), - semconv.ExceptionMessage(err.Error()), - )) - - c := trace.NewEventConfig(opts...) - if c.StackTrace() { - opts = append(opts, trace.WithAttributes( - semconv.ExceptionStacktrace(recordStackTrace()), - )) - } - - s.addEvent(semconv.ExceptionEventName, opts...) -} - -func typeStr(i any) string { - t := reflect.TypeOf(i) - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - return t.String() - } - return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) -} - -func recordStackTrace() string { - stackTrace := make([]byte, 2048) - n := runtime.Stack(stackTrace, false) - - return string(stackTrace[0:n]) -} - -// AddEvent adds an event with the provided name and options. If this span is -// not being recorded then this method does nothing. -func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { - if s == nil { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - if !s.isRecording() { - return - } - s.addEvent(name, o...) -} - -// addEvent adds an event with the provided name and options. -// -// This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { - c := trace.NewEventConfig(o...) 
- e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} - - // Discard attributes over limit. - limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit - if limit == 0 { - // Drop all attributes. - e.DroppedAttributeCount = len(e.Attributes) - e.Attributes = nil - } else if limit > 0 && len(e.Attributes) > limit { - // Drop over capacity. - e.DroppedAttributeCount = len(e.Attributes) - limit - e.Attributes = e.Attributes[:limit] - } - - s.events.add(e) -} - -// SetName sets the name of this span. If this span is not being recorded than -// this method does nothing. -func (s *recordingSpan) SetName(name string) { - if s == nil { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - if !s.isRecording() { - return - } - s.name = name -} - -// Name returns the name of this span. -func (s *recordingSpan) Name() string { - s.mu.Lock() - defer s.mu.Unlock() - return s.name -} - -// Name returns the SpanContext of this span's parent span. -func (s *recordingSpan) Parent() trace.SpanContext { - s.mu.Lock() - defer s.mu.Unlock() - return s.parent -} - -// SpanKind returns the SpanKind of this span. -func (s *recordingSpan) SpanKind() trace.SpanKind { - s.mu.Lock() - defer s.mu.Unlock() - return s.spanKind -} - -// StartTime returns the time this span started. -func (s *recordingSpan) StartTime() time.Time { - s.mu.Lock() - defer s.mu.Unlock() - return s.startTime -} - -// EndTime returns the time this span ended. For spans that have not yet -// ended, the returned value will be the zero value of time.Time. -func (s *recordingSpan) EndTime() time.Time { - s.mu.Lock() - defer s.mu.Unlock() - return s.endTime -} - -// Attributes returns the attributes of this span. -// -// The order of the returned attributes is not guaranteed to be stable. -func (s *recordingSpan) Attributes() []attribute.KeyValue { - s.mu.Lock() - defer s.mu.Unlock() - s.dedupeAttrs() - return s.attributes -} - -// dedupeAttrs deduplicates the attributes of s to fit capacity. 
-// -// This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) dedupeAttrs() { - // Do not set a capacity when creating this map. Benchmark testing has - // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int, len(s.attributes)) - s.dedupeAttrsFromRecord(exists) -} - -// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity -// using record as the record of unique attribute keys to their index. -// -// This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { - // Use the fact that slices share the same backing array. - unique := s.attributes[:0] - for _, a := range s.attributes { - if idx, ok := record[a.Key]; ok { - unique[idx] = a - } else { - unique = append(unique, a) - record[a.Key] = len(unique) - 1 - } - } - clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. - s.attributes = unique -} - -// Links returns the links of this span. -func (s *recordingSpan) Links() []Link { - s.mu.Lock() - defer s.mu.Unlock() - if len(s.links.queue) == 0 { - return []Link{} - } - return s.links.copy() -} - -// Events returns the events of this span. -func (s *recordingSpan) Events() []Event { - s.mu.Lock() - defer s.mu.Unlock() - if len(s.events.queue) == 0 { - return []Event{} - } - return s.events.copy() -} - -// Status returns the status of this span. -func (s *recordingSpan) Status() Status { - s.mu.Lock() - defer s.mu.Unlock() - return s.status -} - -// InstrumentationScope returns the instrumentation.Scope associated with -// the Tracer that created this span. -func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { - s.mu.Lock() - defer s.mu.Unlock() - return s.tracer.instrumentationScope -} - -// InstrumentationLibrary returns the instrumentation.Library associated with -// the Tracer that created this span. 
-func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility - s.mu.Lock() - defer s.mu.Unlock() - return s.tracer.instrumentationScope -} - -// Resource returns the Resource associated with the Tracer that created this -// span. -func (s *recordingSpan) Resource() *resource.Resource { - s.mu.Lock() - defer s.mu.Unlock() - return s.tracer.provider.resource -} - -func (s *recordingSpan) AddLink(link trace.Link) { - if s == nil { - return - } - if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && - link.SpanContext.TraceState().Len() == 0 { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - if !s.isRecording() { - return - } - - l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} - - // Discard attributes over limit. - limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit - if limit == 0 { - // Drop all attributes. - l.DroppedAttributeCount = len(l.Attributes) - l.Attributes = nil - } else if limit > 0 && len(l.Attributes) > limit { - l.DroppedAttributeCount = len(l.Attributes) - limit - l.Attributes = l.Attributes[:limit] - } - - s.links.add(l) -} - -// DroppedAttributes returns the number of attributes dropped by the span -// due to limits being reached. -func (s *recordingSpan) DroppedAttributes() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.droppedAttributes -} - -// DroppedLinks returns the number of links dropped by the span due to limits -// being reached. -func (s *recordingSpan) DroppedLinks() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.links.droppedCount -} - -// DroppedEvents returns the number of events dropped by the span due to -// limits being reached. -func (s *recordingSpan) DroppedEvents() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.events.droppedCount -} - -// ChildSpanCount returns the count of spans that consider the span a -// direct parent. 
-func (s *recordingSpan) ChildSpanCount() int { - s.mu.Lock() - defer s.mu.Unlock() - return s.childSpanCount -} - -// TracerProvider returns a trace.TracerProvider that can be used to generate -// additional Spans on the same telemetry pipeline as the current Span. -func (s *recordingSpan) TracerProvider() trace.TracerProvider { - return s.tracer.provider -} - -// snapshot creates a read-only copy of the current state of the span. -func (s *recordingSpan) snapshot() ReadOnlySpan { - var sd snapshot - s.mu.Lock() - defer s.mu.Unlock() - - sd.endTime = s.endTime - sd.instrumentationScope = s.tracer.instrumentationScope - sd.name = s.name - sd.parent = s.parent - sd.resource = s.tracer.provider.resource - sd.spanContext = s.spanContext - sd.spanKind = s.spanKind - sd.startTime = s.startTime - sd.status = s.status - sd.childSpanCount = s.childSpanCount - - if len(s.attributes) > 0 { - s.dedupeAttrs() - sd.attributes = s.attributes - } - sd.droppedAttributeCount = s.droppedAttributes - if len(s.events.queue) > 0 { - sd.events = s.events.copy() - sd.droppedEventCount = s.events.droppedCount - } - if len(s.links.queue) > 0 { - sd.links = s.links.copy() - sd.droppedLinkCount = s.links.droppedCount - } - return &sd -} - -func (s *recordingSpan) addChild() { - if s == nil { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - if !s.isRecording() { - return - } - s.childSpanCount++ -} - -func (*recordingSpan) private() {} - -// runtimeTrace starts a "runtime/trace".Task for the span and returns a -// context containing the task. -func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { - if !rt.IsEnabled() { - // Avoid additional overhead if runtime/trace is not enabled. - return ctx - } - nctx, task := rt.NewTask(ctx, s.name) - - s.mu.Lock() - s.executionTracerTaskEnd = task.End - s.mu.Unlock() - - return nctx -} - -// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API -// that wraps a SpanContext. 
It performs no operations other than to return -// the wrapped SpanContext or TracerProvider that created it. -type nonRecordingSpan struct { - embedded.Span - - // tracer is the SDK tracer that created this span. - tracer *tracer - sc trace.SpanContext -} - -var _ trace.Span = nonRecordingSpan{} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } - -// IsRecording always returns false. -func (nonRecordingSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (nonRecordingSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (nonRecordingSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (nonRecordingSpan) End(...trace.SpanEndOption) {} - -// RecordError does nothing. -func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} - -// AddEvent does nothing. -func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} - -// AddLink does nothing. -func (nonRecordingSpan) AddLink(trace.Link) {} - -// SetName does nothing. -func (nonRecordingSpan) SetName(string) {} - -// TracerProvider returns the trace.TracerProvider that provided the Tracer -// that created this span. -func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } - -func isRecording(s SamplingResult) bool { - return s.Decision == RecordOnly || s.Decision == RecordAndSample -} - -func isSampled(s SamplingResult) bool { - return s.Decision == RecordAndSample -} - -// Status is the classified state of a Span. -type Status struct { - // Code is an identifier of a Spans state classification. - Code codes.Code - // Description is a user hint about why that status was set. It is only - // applicable when Code is Error. 
- Description string -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go deleted file mode 100644 index 6bdda3d94a..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import "context" - -// SpanExporter handles the delivery of spans to external receivers. This is -// the final component in the trace export pipeline. -type SpanExporter interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // ExportSpans exports a batch of spans. - // - // This function is called synchronously, so there is no concurrency - // safety requirement. However, due to the synchronous calling pattern, - // it is critical that all timeouts and cancellations contained in the - // passed context must be honored. - // - // Any retry logic must be contained in this function. The SDK that - // calls this function will not implement any retry logic. All errors - // returned by this function are considered unrecoverable and will be - // reported to a configured error Handler. - ExportSpans(ctx context.Context, spans []ReadOnlySpan) error - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Shutdown notifies the exporter of a pending halt to operations. The - // exporter is expected to perform any cleanup or synchronization it - // requires while honoring all timeouts and cancellations contained in - // the passed context. - Shutdown(ctx context.Context) error - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. 
-} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go deleted file mode 100644 index 321d974305..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import "go.opentelemetry.io/otel/sdk/trace/internal/env" - -const ( - // DefaultAttributeValueLengthLimit is the default maximum allowed - // attribute value length, unlimited. - DefaultAttributeValueLengthLimit = -1 - - // DefaultAttributeCountLimit is the default maximum number of attributes - // a span can have. - DefaultAttributeCountLimit = 128 - - // DefaultEventCountLimit is the default maximum number of events a span - // can have. - DefaultEventCountLimit = 128 - - // DefaultLinkCountLimit is the default maximum number of links a span can - // have. - DefaultLinkCountLimit = 128 - - // DefaultAttributePerEventCountLimit is the default maximum number of - // attributes a span event can have. - DefaultAttributePerEventCountLimit = 128 - - // DefaultAttributePerLinkCountLimit is the default maximum number of - // attributes a span link can have. - DefaultAttributePerLinkCountLimit = 128 -) - -// SpanLimits represents the limits of a span. -type SpanLimits struct { - // AttributeValueLengthLimit is the maximum allowed attribute value length. - // - // This limit only applies to string and string slice attribute values. - // Any string longer than this value will be truncated to this length. - // - // Setting this to a negative value means no limit is applied. - AttributeValueLengthLimit int - - // AttributeCountLimit is the maximum allowed span attribute count. Any - // attribute added to a span once this limit is reached will be dropped. - // - // Setting this to zero means no attributes will be recorded. 
- // - // Setting this to a negative value means no limit is applied. - AttributeCountLimit int - - // EventCountLimit is the maximum allowed span event count. Any event - // added to a span once this limit is reached means it will be added but - // the oldest event will be dropped. - // - // Setting this to zero means no events we be recorded. - // - // Setting this to a negative value means no limit is applied. - EventCountLimit int - - // LinkCountLimit is the maximum allowed span link count. Any link added - // to a span once this limit is reached means it will be added but the - // oldest link will be dropped. - // - // Setting this to zero means no links we be recorded. - // - // Setting this to a negative value means no limit is applied. - LinkCountLimit int - - // AttributePerEventCountLimit is the maximum number of attributes allowed - // per span event. Any attribute added after this limit reached will be - // dropped. - // - // Setting this to zero means no attributes will be recorded for events. - // - // Setting this to a negative value means no limit is applied. - AttributePerEventCountLimit int - - // AttributePerLinkCountLimit is the maximum number of attributes allowed - // per span link. Any attribute added after this limit reached will be - // dropped. - // - // Setting this to zero means no attributes will be recorded for links. - // - // Setting this to a negative value means no limit is applied. - AttributePerLinkCountLimit int -} - -// NewSpanLimits returns a SpanLimits with all limits set to the value their -// corresponding environment variable holds, or the default if unset. 
-// -// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT -// (default: unlimited) -// -// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128) -// -// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128) -// -// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default: -// 128) -// -// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128) -// -// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128) -func NewSpanLimits() SpanLimits { - return SpanLimits{ - AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit), - AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit), - EventCountLimit: env.SpanEventCount(DefaultEventCountLimit), - LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit), - AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit), - AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit), - } -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go deleted file mode 100644 index af7f9177fc..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "sync" -) - -// SpanProcessor is a processing pipeline for spans in the trace signal. -// SpanProcessors registered with a TracerProvider and are called at the start -// and end of a Span's lifecycle, and are called in the order they are -// registered. -type SpanProcessor interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // OnStart is called when a span is started. 
It is called synchronously - // and should not block. - OnStart(parent context.Context, s ReadWriteSpan) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // OnEnd is called when span is finished. It is called synchronously and - // hence not block. - OnEnd(s ReadOnlySpan) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Shutdown is called when the SDK shuts down. Any cleanup or release of - // resources held by the processor should be done in this call. - // - // Calls to OnStart, OnEnd, or ForceFlush after this has been called - // should be ignored. - // - // All timeouts and cancellations contained in ctx must be honored, this - // should not block indefinitely. - Shutdown(ctx context.Context) error - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // ForceFlush exports all ended spans to the configured Exporter that have not yet - // been exported. It should only be called when absolutely necessary, such as when - // using a FaaS provider that may suspend the process after an invocation, but before - // the Processor can export the completed spans. - ForceFlush(ctx context.Context) error - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. 
-} - -type spanProcessorState struct { - sp SpanProcessor - state sync.Once -} - -func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { - return &spanProcessorState{sp: sp} -} - -type spanProcessorStates []*spanProcessorState diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go deleted file mode 100644 index e1d08fd4d8..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/trace/internal/observ" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/embedded" -) - -type tracer struct { - embedded.Tracer - - provider *TracerProvider - instrumentationScope instrumentation.Scope - - inst observ.Tracer -} - -var _ trace.Tracer = &tracer{} - -// Start starts a Span and returns it along with a context containing it. -// -// The Span is created with the provided name and as a child of any existing -// span context found in the passed context. The created Span will be -// configured appropriately by any SpanOption passed. -func (tr *tracer) Start( - ctx context.Context, - name string, - options ...trace.SpanStartOption, -) (context.Context, trace.Span) { - config := trace.NewSpanStartConfig(options...) - - if ctx == nil { - // Prevent trace.ContextWithSpan from panicking. - ctx = context.Background() - } - - // For local spans created by this SDK, track child span count. 
- if p := trace.SpanFromContext(ctx); p != nil { - if sdkSpan, ok := p.(*recordingSpan); ok { - sdkSpan.addChild() - } - } - - s := tr.newSpan(ctx, name, &config) - newCtx := trace.ContextWithSpan(ctx, s) - if tr.inst.Enabled() { - if o, ok := s.(interface{ setOrigCtx(context.Context) }); ok { - // If this is a recording span, store the original context. - // This allows later retrieval of baggage and other information - // that may have been stored in the context at span start time and - // to avoid the allocation of repeatedly calling - // trace.ContextWithSpan. - o.setOrigCtx(newCtx) - } - psc := trace.SpanContextFromContext(ctx) - tr.inst.SpanStarted(newCtx, psc, s) - } - - if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { - sps := tr.provider.getSpanProcessors() - for _, sp := range sps { - // Use original context. - sp.sp.OnStart(ctx, rw) - } - } - if rtt, ok := s.(runtimeTracer); ok { - newCtx = rtt.runtimeTrace(newCtx) - } - - return newCtx, s -} - -type runtimeTracer interface { - // runtimeTrace starts a "runtime/trace".Task for the span and - // returns a context containing the task. - runtimeTrace(ctx context.Context) context.Context -} - -// newSpan returns a new configured span. -func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span { - // If told explicitly to make this a new root use a zero value SpanContext - // as a parent which contains an invalid trace ID and is not remote. - var psc trace.SpanContext - if config.NewRoot() { - ctx = trace.ContextWithSpanContext(ctx, psc) - } else { - psc = trace.SpanContextFromContext(ctx) - } - - // If there is a valid parent trace ID, use it to ensure the continuity of - // the trace. Always generate a new span ID so other components can rely - // on a unique span ID, even if the Span is non-recording. 
- var tid trace.TraceID - var sid trace.SpanID - if !psc.TraceID().IsValid() { - tid, sid = tr.provider.idGenerator.NewIDs(ctx) - } else { - tid = psc.TraceID() - sid = tr.provider.idGenerator.NewSpanID(ctx, tid) - } - - samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{ - ParentContext: ctx, - TraceID: tid, - Name: name, - Kind: config.SpanKind(), - Attributes: config.Attributes(), - Links: config.Links(), - }) - - scc := trace.SpanContextConfig{ - TraceID: tid, - SpanID: sid, - TraceState: samplingResult.Tracestate, - } - if isSampled(samplingResult) { - scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled - } else { - scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled - } - sc := trace.NewSpanContext(scc) - - if !isRecording(samplingResult) { - return tr.newNonRecordingSpan(sc) - } - return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config) -} - -// newRecordingSpan returns a new configured recordingSpan. -func (tr *tracer) newRecordingSpan( - ctx context.Context, - psc, sc trace.SpanContext, - name string, - sr SamplingResult, - config *trace.SpanConfig, -) *recordingSpan { - startTime := config.Timestamp() - if startTime.IsZero() { - startTime = time.Now() - } - - s := &recordingSpan{ - // Do not pre-allocate the attributes slice here! Doing so will - // allocate memory that is likely never going to be used, or if used, - // will be over-sized. The default Go compiler has been tested to - // dynamically allocate needed space very well. Benchmarking has shown - // it to be more performant than what we can predetermine here, - // especially for the common use case of few to no added - // attributes. 
- - parent: psc, - spanContext: sc, - spanKind: trace.ValidateSpanKind(config.SpanKind()), - name: name, - startTime: startTime, - events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit), - links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit), - tracer: tr, - } - - for _, l := range config.Links() { - s.AddLink(l) - } - - s.SetAttributes(sr.Attributes...) - s.SetAttributes(config.Attributes()...) - - if tr.inst.Enabled() { - // Propagate any existing values from the context with the new span to - // the measurement context. - ctx = trace.ContextWithSpan(ctx, s) - tr.inst.SpanLive(ctx, s) - } - - return s -} - -// newNonRecordingSpan returns a new configured nonRecordingSpan. -func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { - return nonRecordingSpan{tracer: tr, sc: sc} -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go deleted file mode 100644 index 766731dd25..0000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package sdk provides the OpenTelemetry default SDK for Go. -package sdk // import "go.opentelemetry.io/otel/sdk" - -// Version is the current release version of the OpenTelemetry SDK in use. -func Version() string { - return "1.43.0" -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md deleted file mode 100644 index 2480547895..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md +++ /dev/null @@ -1,41 +0,0 @@ - -# Migration from v1.36.0 to v1.37.0 - -The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions. - -## Removed - -The following declarations have been removed. 
-Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. - -If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. -If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. - -- `ContainerRuntime` -- `ContainerRuntimeKey` -- `GenAIOpenAIRequestServiceTierAuto` -- `GenAIOpenAIRequestServiceTierDefault` -- `GenAIOpenAIRequestServiceTierKey` -- `GenAIOpenAIResponseServiceTier` -- `GenAIOpenAIResponseServiceTierKey` -- `GenAIOpenAIResponseSystemFingerprint` -- `GenAIOpenAIResponseSystemFingerprintKey` -- `GenAISystemAWSBedrock` -- `GenAISystemAnthropic` -- `GenAISystemAzureAIInference` -- `GenAISystemAzureAIOpenAI` -- `GenAISystemCohere` -- `GenAISystemDeepseek` -- `GenAISystemGCPGemini` -- `GenAISystemGCPGenAI` -- `GenAISystemGCPVertexAI` -- `GenAISystemGroq` -- `GenAISystemIBMWatsonxAI` -- `GenAISystemKey` -- `GenAISystemMistralAI` -- `GenAISystemOpenAI` -- `GenAISystemPerplexity` -- `GenAISystemXai` - -[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions -[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md deleted file mode 100644 index d795247f32..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.37.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go deleted file mode 100644 index b6b27498f2..0000000000 --- 
a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go +++ /dev/null @@ -1,15193 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" - -import "go.opentelemetry.io/otel/attribute" - -// Namespace: android -const ( - // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" - // semantic conventions. It represents the this attribute represents the state - // of the application. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "created" - // Note: The Android lifecycle states are defined in - // [Activity lifecycle callbacks], and from which the `OS identifiers` are - // derived. - // - // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc - AndroidAppStateKey = attribute.Key("android.app.state") - - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version (`os.version`) of - // the android operating system. More information can be found in the - // [Android API levels documentation]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "33", "32" - // - // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. 
More information can be found in the -// [Android API levels documentation]. -// -// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// Enum values for android.app.state -var ( - // Any time before Activity.onResume() or, if the app has no Activity, - // Context.startService() has been called in the app for the first time. - // - // Stability: development - AndroidAppStateCreated = AndroidAppStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, - // Context.stopService() has been called when the app was in the foreground - // state. - // - // Stability: development - AndroidAppStateBackground = AndroidAppStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, - // Context.startService() has been called when the app was in either the created - // or background states. - // - // Stability: development - AndroidAppStateForeground = AndroidAppStateKey.String("foreground") -) - -// Namespace: app -const ( - // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic - // conventions. It represents the unique identifier for a particular build or - // compilation of the application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0", - // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123" - AppBuildIDKey = attribute.Key("app.build_id") - - // AppInstallationIDKey is the attribute Key conforming to the - // "app.installation.id" semantic conventions. It represents a unique identifier - // representing the installation of an application on a specific device. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" - // Note: Its value SHOULD persist across launches of the same application - // installation, including through application upgrades. - // It SHOULD change if the application is uninstalled or if all applications of - // the vendor are uninstalled. - // Additionally, users might be able to reset this value (e.g. by clearing - // application data). - // If an app is installed multiple times on the same device (e.g. in different - // accounts on Android), each `app.installation.id` SHOULD have a different - // value. - // If multiple OpenTelemetry SDKs are used within the same application, they - // SHOULD use the same value for `app.installation.id`. - // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the - // `app.installation.id`. - // - // For iOS, this value SHOULD be equal to the [vendor identifier]. - // - // For Android, examples of `app.installation.id` implementations include: - // - // - [Firebase Installation ID]. - // - A globally unique UUID which is persisted across sessions in your - // application. - // - [App set ID]. - // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. - // - // More information about Android identifier best practices can be found in the - // [Android user data IDs guide]. 
- // - // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor - // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations - // [App set ID]: https://developer.android.com/identity/app-set-id - // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID - // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids - AppInstallationIDKey = attribute.Key("app.installation.id") - - // AppJankFrameCountKey is the attribute Key conforming to the - // "app.jank.frame_count" semantic conventions. It represents a number of frame - // renders that experienced jank. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 9, 42 - // Note: Depending on platform limitations, the value provided MAY be - // approximation. - AppJankFrameCountKey = attribute.Key("app.jank.frame_count") - - // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period" - // semantic conventions. It represents the time period, in seconds, for which - // this jank is being reported. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 5.0, 10.24 - AppJankPeriodKey = attribute.Key("app.jank.period") - - // AppJankThresholdKey is the attribute Key conforming to the - // "app.jank.threshold" semantic conventions. It represents the minimum - // rendering threshold for this jank, in seconds. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.016, 0.7, 1.024 - AppJankThresholdKey = attribute.Key("app.jank.threshold") - - // AppScreenCoordinateXKey is the attribute Key conforming to the - // "app.screen.coordinate.x" semantic conventions. It represents the x - // (horizontal) coordinate of a screen coordinate, in screen pixels. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 131 - AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") - - // AppScreenCoordinateYKey is the attribute Key conforming to the - // "app.screen.coordinate.y" semantic conventions. It represents the y - // (vertical) component of a screen coordinate, in screen pixels. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12, 99 - AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") - - // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" - // semantic conventions. It represents an identifier that uniquely - // differentiates this widget from other widgets in the same application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" - // Note: A widget is an application component, typically an on-screen visual GUI - // element. - AppWidgetIDKey = attribute.Key("app.widget.id") - - // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" - // semantic conventions. It represents the name of an application widget. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "submit", "attack", "Clear Cart" - // Note: A widget is an application component, typically an on-screen visual GUI - // element. - AppWidgetNameKey = attribute.Key("app.widget.name") -) - -// AppBuildID returns an attribute KeyValue conforming to the "app.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the application. -func AppBuildID(val string) attribute.KeyValue { - return AppBuildIDKey.String(val) -} - -// AppInstallationID returns an attribute KeyValue conforming to the -// "app.installation.id" semantic conventions. 
It represents a unique identifier -// representing the installation of an application on a specific device. -func AppInstallationID(val string) attribute.KeyValue { - return AppInstallationIDKey.String(val) -} - -// AppJankFrameCount returns an attribute KeyValue conforming to the -// "app.jank.frame_count" semantic conventions. It represents a number of frame -// renders that experienced jank. -func AppJankFrameCount(val int) attribute.KeyValue { - return AppJankFrameCountKey.Int(val) -} - -// AppJankPeriod returns an attribute KeyValue conforming to the -// "app.jank.period" semantic conventions. It represents the time period, in -// seconds, for which this jank is being reported. -func AppJankPeriod(val float64) attribute.KeyValue { - return AppJankPeriodKey.Float64(val) -} - -// AppJankThreshold returns an attribute KeyValue conforming to the -// "app.jank.threshold" semantic conventions. It represents the minimum rendering -// threshold for this jank, in seconds. -func AppJankThreshold(val float64) attribute.KeyValue { - return AppJankThresholdKey.Float64(val) -} - -// AppScreenCoordinateX returns an attribute KeyValue conforming to the -// "app.screen.coordinate.x" semantic conventions. It represents the x -// (horizontal) coordinate of a screen coordinate, in screen pixels. -func AppScreenCoordinateX(val int) attribute.KeyValue { - return AppScreenCoordinateXKey.Int(val) -} - -// AppScreenCoordinateY returns an attribute KeyValue conforming to the -// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) -// component of a screen coordinate, in screen pixels. -func AppScreenCoordinateY(val int) attribute.KeyValue { - return AppScreenCoordinateYKey.Int(val) -} - -// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" -// semantic conventions. It represents an identifier that uniquely differentiates -// this widget from other widgets in the same application. 
-func AppWidgetID(val string) attribute.KeyValue { - return AppWidgetIDKey.String(val) -} - -// AppWidgetName returns an attribute KeyValue conforming to the -// "app.widget.name" semantic conventions. It represents the name of an -// application widget. -func AppWidgetName(val string) attribute.KeyValue { - return AppWidgetNameKey.String(val) -} - -// Namespace: artifact -const ( - // ArtifactAttestationFilenameKey is the attribute Key conforming to the - // "artifact.attestation.filename" semantic conventions. It represents the - // provenance filename of the built attestation which directly relates to the - // build artifact filename. This filename SHOULD accompany the artifact at - // publish time. See the [SLSA Relationship] specification for more information. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "golang-binary-amd64-v0.1.0.attestation", - // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", - // "file-name-package.tar.gz.intoto.json1" - // - // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations - ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") - - // ArtifactAttestationHashKey is the attribute Key conforming to the - // "artifact.attestation.hash" semantic conventions. It represents the full - // [hash value (see glossary)], of the built attestation. Some envelopes in the - // [software attestation space] also refer to this as the **digest**. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" - // - // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec - ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") - - // ArtifactAttestationIDKey is the attribute Key conforming to the - // "artifact.attestation.id" semantic conventions. It represents the id of the - // build [software attestation]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123" - // - // [software attestation]: https://slsa.dev/attestation-model - ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") - - // ArtifactFilenameKey is the attribute Key conforming to the - // "artifact.filename" semantic conventions. It represents the human readable - // file name of the artifact, typically generated during build and release - // processes. Often includes the package name and version in the file name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", - // "release-1.tar.gz", "file-name-package.tar.gz" - // Note: This file name can also act as the [Package Name] - // in cases where the package ecosystem maps accordingly. - // Additionally, the artifact [can be published] - // for others, but that is not a guarantee. - // - // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model - // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain - ArtifactFilenameKey = attribute.Key("artifact.filename") - - // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" - // semantic conventions. 
It represents the full [hash value (see glossary)], - // often found in checksum.txt on a release of the artifact and used to verify - // package integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" - // Note: The specific algorithm used to create the cryptographic hash value is - // not defined. In situations where an artifact has multiple - // cryptographic hashes, it is up to the implementer to choose which - // hash value to set here; this should be the most secure hash algorithm - // that is suitable for the situation and consistent with the - // corresponding attestation. The implementer can then provide the other - // hash values through an additional set of attribute extensions as they - // deem necessary. - // - // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - ArtifactHashKey = attribute.Key("artifact.hash") - - // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" - // semantic conventions. It represents the [Package URL] of the - // [package artifact] provides a standard way to identify and locate the - // packaged artifact. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pkg:github/package-url/purl-spec@1209109710924", - // "pkg:npm/foo@12.12.3" - // - // [Package URL]: https://github.com/package-url/purl-spec - // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model - ArtifactPurlKey = attribute.Key("artifact.purl") - - // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" - // semantic conventions. It represents the version of the artifact. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "v0.1.0", "1.2.1", "122691-build" - ArtifactVersionKey = attribute.Key("artifact.version") -) - -// ArtifactAttestationFilename returns an attribute KeyValue conforming to the -// "artifact.attestation.filename" semantic conventions. It represents the -// provenance filename of the built attestation which directly relates to the -// build artifact filename. This filename SHOULD accompany the artifact at -// publish time. See the [SLSA Relationship] specification for more information. -// -// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations -func ArtifactAttestationFilename(val string) attribute.KeyValue { - return ArtifactAttestationFilenameKey.String(val) -} - -// ArtifactAttestationHash returns an attribute KeyValue conforming to the -// "artifact.attestation.hash" semantic conventions. It represents the full -// [hash value (see glossary)], of the built attestation. Some envelopes in the -// [software attestation space] also refer to this as the **digest**. -// -// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf -// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec -func ArtifactAttestationHash(val string) attribute.KeyValue { - return ArtifactAttestationHashKey.String(val) -} - -// ArtifactAttestationID returns an attribute KeyValue conforming to the -// "artifact.attestation.id" semantic conventions. It represents the id of the -// build [software attestation]. -// -// [software attestation]: https://slsa.dev/attestation-model -func ArtifactAttestationID(val string) attribute.KeyValue { - return ArtifactAttestationIDKey.String(val) -} - -// ArtifactFilename returns an attribute KeyValue conforming to the -// "artifact.filename" semantic conventions. 
It represents the human readable -// file name of the artifact, typically generated during build and release -// processes. Often includes the package name and version in the file name. -func ArtifactFilename(val string) attribute.KeyValue { - return ArtifactFilenameKey.String(val) -} - -// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" -// semantic conventions. It represents the full [hash value (see glossary)], -// often found in checksum.txt on a release of the artifact and used to verify -// package integrity. -// -// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf -func ArtifactHash(val string) attribute.KeyValue { - return ArtifactHashKey.String(val) -} - -// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" -// semantic conventions. It represents the [Package URL] of the -// [package artifact] provides a standard way to identify and locate the packaged -// artifact. -// -// [Package URL]: https://github.com/package-url/purl-spec -// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model -func ArtifactPurl(val string) attribute.KeyValue { - return ArtifactPurlKey.String(val) -} - -// ArtifactVersion returns an attribute KeyValue conforming to the -// "artifact.version" semantic conventions. It represents the version of the -// artifact. -func ArtifactVersion(val string) attribute.KeyValue { - return ArtifactVersionKey.String(val) -} - -// Namespace: aws -const ( - // AWSBedrockGuardrailIDKey is the attribute Key conforming to the - // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique - // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and - // prevent unwanted behavior from model responses or user messages. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "sgi5gkybzqak" - // - // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html - AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") - - // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the - // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the - // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a - // bank of information that can be queried by models to generate more relevant - // responses and augment prompts. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "XFWUPB9PAW" - // - // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html - AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") - - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the - // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the - // JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "lives", "id" - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. 
It represents the value - // of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }" - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of the - // `Count` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the - // value of the `ExclusiveStartTableName` request parameter. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Users", "CatsTable" - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }" - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the - // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents - // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` - // request field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value of - // the `IndexName` request parameter. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "name_to_group" - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the - // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents - // the JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of the - // `Limit` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the - // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents - // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }" - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value of - // the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, - // ProductReviews" - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents - // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents - // the value of the `ProvisionedThroughput.WriteCapacityUnits` request - // parameter. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. 
It represents the value of - // the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of - // the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of the - // `Segment` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of the - // `Select` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ALL_ATTRIBUTES", "COUNT" - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the number of - // items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys in - // the `RequestItems` object field. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Users", "Cats" - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the value - // of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS cluster]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" - // - // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container instance]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" - // - // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] - // for an ECS task. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" - // semantic conventions. It represents the ARN of a running [ECS task]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", - // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" - // - // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family name of - // the [ECS task definition] used to create the ECS task. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-family" - // - // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID MUST - // be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", - // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. 
It represents the revision for - // the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "8", "26" - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") - - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS - // cluster. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") - - // AWSExtendedRequestIDKey is the attribute Key conforming to the - // "aws.extended_request_id" semantic conventions. It represents the AWS - // extended request ID as returned in the response header `x-amz-id-2`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" - AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") - - // AWSKinesisStreamNameKey is the attribute Key conforming to the - // "aws.kinesis.stream_name" semantic conventions. It represents the name of the - // AWS Kinesis [stream] the request refers to. Corresponds to the - // `--stream-name` parameter of the Kinesis [describe-stream] operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "some-stream-name" - // - // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html - // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html - AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") - - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. 
It represents the full invoked - // ARN as provided on the `Context` passed to the function ( - // `Lambda-Runtime-Invoked-Function-Arn` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" - // Note: This may be different from `cloud.resource_id` if an alias is involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") - - // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the - // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID - // of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda - // function. It's contents are read by Lambda and used to trigger a function. - // This isn't available in the lambda execution context or the lambda runtime - // environtment. This is going to be populated by the AWS SDK for each language - // when that UUID is present. Some of these operations are - // Create/Delete/Get/List/Update EventSourceMapping. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" - // - // [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html - AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource - // Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" - // Note: See the [log group ARN format documentation]. 
- // - // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of the - // AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/aws/lambda/my-function", "opentelemetry-service" - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each - // write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the - // AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" - // Note: See the [log stream ARN format documentation]. One log group can - // contain several log streams, so these ARNs necessarily identify both a log - // group and a log stream. - // - // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) of the - // AWS log stream(s) an application is writing to. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in the - // response headers `x-amzn-requestid`, `x-amzn-request-id` or - // `x-amz-request-id`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" - AWSRequestIDKey = attribute.Key("aws.request_id") - - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request refers to. - // Corresponds to the `--bucket` parameter of the [S3 API] operations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "some-bucket-name" - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source object - // (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "someFile.yml" - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 API]. 
- // This applies in particular to the following operations: - // - // - [copy-object] - // - [upload-part-copy] - // - // - // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" - // Note: The `delete` attribute is only applicable to the [delete-object] - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 API]. - // - // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html - // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 API] operations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "someFile.yml" - // Note: The `key` attribute is applicable to all object-related S3 operations, - // i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - [copy-object] - // - [delete-object] - // - [get-object] - // - [head-object] - // - [put-object] - // - [restore-object] - // - [select-object-content] - // - [abort-multipart-upload] - // - [complete-multipart-upload] - // - [create-multipart-upload] - // - [list-parts] - // - [upload-part] - // - [upload-part-copy] - // - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html - // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html - // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html - // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html - // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html - // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html - // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html - // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html - // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html - // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. 
It represents the part number of - // the part being uploaded in a multipart-upload operation. This is a positive - // integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the [upload-part] - // and [upload-part-copy] operations. - // The `part_number` attribute corresponds to the `--part-number` parameter of - // the - // [upload-part operation within the S3 API]. - // - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" - // semantic conventions. It represents the upload ID that identifies the - // multipart upload. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" - // Note: The `upload_id` attribute applies to S3 multipart-upload operations and - // corresponds to the `--upload-id` parameter - // of the [S3 API] multipart operations. 
- // This applies in particular to the following operations: - // - // - [abort-multipart-upload] - // - [complete-multipart-upload] - // - [list-parts] - // - [upload-part] - // - [upload-part-copy] - // - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html - // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html - // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") - - // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the - // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN - // of the Secret stored in the Secrets Mangger. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters" - AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn") - - // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn" - // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon - // SNS [topic] is a logical access point that acts as a communication channel. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" - // - // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html - AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") - - // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" - // semantic conventions. It represents the URL of the AWS SQS Queue. It's a - // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is - // used to access the queue and perform actions on it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" - AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") - - // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the - // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN - // of the AWS Step Functions Activity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" - AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") - - // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the - // "aws.step_functions.state_machine.arn" semantic conventions. It represents - // the ARN of the AWS Step Functions State Machine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" - AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") -) - -// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the -// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique -// identifier of the AWS Bedrock Guardrail. 
A [guardrail] helps safeguard and -// prevent unwanted behavior from model responses or user messages. -// -// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html -func AWSBedrockGuardrailID(val string) attribute.KeyValue { - return AWSBedrockGuardrailIDKey.String(val) -} - -// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the -// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique -// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of -// information that can be queried by models to generate more relevant responses -// and augment prompts. -// -// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html -func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { - return AWSBedrockKnowledgeBaseIDKey.String(val) -} - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to -// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents -// the JSON-serialized value of each item in the `AttributeDefinitions` request -// field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the -// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value -// of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. 
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the -// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the -// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the -// value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to -// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field. 
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of the -// `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to -// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents -// the JSON-serialized value of the `ItemCollectionMetrics` response field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to -// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents -// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request -// field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of the -// `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. 
It -// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request -// parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming -// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It -// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request -// parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of -// the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. 
It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the -// `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value of -// the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an -// [ECS cluster]. -// -// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container instance]. -// -// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS task]. 
-// -// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task definition] used to create the ECS task. -// -// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" -// semantic conventions. It represents the ID of a running ECS task. The ID MUST -// be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// AWSExtendedRequestID returns an attribute KeyValue conforming to the -// "aws.extended_request_id" semantic conventions. It represents the AWS extended -// request ID as returned in the response header `x-amz-id-2`. -func AWSExtendedRequestID(val string) attribute.KeyValue { - return AWSExtendedRequestIDKey.String(val) -} - -// AWSKinesisStreamName returns an attribute KeyValue conforming to the -// "aws.kinesis.stream_name" semantic conventions. 
It represents the name of the -// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name` -// parameter of the Kinesis [describe-stream] operation. -// -// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html -// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html -func AWSKinesisStreamName(val string) attribute.KeyValue { - return AWSKinesisStreamNameKey.String(val) -} - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked -// ARN as provided on the `Context` passed to the function ( -// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` -// applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the -// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID -// of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda -// function. It's contents are read by Lambda and used to trigger a function. -// This isn't available in the lambda execution context or the lambda runtime -// environtment. This is going to be populated by the AWS SDK for each language -// when that UUID is present. Some of these operations are -// Create/Delete/Get/List/Update EventSourceMapping. -// -// [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html -func AWSLambdaResourceMappingID(val string) attribute.KeyValue { - return AWSLambdaResourceMappingIDKey.String(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). 
-func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of the -// AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" -// semantic conventions. It represents the AWS request ID as returned in the -// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id` -// . -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" -// semantic conventions. It represents the S3 bucket name the request refers to. -// Corresponds to the `--bucket` parameter of the [S3 API] operations. -// -// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object (in -// the form `bucket`/`key`) for the copy operation. 
-func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" -// semantic conventions. It represents the delete request container that -// specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic -// conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 API] operations. -// -// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the -// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of -// the Secret stored in the Secrets Mangger. -func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue { - return AWSSecretsmanagerSecretARNKey.String(val) -} - -// AWSSNSTopicARN returns an attribute KeyValue conforming to the -// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS -// Topic. An Amazon SNS [topic] is a logical access point that acts as a -// communication channel. 
-// -// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html -func AWSSNSTopicARN(val string) attribute.KeyValue { - return AWSSNSTopicARNKey.String(val) -} - -// AWSSQSQueueURL returns an attribute KeyValue conforming to the -// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS -// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service -// (SQS) and is used to access the queue and perform actions on it. -func AWSSQSQueueURL(val string) attribute.KeyValue { - return AWSSQSQueueURLKey.String(val) -} - -// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the -// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN -// of the AWS Step Functions Activity. -func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { - return AWSStepFunctionsActivityARNKey.String(val) -} - -// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to -// the "aws.step_functions.state_machine.arn" semantic conventions. It represents -// the ARN of the AWS Step Functions State Machine. -func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { - return AWSStepFunctionsStateMachineARNKey.String(val) -} - -// Enum values for aws.ecs.launchtype -var ( - // Amazon EC2 - // Stability: development - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // Amazon Fargate - // Stability: development - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Namespace: azure -const ( - // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" - // semantic conventions. It represents the unique identifier of the client - // instance. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" - AzureClientIDKey = attribute.Key("azure.client.id") - - // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the - // "azure.cosmosdb.connection.mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") - - // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the - // "azure.cosmosdb.consistency.level" semantic conventions. It represents the - // account or request [consistency level]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", - // "Session" - // - // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels - AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") - - // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to - // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It - // represents the list of regions contacted during operation in the order that - // they were contacted. If there is more than one region listed, it indicates - // that the operation was performed on multiple regions i.e. cross-regional - // call. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "North Central US", "Australia East", "Australia Southeast" - // Note: Region name matches the format of `displayName` in [Azure Location API] - // - // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location - AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") - - // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the - // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents - // the number of request units consumed by the operation. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 46.18, 1.0 - AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") - - // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the - // "azure.cosmosdb.request.body.size" semantic conventions. It represents the - // request payload size in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") - - // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the - // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents - // the cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1000, 1002 - AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") - - // AzureResourceProviderNamespaceKey is the attribute Key conforming to the - // "azure.resource_provider.namespace" semantic conventions. It represents the - // [Azure Resource Provider Namespace] as recognized by the client. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" - // - // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers - AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") - - // AzureServiceRequestIDKey is the attribute Key conforming to the - // "azure.service.request.id" semantic conventions. It represents the unique - // identifier of the service request. It's generated by the Azure service and - // returned with the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "00000000-0000-0000-0000-000000000000" - AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") -) - -// AzureClientID returns an attribute KeyValue conforming to the -// "azure.client.id" semantic conventions. It represents the unique identifier of -// the client instance. -func AzureClientID(val string) attribute.KeyValue { - return AzureClientIDKey.String(val) -} - -// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue -// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic -// conventions. It represents the list of regions contacted during operation in -// the order that they were contacted. If there is more than one region listed, -// it indicates that the operation was performed on multiple regions i.e. -// cross-regional call. -func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { - return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) -} - -// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming -// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It -// represents the number of request units consumed by the operation. 
-func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { - return AzureCosmosDBOperationRequestChargeKey.Float64(val) -} - -// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the -// "azure.cosmosdb.request.body.size" semantic conventions. It represents the -// request payload size in bytes. -func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { - return AzureCosmosDBRequestBodySizeKey.Int(val) -} - -// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to -// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It -// represents the cosmos DB sub status code. -func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { - return AzureCosmosDBResponseSubStatusCodeKey.Int(val) -} - -// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the -// "azure.resource_provider.namespace" semantic conventions. It represents the -// [Azure Resource Provider Namespace] as recognized by the client. -// -// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers -func AzureResourceProviderNamespace(val string) attribute.KeyValue { - return AzureResourceProviderNamespaceKey.String(val) -} - -// AzureServiceRequestID returns an attribute KeyValue conforming to the -// "azure.service.request.id" semantic conventions. It represents the unique -// identifier of the service request. It's generated by the Azure service and -// returned with the response. -func AzureServiceRequestID(val string) attribute.KeyValue { - return AzureServiceRequestIDKey.String(val) -} - -// Enum values for azure.cosmosdb.connection.mode -var ( - // Gateway (HTTP) connection. - // Stability: development - AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") - // Direct connection. 
- // Stability: development - AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") -) - -// Enum values for azure.cosmosdb.consistency.level -var ( - // Strong - // Stability: development - AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") - // Bounded Staleness - // Stability: development - AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") - // Session - // Stability: development - AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") - // Eventual - // Stability: development - AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") - // Consistent Prefix - // Stability: development - AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") -) - -// Namespace: browser -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.brands`). - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the "browser.language" - // semantic conventions. It represents the preferred language of the user using - // the browser. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "en", "en-US", "fr", "fr-FR" - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. 
- BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the browser is - // running on a mobile device. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be - // left unset. - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" - // semantic conventions. It represents the platform on which the browser is - // running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Windows", "macOS", "Android" - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD - // be left unset in order for the values to be consistent. - // The list of possible values is defined in the - // [W3C User-Agent Client Hints specification]. Note that some (but not all) of - // these values can overlap with values in the - // [`os.type` and `os.name` attributes]. However, for consistency, the values in - // the `browser.platform` attribute should capture the exact value that the user - // agent provides. 
- // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform - // [`os.type` and `os.name` attributes]: ./os.md - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" -// semantic conventions. It represents the array of brand name and version -// separated by a space. -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred language -// of the user using the browser. -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" -// semantic conventions. It represents a boolean that is true if the browser is -// running on a mobile device. -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running. -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Namespace: cassandra -const ( - // CassandraConsistencyLevelKey is the attribute Key conforming to the - // "cassandra.consistency.level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from [CQL]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html - CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") - - // CassandraCoordinatorDCKey is the attribute Key conforming to the - // "cassandra.coordinator.dc" semantic conventions. It represents the data - // center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: us-west-2 - CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") - - // CassandraCoordinatorIDKey is the attribute Key conforming to the - // "cassandra.coordinator.id" semantic conventions. It represents the ID of the - // coordinating node for a query. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af - CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") - - // CassandraPageSizeKey is the attribute Key conforming to the - // "cassandra.page.size" semantic conventions. It represents the fetch size used - // for paging, i.e. how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 5000 - CassandraPageSizeKey = attribute.Key("cassandra.page.size") - - // CassandraQueryIdempotentKey is the attribute Key conforming to the - // "cassandra.query.idempotent" semantic conventions. It represents the whether - // or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") - - // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the - // "cassandra.speculative_execution.count" semantic conventions. 
It represents - // the number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 2 - CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") -) - -// CassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "cassandra.coordinator.dc" semantic conventions. It represents the data center -// of the coordinating node for a query. -func CassandraCoordinatorDC(val string) attribute.KeyValue { - return CassandraCoordinatorDCKey.String(val) -} - -// CassandraCoordinatorID returns an attribute KeyValue conforming to the -// "cassandra.coordinator.id" semantic conventions. It represents the ID of the -// coordinating node for a query. -func CassandraCoordinatorID(val string) attribute.KeyValue { - return CassandraCoordinatorIDKey.String(val) -} - -// CassandraPageSize returns an attribute KeyValue conforming to the -// "cassandra.page.size" semantic conventions. It represents the fetch size used -// for paging, i.e. how many rows will be returned at once. -func CassandraPageSize(val int) attribute.KeyValue { - return CassandraPageSizeKey.Int(val) -} - -// CassandraQueryIdempotent returns an attribute KeyValue conforming to the -// "cassandra.query.idempotent" semantic conventions. It represents the whether -// or not the query is idempotent. -func CassandraQueryIdempotent(val bool) attribute.KeyValue { - return CassandraQueryIdempotentKey.Bool(val) -} - -// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to -// the "cassandra.speculative_execution.count" semantic conventions. It -// represents the number of times a query was speculatively executed. Not set or -// `0` if the query was not executed speculatively. 
-func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return CassandraSpeculativeExecutionCountKey.Int(val) -} - -// Enum values for cassandra.consistency.level -var ( - // All - // Stability: development - CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") - // Each Quorum - // Stability: development - CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") - // Quorum - // Stability: development - CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") - // Local Quorum - // Stability: development - CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") - // One - // Stability: development - CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") - // Two - // Stability: development - CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") - // Three - // Stability: development - CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") - // Local One - // Stability: development - CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") - // Any - // Stability: development - CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") - // Serial - // Stability: development - CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") - // Local Serial - // Stability: development - CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") -) - -// Namespace: cicd -const ( - // CICDPipelineActionNameKey is the attribute Key conforming to the - // "cicd.pipeline.action.name" semantic conventions. It represents the kind of - // action a pipeline run is performing. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "BUILD", "RUN", "SYNC" - CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name") - - // CICDPipelineNameKey is the attribute Key conforming to the - // "cicd.pipeline.name" semantic conventions. It represents the human readable - // name of the pipeline within a CI/CD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Build and Test", "Lint", "Deploy Go Project", - // "deploy_to_environment" - CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") - - // CICDPipelineResultKey is the attribute Key conforming to the - // "cicd.pipeline.result" semantic conventions. It represents the result of a - // pipeline run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "timeout", "skipped" - CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") - - // CICDPipelineRunIDKey is the attribute Key conforming to the - // "cicd.pipeline.run.id" semantic conventions. It represents the unique - // identifier of a pipeline run within a CI/CD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "120912" - CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") - - // CICDPipelineRunStateKey is the attribute Key conforming to the - // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline - // run goes through these states during its lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pending", "executing", "finalizing" - CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") - - // CICDPipelineRunURLFullKey is the attribute Key conforming to the - // "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of - // the pipeline run, providing the complete address in order to locate and - // identify the pipeline run. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") - - // CICDPipelineTaskNameKey is the attribute Key conforming to the - // "cicd.pipeline.task.name" semantic conventions. It represents the human - // readable name of a task within a pipeline. Task here most closely aligns with - // a [computing process] in a pipeline. Other terms for tasks include commands, - // steps, and procedures. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" - // - // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) - CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") - - // CICDPipelineTaskRunIDKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique - // identifier of a task run within a pipeline. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "12097" - CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") - - // CICDPipelineTaskRunResultKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.result" semantic conventions. It represents the - // result of a task run. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "timeout", "skipped" - CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") - - // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the - // [URL] of the pipeline task run, providing the complete address in order to - // locate and identify the pipeline task run. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") - - // CICDPipelineTaskTypeKey is the attribute Key conforming to the - // "cicd.pipeline.task.type" semantic conventions. It represents the type of the - // task within a pipeline. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "build", "test", "deploy" - CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") - - // CICDSystemComponentKey is the attribute Key conforming to the - // "cicd.system.component" semantic conventions. It represents the name of a - // component of the CICD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "controller", "scheduler", "agent" - CICDSystemComponentKey = attribute.Key("cicd.system.component") - - // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" - // semantic conventions. It represents the unique identifier of a worker within - // a CICD system. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "abc123", "10.0.1.2", "controller" - CICDWorkerIDKey = attribute.Key("cicd.worker.id") - - // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" - // semantic conventions. It represents the name of a worker within a CICD - // system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "agent-abc", "controller", "Ubuntu LTS" - CICDWorkerNameKey = attribute.Key("cicd.worker.name") - - // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" - // semantic conventions. It represents the state of a CICD worker / agent. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "idle", "busy", "down" - CICDWorkerStateKey = attribute.Key("cicd.worker.state") - - // CICDWorkerURLFullKey is the attribute Key conforming to the - // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the - // worker, providing the complete address in order to locate and identify the - // worker. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://cicd.example.org/worker/abc123" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") -) - -// CICDPipelineName returns an attribute KeyValue conforming to the -// "cicd.pipeline.name" semantic conventions. It represents the human readable -// name of the pipeline within a CI/CD system. -func CICDPipelineName(val string) attribute.KeyValue { - return CICDPipelineNameKey.String(val) -} - -// CICDPipelineRunID returns an attribute KeyValue conforming to the -// "cicd.pipeline.run.id" semantic conventions. It represents the unique -// identifier of a pipeline run within a CI/CD system. 
-func CICDPipelineRunID(val string) attribute.KeyValue { - return CICDPipelineRunIDKey.String(val) -} - -// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the -// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of -// the pipeline run, providing the complete address in order to locate and -// identify the pipeline run. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDPipelineRunURLFull(val string) attribute.KeyValue { - return CICDPipelineRunURLFullKey.String(val) -} - -// CICDPipelineTaskName returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.name" semantic conventions. It represents the human -// readable name of a task within a pipeline. Task here most closely aligns with -// a [computing process] in a pipeline. Other terms for tasks include commands, -// steps, and procedures. -// -// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) -func CICDPipelineTaskName(val string) attribute.KeyValue { - return CICDPipelineTaskNameKey.String(val) -} - -// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique -// identifier of a task run within a pipeline. -func CICDPipelineTaskRunID(val string) attribute.KeyValue { - return CICDPipelineTaskRunIDKey.String(val) -} - -// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the -// [URL] of the pipeline task run, providing the complete address in order to -// locate and identify the pipeline task run. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { - return CICDPipelineTaskRunURLFullKey.String(val) -} - -// CICDSystemComponent returns an attribute KeyValue conforming to the -// "cicd.system.component" semantic conventions. 
It represents the name of a -// component of the CICD system. -func CICDSystemComponent(val string) attribute.KeyValue { - return CICDSystemComponentKey.String(val) -} - -// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" -// semantic conventions. It represents the unique identifier of a worker within a -// CICD system. -func CICDWorkerID(val string) attribute.KeyValue { - return CICDWorkerIDKey.String(val) -} - -// CICDWorkerName returns an attribute KeyValue conforming to the -// "cicd.worker.name" semantic conventions. It represents the name of a worker -// within a CICD system. -func CICDWorkerName(val string) attribute.KeyValue { - return CICDWorkerNameKey.String(val) -} - -// CICDWorkerURLFull returns an attribute KeyValue conforming to the -// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the -// worker, providing the complete address in order to locate and identify the -// worker. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDWorkerURLFull(val string) attribute.KeyValue { - return CICDWorkerURLFullKey.String(val) -} - -// Enum values for cicd.pipeline.action.name -var ( - // The pipeline run is executing a build. - // Stability: development - CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") - // The pipeline run is executing. - // Stability: development - CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") - // The pipeline run is executing a sync. - // Stability: development - CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") -) - -// Enum values for cicd.pipeline.result -var ( - // The pipeline run finished successfully. - // Stability: development - CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") - // The pipeline run did not finish successfully, eg. due to a compile error or a - // failing test. Such failures are usually detected by non-zero exit codes of - // the tools executed in the pipeline run. 
- // Stability: development - CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") - // The pipeline run failed due to an error in the CICD system, eg. due to the - // worker being killed. - // Stability: development - CICDPipelineResultError = CICDPipelineResultKey.String("error") - // A timeout caused the pipeline run to be interrupted. - // Stability: development - CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") - // The pipeline run was cancelled, eg. by a user manually cancelling the - // pipeline run. - // Stability: development - CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") - // The pipeline run was skipped, eg. due to a precondition not being met. - // Stability: development - CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") -) - -// Enum values for cicd.pipeline.run.state -var ( - // The run pending state spans from the event triggering the pipeline run until - // the execution of the run starts (eg. time spent in a queue, provisioning - // agents, creating run resources). - // - // Stability: development - CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") - // The executing state spans the execution of any run tasks (eg. build, test). - // Stability: development - CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") - // The finalizing state spans from when the run has finished executing (eg. - // cleanup of run resources). - // Stability: development - CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") -) - -// Enum values for cicd.pipeline.task.run.result -var ( - // The task run finished successfully. - // Stability: development - CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") - // The task run did not finish successfully, eg. due to a compile error or a - // failing test. 
Such failures are usually detected by non-zero exit codes of - // the tools executed in the task run. - // Stability: development - CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") - // The task run failed due to an error in the CICD system, eg. due to the worker - // being killed. - // Stability: development - CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") - // A timeout caused the task run to be interrupted. - // Stability: development - CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") - // The task run was cancelled, eg. by a user manually cancelling the task run. - // Stability: development - CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") - // The task run was skipped, eg. due to a precondition not being met. - // Stability: development - CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") -) - -// Enum values for cicd.pipeline.task.type -var ( - // build - // Stability: development - CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") - // test - // Stability: development - CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") - // deploy - // Stability: development - CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") -) - -// Enum values for cicd.worker.state -var ( - // The worker is not performing work for the CICD system. It is available to the - // CICD system to perform work on (online / idle). - // Stability: development - CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") - // The worker is performing work for the CICD system. - // Stability: development - CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") - // The worker is not available to the CICD system (disconnected / down). 
- // Stability: development - CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") -) - -// Namespace: client -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix domain - // socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the server side, and when communicating through an - // intermediary, `client.address` SHOULD represent the client address behind any - // intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" semantic - // conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - // Note: When observed from the server side, and when communicating through an - // intermediary, `client.port` SHOULD represent the client port behind any - // intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the "client.address" -// semantic conventions. It represents the client address - domain name if -// available without reverse DNS lookup; otherwise, IP address or Unix domain -// socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. 
-func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// Namespace: cloud -const ( - // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" - // semantic conventions. It represents the cloud account ID the resource is - // assigned to. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "111111111111", "opentelemetry" - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to increase - // availability. Availability zone represents the zone where the resource is - // running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-east-1c" - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic - // conventions. It represents the geographical region within a cloud provider. 
- // When associated with a resource, this attribute specifies the region where - // the resource operates. When calling services or APIs deployed on a cloud, - // this attribute identifies the region where the called destination is - // deployed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1", "us-east-1" - // Note: Refer to your provider's docs to see the available regions, for example - // [Alibaba Cloud regions], [AWS regions], [Azure regions], - // [Google Cloud regions], or [Tencent Cloud regions]. - // - // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm - // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ - // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ - // [Google Cloud regions]: https://cloud.google.com/about/locations - // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" - // semantic conventions. It represents the cloud provider-specific native - // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a - // [fully qualified resource ID] on Azure, a [full resource name] on GCP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", - // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", - // "/subscriptions//resourceGroups/ - // /providers/Microsoft.Web/sites//functions/" - // Note: On some cloud providers, it may not be possible to determine the full - // ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud provider. 
- // The following well-known definitions MUST be used if you set this attribute - // and they apply: - // - // - **AWS Lambda:** The function [ARN]. - // Take care not to use the "invoked ARN" directly but replace any - // [alias suffix] - // with the resolved function version, as the same runtime instance may be - // invocable with - // multiple different aliases. - // - **GCP:** The [URI of the resource] - // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, - // *not* the function app, having the form - // - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` - // . - // This means that a span attribute MUST be used, as an Azure function app - // can host multiple functions that would usually share - // a TracerProvider. - // - // - // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id - // [full resource name]: https://google.aip.dev/122#full-resource-names - // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html - // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names - // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id - CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. 
It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" -// semantic conventions. It represents the geographical region within a cloud -// provider. When associated with a resource, this attribute specifies the region -// where the resource operates. When calling services or APIs deployed on a -// cloud, this attribute identifies the region where the called destination is -// deployed. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] -// on GCP). 
-// -// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id -// [full resource name]: https://google.aip.dev/122#full-resource-names -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Enum values for cloud.platform -var ( - // Alibaba Cloud Elastic Compute Service - // Stability: development - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - // Stability: development - CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - // Stability: development - CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - // Stability: development - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - // Stability: development - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - // Stability: development - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - // Stability: development - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - // Stability: development - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - // Stability: development - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - // Stability: development - CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - // Stability: development - CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") - // Azure Container Apps - // Stability: development - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") - // Azure Container 
Instances - // Stability: development - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") - // Azure Kubernetes Service - // Stability: development - CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") - // Azure Functions - // Stability: development - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") - // Azure App Service - // Stability: development - CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") - // Azure Red Hat OpenShift - // Stability: development - CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") - // Google Bare Metal Solution (BMS) - // Stability: development - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - // Stability: development - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - // Stability: development - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - // Stability: development - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - // Stability: development - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - // Stability: development - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - // Stability: development - CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - // Stability: development - CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") - // Compute on Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") - // Kubernetes Engine (OKE) on Oracle Cloud 
Infrastructure (OCI) - // Stability: development - CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") - // Tencent Cloud Cloud Virtual Machine (CVM) - // Stability: development - CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - // Stability: development - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - // Stability: development - CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") -) - -// Enum values for cloud.provider -var ( - // Alibaba Cloud - // Stability: development - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - // Stability: development - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - // Stability: development - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - // Stability: development - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - // Stability: development - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - // Stability: development - CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") - // Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") - // Tencent Cloud - // Stability: development - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// Namespace: cloudevents -const ( - // CloudEventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the [event_id] - // uniquely identifies the event. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" - // - // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id - CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudEventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the [source] - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", - // "my-service" - // - // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 - CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudEventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents specification] which the event uses. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - // - // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion - CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudEventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the [subject] - // of the event in the context of the event producer (identified by source). 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: mynewfile.jpg - // - // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject - CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudEventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the [event_type] - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" - // - // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type - CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudEventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the [event_id] -// uniquely identifies the event. -// -// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id -func CloudEventsEventID(val string) attribute.KeyValue { - return CloudEventsEventIDKey.String(val) -} - -// CloudEventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the [source] -// identifies the context in which an event happened. -// -// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 -func CloudEventsEventSource(val string) attribute.KeyValue { - return CloudEventsEventSourceKey.String(val) -} - -// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the -// "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents specification] which the event uses. 
-// -// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion -func CloudEventsEventSpecVersion(val string) attribute.KeyValue { - return CloudEventsEventSpecVersionKey.String(val) -} - -// CloudEventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the [subject] -// of the event in the context of the event producer (identified by source). -// -// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject -func CloudEventsEventSubject(val string) attribute.KeyValue { - return CloudEventsEventSubjectKey.String(val) -} - -// CloudEventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the [event_type] -// contains a value describing the type of event related to the originating -// occurrence. -// -// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type -func CloudEventsEventType(val string) attribute.KeyValue { - return CloudEventsEventTypeKey.String(val) -} - -// Namespace: cloudfoundry -const ( - // CloudFoundryAppIDKey is the attribute Key conforming to the - // "cloudfoundry.app.id" semantic conventions. It represents the guid of the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.application_id`. This is the same value as - // reported by `cf app --guid`. - CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") - - // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the - // "cloudfoundry.app.instance.id" semantic conventions. It represents the index - // of the application instance. 0 when just one instance is active. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0", "1" - // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] - // . - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the application instance index for applications - // deployed on the runtime. - // - // Application instrumentation should use the value from environment - // variable `CF_INSTANCE_INDEX`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") - - // CloudFoundryAppNameKey is the attribute Key conforming to the - // "cloudfoundry.app.name" semantic conventions. It represents the name of the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-app-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.application_name`. This is the same value - // as reported by `cf apps`. - CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") - - // CloudFoundryOrgIDKey is the attribute Key conforming to the - // "cloudfoundry.org.id" semantic conventions. It represents the guid of the - // CloudFoundry org the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.org_id`. This is the same value as - // reported by `cf org --guid`. - CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") - - // CloudFoundryOrgNameKey is the attribute Key conforming to the - // "cloudfoundry.org.name" semantic conventions. It represents the name of the - // CloudFoundry organization the app is running in. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-org-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.org_name`. This is the same value as - // reported by `cf orgs`. - CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") - - // CloudFoundryProcessIDKey is the attribute Key conforming to the - // "cloudfoundry.process.id" semantic conventions. It represents the UID - // identifying the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to - // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. - // For system components, this could be the actual PID. - CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") - - // CloudFoundryProcessTypeKey is the attribute Key conforming to the - // "cloudfoundry.process.type" semantic conventions. It represents the type of - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "web" - // Note: CloudFoundry applications can consist of multiple jobs. Usually the - // main process will be of type `web`. There can be additional background - // tasks or side-cars with different process types. - CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") - - // CloudFoundrySpaceIDKey is the attribute Key conforming to the - // "cloudfoundry.space.id" semantic conventions. It represents the guid of the - // CloudFoundry space the application is running in. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.space_id`. This is the same value as - // reported by `cf space --guid`. - CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") - - // CloudFoundrySpaceNameKey is the attribute Key conforming to the - // "cloudfoundry.space.name" semantic conventions. It represents the name of the - // CloudFoundry space the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-space-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.space_name`. This is the same value as - // reported by `cf spaces`. - CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") - - // CloudFoundrySystemIDKey is the attribute Key conforming to the - // "cloudfoundry.system.id" semantic conventions. It represents a guid or - // another name describing the event source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cf/gorouter" - // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the component name, e.g. "gorouter", for - // CloudFoundry components. - // - // When system components are instrumented, values from the - // [Bosh spec] - // should be used. The `system.id` should be set to - // `spec.deployment/spec.name`. 
- // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec - CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") - - // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the - // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid - // describing the concrete instance of the event source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] - // . - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the vm id for CloudFoundry components. - // - // When system components are instrumented, values from the - // [Bosh spec] - // should be used. The `system.instance.id` should be set to `spec.id`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec - CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") -) - -// CloudFoundryAppID returns an attribute KeyValue conforming to the -// "cloudfoundry.app.id" semantic conventions. It represents the guid of the -// application. -func CloudFoundryAppID(val string) attribute.KeyValue { - return CloudFoundryAppIDKey.String(val) -} - -// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the -// "cloudfoundry.app.instance.id" semantic conventions. It represents the index -// of the application instance. 0 when just one instance is active. -func CloudFoundryAppInstanceID(val string) attribute.KeyValue { - return CloudFoundryAppInstanceIDKey.String(val) -} - -// CloudFoundryAppName returns an attribute KeyValue conforming to the -// "cloudfoundry.app.name" semantic conventions. 
It represents the name of the -// application. -func CloudFoundryAppName(val string) attribute.KeyValue { - return CloudFoundryAppNameKey.String(val) -} - -// CloudFoundryOrgID returns an attribute KeyValue conforming to the -// "cloudfoundry.org.id" semantic conventions. It represents the guid of the -// CloudFoundry org the application is running in. -func CloudFoundryOrgID(val string) attribute.KeyValue { - return CloudFoundryOrgIDKey.String(val) -} - -// CloudFoundryOrgName returns an attribute KeyValue conforming to the -// "cloudfoundry.org.name" semantic conventions. It represents the name of the -// CloudFoundry organization the app is running in. -func CloudFoundryOrgName(val string) attribute.KeyValue { - return CloudFoundryOrgNameKey.String(val) -} - -// CloudFoundryProcessID returns an attribute KeyValue conforming to the -// "cloudfoundry.process.id" semantic conventions. It represents the UID -// identifying the process. -func CloudFoundryProcessID(val string) attribute.KeyValue { - return CloudFoundryProcessIDKey.String(val) -} - -// CloudFoundryProcessType returns an attribute KeyValue conforming to the -// "cloudfoundry.process.type" semantic conventions. It represents the type of -// process. -func CloudFoundryProcessType(val string) attribute.KeyValue { - return CloudFoundryProcessTypeKey.String(val) -} - -// CloudFoundrySpaceID returns an attribute KeyValue conforming to the -// "cloudfoundry.space.id" semantic conventions. It represents the guid of the -// CloudFoundry space the application is running in. -func CloudFoundrySpaceID(val string) attribute.KeyValue { - return CloudFoundrySpaceIDKey.String(val) -} - -// CloudFoundrySpaceName returns an attribute KeyValue conforming to the -// "cloudfoundry.space.name" semantic conventions. It represents the name of the -// CloudFoundry space the application is running in. 
-func CloudFoundrySpaceName(val string) attribute.KeyValue { - return CloudFoundrySpaceNameKey.String(val) -} - -// CloudFoundrySystemID returns an attribute KeyValue conforming to the -// "cloudfoundry.system.id" semantic conventions. It represents a guid or another -// name describing the event source. -func CloudFoundrySystemID(val string) attribute.KeyValue { - return CloudFoundrySystemIDKey.String(val) -} - -// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the -// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid -// describing the concrete instance of the event source. -func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { - return CloudFoundrySystemInstanceIDKey.String(val) -} - -// Namespace: code -const ( - // CodeColumnNumberKey is the attribute Key conforming to the - // "code.column.number" semantic conventions. It represents the column number in - // `code.file.path` best representing the operation. It SHOULD point within the - // code unit named in `code.function.name`. This attribute MUST NOT be used on - // the Profile signal since the data is already captured in 'message Line'. This - // constraint is imposed to prevent redundancy and maintain data integrity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - CodeColumnNumberKey = attribute.Key("code.column.number") - - // CodeFilePathKey is the attribute Key conforming to the "code.file.path" - // semantic conventions. It represents the source code file name that identifies - // the code unit as uniquely as possible (preferably an absolute file path). - // This attribute MUST NOT be used on the Profile signal since the data is - // already captured in 'message Function'. This constraint is imposed to prevent - // redundancy and maintain data integrity. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: /usr/local/MyApplication/content_root/app/index.php - CodeFilePathKey = attribute.Key("code.file.path") - - // CodeFunctionNameKey is the attribute Key conforming to the - // "code.function.name" semantic conventions. It represents the method or - // function fully-qualified name without arguments. The value should fit the - // natural representation of the language runtime, which is also likely the same - // used within `code.stacktrace` attribute value. This attribute MUST NOT be - // used on the Profile signal since the data is already captured in 'message - // Function'. This constraint is imposed to prevent redundancy and maintain data - // integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "com.example.MyHttpService.serveRequest", - // "GuzzleHttp\Client::transfer", "fopen" - // Note: Values and format depends on each language runtime, thus it is - // impossible to provide an exhaustive list of examples. - // The values are usually the same (or prefixes of) the ones found in native - // stack trace representation stored in - // `code.stacktrace` without information on arguments. - // - // Examples: - // - // - Java method: `com.example.MyHttpService.serveRequest` - // - Java anonymous class method: `com.mycompany.Main$1.myMethod` - // - Java lambda method: - // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` - // - PHP function: `GuzzleHttp\Client::transfer` - // - Go function: `github.com/my/repo/pkg.foo.func5` - // - Elixir: `OpenTelemetry.Ctx.new` - // - Erlang: `opentelemetry_ctx:new` - // - Rust: `playground::my_module::my_cool_func` - // - C function: `fopen` - CodeFunctionNameKey = attribute.Key("code.function.name") - - // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" - // semantic conventions. 
It represents the line number in `code.file.path` best - // representing the operation. It SHOULD point within the code unit named in - // `code.function.name`. This attribute MUST NOT be used on the Profile signal - // since the data is already captured in 'message Line'. This constraint is - // imposed to prevent redundancy and maintain data integrity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - CodeLineNumberKey = attribute.Key("code.line.number") - - // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" - // semantic conventions. It represents a stacktrace as a string in the natural - // representation for the language runtime. The representation is identical to - // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile - // signal since the data is already captured in 'message Location'. This - // constraint is imposed to prevent redundancy and maintain data integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at - // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at - // com.example.GenerateTrace.main(GenerateTrace.java:5) - // - // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumnNumber returns an attribute KeyValue conforming to the -// "code.column.number" semantic conventions. It represents the column number in -// `code.file.path` best representing the operation. It SHOULD point within the -// code unit named in `code.function.name`. This attribute MUST NOT be used on -// the Profile signal since the data is already captured in 'message Line'. This -// constraint is imposed to prevent redundancy and maintain data integrity. 
-func CodeColumnNumber(val int) attribute.KeyValue { - return CodeColumnNumberKey.Int(val) -} - -// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" -// semantic conventions. It represents the source code file name that identifies -// the code unit as uniquely as possible (preferably an absolute file path). This -// attribute MUST NOT be used on the Profile signal since the data is already -// captured in 'message Function'. This constraint is imposed to prevent -// redundancy and maintain data integrity. -func CodeFilePath(val string) attribute.KeyValue { - return CodeFilePathKey.String(val) -} - -// CodeFunctionName returns an attribute KeyValue conforming to the -// "code.function.name" semantic conventions. It represents the method or -// function fully-qualified name without arguments. The value should fit the -// natural representation of the language runtime, which is also likely the same -// used within `code.stacktrace` attribute value. This attribute MUST NOT be used -// on the Profile signal since the data is already captured in 'message -// Function'. This constraint is imposed to prevent redundancy and maintain data -// integrity. -func CodeFunctionName(val string) attribute.KeyValue { - return CodeFunctionNameKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the -// "code.line.number" semantic conventions. It represents the line number in -// `code.file.path` best representing the operation. It SHOULD point within the -// code unit named in `code.function.name`. This attribute MUST NOT be used on -// the Profile signal since the data is already captured in 'message Line'. This -// constraint is imposed to prevent redundancy and maintain data integrity. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. 
It represents a stacktrace as a string -// in the natural representation for the language runtime. The representation is -// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the -// Profile signal since the data is already captured in 'message Location'. This -// constraint is imposed to prevent redundancy and maintain data integrity. -// -// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// Namespace: container -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used to - // run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol" - // Note: If using embedded credentials or sensitive data, it is recommended to - // remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol", "--config", "config.yaml" - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full command - // run by the container as a single string representing the full command. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol --config config.yaml" - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCSIPluginNameKey is the attribute Key conforming to the - // "container.csi.plugin.name" semantic conventions. It represents the name of - // the CSI ([Container Storage Interface]) plugin used by the volume. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pd.csi.storage.gke.io" - // Note: This can sometimes be referred to as a "driver" in CSI implementations. - // This should represent the `name` field of the GetPluginInfo RPC. - // - // [Container Storage Interface]: https://github.com/container-storage-interface/spec - ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") - - // ContainerCSIVolumeIDKey is the attribute Key conforming to the - // "container.csi.volume.id" semantic conventions. It represents the unique - // volume ID returned by the CSI ([Container Storage Interface]) plugin. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" - // Note: This can sometimes be referred to as a "volume handle" in CSI - // implementations. This should represent the `Volume.volume_id` field in CSI - // spec. - // - // [Container Storage Interface]: https://github.com/container-storage-interface/spec - ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") - - // ContainerIDKey is the attribute Key conforming to the "container.id" semantic - // conventions. It represents the container ID. Usually a UUID, as for example - // used to [identify Docker containers]. The UUID might be abbreviated. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a3bf90e006b2" - // - // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime specific - // image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect [API] - // endpoint. - // K8s defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` - // . - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - // - // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of the - // image the container was built on. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "gcr.io/opentelemetry/operator" - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. 
It represents the repo - // digests of the container image as provided by the container runtime. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", - // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - // Note: [Docker] and [CRI] report those under the `RepoDigests` field. - // - // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect - // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image Inspect]. Should be only - // the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "v1.27.1", "3.5.7-0" - // - // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-autoconf" - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeDescriptionKey is the attribute Key conforming to the - // "container.runtime.description" semantic conventions. 
It represents a - // description about the runtime which could include, for example details about - // the CRI/API version being used or other customisations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "docker://19.3.1 - CRI: 1.22.0" - ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description") - - // ContainerRuntimeNameKey is the attribute Key conforming to the - // "container.runtime.name" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "docker", "containerd", "rkt" - ContainerRuntimeNameKey = attribute.Key("container.runtime.name") - - // ContainerRuntimeVersionKey is the attribute Key conforming to the - // "container.runtime.version" semantic conventions. It represents the version - // of the runtime of this process, as returned by the runtime without - // modification. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0.0 - ContainerRuntimeVersionKey = attribute.Key("container.runtime.version") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. 
It represents the full command -// run by the container as a single string representing the full command. -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerCSIPluginName returns an attribute KeyValue conforming to the -// "container.csi.plugin.name" semantic conventions. It represents the name of -// the CSI ([Container Storage Interface]) plugin used by the volume. -// -// [Container Storage Interface]: https://github.com/container-storage-interface/spec -func ContainerCSIPluginName(val string) attribute.KeyValue { - return ContainerCSIPluginNameKey.String(val) -} - -// ContainerCSIVolumeID returns an attribute KeyValue conforming to the -// "container.csi.volume.id" semantic conventions. It represents the unique -// volume ID returned by the CSI ([Container Storage Interface]) plugin. -// -// [Container Storage Interface]: https://github.com/container-storage-interface/spec -func ContainerCSIVolumeID(val string) attribute.KeyValue { - return ContainerCSIVolumeIDKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the "container.id" -// semantic conventions. It represents the container ID. Usually a UUID, as for -// example used to [identify Docker containers]. The UUID might be abbreviated. -// -// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime specific -// image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. 
It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container image -// tags. An example can be found in [Docker Image Inspect]. Should be only the -// `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -// -// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerLabel returns an attribute KeyValue conforming to the -// "container.label" semantic conventions. It represents the container labels, -// `` being the label name, the value being the label value. -func ContainerLabel(key string, val string) attribute.KeyValue { - return attribute.String("container.label."+key, val) -} - -// ContainerName returns an attribute KeyValue conforming to the "container.name" -// semantic conventions. It represents the container name used by container -// runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntimeDescription returns an attribute KeyValue conforming to the -// "container.runtime.description" semantic conventions. It represents a -// description about the runtime which could include, for example details about -// the CRI/API version being used or other customisations. 
-func ContainerRuntimeDescription(val string) attribute.KeyValue { - return ContainerRuntimeDescriptionKey.String(val) -} - -// ContainerRuntimeName returns an attribute KeyValue conforming to the -// "container.runtime.name" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntimeName(val string) attribute.KeyValue { - return ContainerRuntimeNameKey.String(val) -} - -// ContainerRuntimeVersion returns an attribute KeyValue conforming to the -// "container.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without modification. -func ContainerRuntimeVersion(val string) attribute.KeyValue { - return ContainerRuntimeVersionKey.String(val) -} - -// Namespace: cpu -const ( - // CPULogicalNumberKey is the attribute Key conforming to the - // "cpu.logical_number" semantic conventions. It represents the logical CPU - // number [0..n-1]. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - CPULogicalNumberKey = attribute.Key("cpu.logical_number") - - // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic - // conventions. It represents the mode of the CPU. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "user", "system" - CPUModeKey = attribute.Key("cpu.mode") -) - -// CPULogicalNumber returns an attribute KeyValue conforming to the -// "cpu.logical_number" semantic conventions. It represents the logical CPU -// number [0..n-1]. 
-func CPULogicalNumber(val int) attribute.KeyValue { - return CPULogicalNumberKey.Int(val) -} - -// Enum values for cpu.mode -var ( - // User - // Stability: development - CPUModeUser = CPUModeKey.String("user") - // System - // Stability: development - CPUModeSystem = CPUModeKey.String("system") - // Nice - // Stability: development - CPUModeNice = CPUModeKey.String("nice") - // Idle - // Stability: development - CPUModeIdle = CPUModeKey.String("idle") - // IO Wait - // Stability: development - CPUModeIOWait = CPUModeKey.String("iowait") - // Interrupt - // Stability: development - CPUModeInterrupt = CPUModeKey.String("interrupt") - // Steal - // Stability: development - CPUModeSteal = CPUModeKey.String("steal") - // Kernel - // Stability: development - CPUModeKernel = CPUModeKey.String("kernel") -) - -// Namespace: db -const ( - // DBClientConnectionPoolNameKey is the attribute Key conforming to the - // "db.client.connection.pool.name" semantic conventions. It represents the name - // of the connection pool; unique within the instrumented application. In case - // the connection pool implementation doesn't provide a name, instrumentation - // SHOULD use a combination of parameters that would make the name unique, for - // example, combining attributes `server.address`, `server.port`, and - // `db.namespace`, formatted as `server.address:server.port/db.namespace`. - // Instrumentations that generate connection pool name following different - // patterns SHOULD document it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myDataSource" - DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") - - // DBClientConnectionStateKey is the attribute Key conforming to the - // "db.client.connection.state" semantic conventions. It represents the state of - // a connection in the pool. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "idle" - DBClientConnectionStateKey = attribute.Key("db.client.connection.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "public.users", "customers" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // The collection name SHOULD NOT be extracted from `db.query.text`, - // when the database system supports query text with multiple collections - // in non-batch operations. - // - // For batch operations, if the individual operations are known to have the same - // collection name then that collection name SHOULD be used. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic - // conventions. It represents the name of the database, fully qualified within - // the server address and port. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "customers", "test.users" - // Note: If a database system has multiple namespace components, they SHOULD be - // concatenated from the most general to the most specific namespace component, - // using `|` as a separator between the components. Any missing components (and - // their associated separators) SHOULD be omitted. - // Semantic conventions for individual database systems SHOULD document what - // `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application without - // attempting to do any case normalization. 
- DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationBatchSizeKey is the attribute Key conforming to the - // "db.operation.batch.size" semantic conventions. It represents the number of - // queries included in a batch operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 2, 3, 4 - // Note: Operations are only considered batches when they contain two or more - // operations, and so `db.operation.batch.size` SHOULD never be `1`. - DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") - - // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" - // semantic conventions. It represents the name of the operation or command - // being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "findAndModify", "HMSET", "SELECT" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // The operation name SHOULD NOT be extracted from `db.query.text`, - // when the database system supports query text with multiple operations - // in non-batch operations. - // - // If spaces can occur in the operation name, multiple consecutive spaces - // SHOULD be normalized to a single space. - // - // For batch operations, if the individual operations are known to have the same - // operation name - // then that operation name SHOULD be used prepended by `BATCH `, - // otherwise `db.operation.name` SHOULD be `BATCH` or some other database - // system specific term if more applicable. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" - // semantic conventions. It represents the low cardinality summary of a database - // query. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get - // user by id" - // Note: The query summary describes a class of database queries and is useful - // as a grouping key, especially when analyzing telemetry for database - // calls involving complex queries. - // - // Summary may be available to the instrumentation through - // instrumentation hooks or other means. If it is not available, - // instrumentations - // that support query parsing SHOULD generate a summary following - // [Generating query summary] - // section. - // - // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query - DBQuerySummaryKey = attribute.Key("db.query.summary") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" - // Note: For sanitization see [Sanitization of `db.query.text`]. - // For batch operations, if the individual operations are known to have the same - // query text then that query text SHOULD be used, otherwise all of the - // individual query texts SHOULD be concatenated with separator `; ` or some - // other database system specific separator if more applicable. - // Parameterized query text SHOULD NOT be sanitized. Even though parameterized - // query text can potentially have sensitive data, by using a parameterized - // query the user is giving a strong signal that any sensitive data will be - // passed as parameter values, and the benefit to observability of capturing the - // static part of the query text by default outweighs the risk. 
- // - // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext - DBQueryTextKey = attribute.Key("db.query.text") - - // DBResponseReturnedRowsKey is the attribute Key conforming to the - // "db.response.returned_rows" semantic conventions. It represents the number of - // rows returned by the operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10, 30, 1000 - DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") - - // DBResponseStatusCodeKey is the attribute Key conforming to the - // "db.response.status_code" semantic conventions. It represents the database - // response status code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "102", "ORA-17002", "08P01", "404" - // Note: The status code returned by the database. Usually it represents an - // error code, but may also represent partial success, warning, or differentiate - // between various types of successful outcomes. - // Semantic conventions for individual database systems SHOULD document what - // `db.response.status_code` means in the context of that system. - DBResponseStatusCodeKey = attribute.Key("db.response.status_code") - - // DBStoredProcedureNameKey is the attribute Key conforming to the - // "db.stored_procedure.name" semantic conventions. It represents the name of a - // stored procedure within the database. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GetCustomer" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // For batch operations, if the individual operations are known to have the same - // stored procedure name then that stored procedure name SHOULD be used. 
- DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") - - // DBSystemNameKey is the attribute Key conforming to the "db.system.name" - // semantic conventions. It represents the database management system (DBMS) - // product as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - // Note: The actual DBMS may differ from the one identified by the client. For - // example, when using PostgreSQL client libraries to connect to a CockroachDB, - // the `db.system.name` is set to `postgresql` based on the instrumentation's - // best knowledge. - DBSystemNameKey = attribute.Key("db.system.name") -) - -// DBClientConnectionPoolName returns an attribute KeyValue conforming to the -// "db.client.connection.pool.name" semantic conventions. It represents the name -// of the connection pool; unique within the instrumented application. In case -// the connection pool implementation doesn't provide a name, instrumentation -// SHOULD use a combination of parameters that would make the name unique, for -// example, combining attributes `server.address`, `server.port`, and -// `db.namespace`, formatted as `server.address:server.port/db.namespace`. -// Instrumentations that generate connection pool name following different -// patterns SHOULD document it. -func DBClientConnectionPoolName(val string) attribute.KeyValue { - return DBClientConnectionPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" -// semantic conventions. It represents the name of the database, fully qualified -// within the server address and port. 
-func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationBatchSize returns an attribute KeyValue conforming to the -// "db.operation.batch.size" semantic conventions. It represents the number of -// queries included in a batch operation. -func DBOperationBatchSize(val int) attribute.KeyValue { - return DBOperationBatchSizeKey.Int(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBOperationParameter returns an attribute KeyValue conforming to the -// "db.operation.parameter" semantic conventions. It represents a database -// operation parameter, with `` being the parameter name, and the attribute -// value being a string representation of the parameter value. -func DBOperationParameter(key string, val string) attribute.KeyValue { - return attribute.String("db.operation.parameter."+key, val) -} - -// DBQueryParameter returns an attribute KeyValue conforming to the -// "db.query.parameter" semantic conventions. It represents a database query -// parameter, with `` being the parameter name, and the attribute value -// being a string representation of the parameter value. -func DBQueryParameter(key string, val string) attribute.KeyValue { - return attribute.String("db.query.parameter."+key, val) -} - -// DBQuerySummary returns an attribute KeyValue conforming to the -// "db.query.summary" semantic conventions. It represents the low cardinality -// summary of a database query. -func DBQuerySummary(val string) attribute.KeyValue { - return DBQuerySummaryKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" -// semantic conventions. It represents the database query being executed. 
-func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// DBResponseReturnedRows returns an attribute KeyValue conforming to the -// "db.response.returned_rows" semantic conventions. It represents the number of -// rows returned by the operation. -func DBResponseReturnedRows(val int) attribute.KeyValue { - return DBResponseReturnedRowsKey.Int(val) -} - -// DBResponseStatusCode returns an attribute KeyValue conforming to the -// "db.response.status_code" semantic conventions. It represents the database -// response status code. -func DBResponseStatusCode(val string) attribute.KeyValue { - return DBResponseStatusCodeKey.String(val) -} - -// DBStoredProcedureName returns an attribute KeyValue conforming to the -// "db.stored_procedure.name" semantic conventions. It represents the name of a -// stored procedure within the database. -func DBStoredProcedureName(val string) attribute.KeyValue { - return DBStoredProcedureNameKey.String(val) -} - -// Enum values for db.client.connection.state -var ( - // idle - // Stability: development - DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") - // used - // Stability: development - DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") -) - -// Enum values for db.system.name -var ( - // Some other SQL database. Fallback only. 
- // Stability: development - DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") - // [Adabas (Adaptable Database System)] - // Stability: development - // - // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas - DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") - // [Actian Ingres] - // Stability: development - // - // [Actian Ingres]: https://www.actian.com/databases/ingres/ - DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") - // [Amazon DynamoDB] - // Stability: development - // - // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ - DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") - // [Amazon Redshift] - // Stability: development - // - // [Amazon Redshift]: https://aws.amazon.com/redshift/ - DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") - // [Azure Cosmos DB] - // Stability: development - // - // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db - DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") - // [InterSystems Caché] - // Stability: development - // - // [InterSystems Caché]: https://www.intersystems.com/products/cache/ - DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") - // [Apache Cassandra] - // Stability: development - // - // [Apache Cassandra]: https://cassandra.apache.org/ - DBSystemNameCassandra = DBSystemNameKey.String("cassandra") - // [ClickHouse] - // Stability: development - // - // [ClickHouse]: https://clickhouse.com/ - DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") - // [CockroachDB] - // Stability: development - // - // [CockroachDB]: https://www.cockroachlabs.com/ - DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") - // [Couchbase] - // Stability: development - // - // [Couchbase]: https://www.couchbase.com/ - DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") - // [Apache CouchDB] - // Stability: development - // 
- // [Apache CouchDB]: https://couchdb.apache.org/ - DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") - // [Apache Derby] - // Stability: development - // - // [Apache Derby]: https://db.apache.org/derby/ - DBSystemNameDerby = DBSystemNameKey.String("derby") - // [Elasticsearch] - // Stability: development - // - // [Elasticsearch]: https://www.elastic.co/elasticsearch - DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") - // [Firebird] - // Stability: development - // - // [Firebird]: https://www.firebirdsql.org/ - DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") - // [Google Cloud Spanner] - // Stability: development - // - // [Google Cloud Spanner]: https://cloud.google.com/spanner - DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") - // [Apache Geode] - // Stability: development - // - // [Apache Geode]: https://geode.apache.org/ - DBSystemNameGeode = DBSystemNameKey.String("geode") - // [H2 Database] - // Stability: development - // - // [H2 Database]: https://h2database.com/ - DBSystemNameH2database = DBSystemNameKey.String("h2database") - // [Apache HBase] - // Stability: development - // - // [Apache HBase]: https://hbase.apache.org/ - DBSystemNameHBase = DBSystemNameKey.String("hbase") - // [Apache Hive] - // Stability: development - // - // [Apache Hive]: https://hive.apache.org/ - DBSystemNameHive = DBSystemNameKey.String("hive") - // [HyperSQL Database] - // Stability: development - // - // [HyperSQL Database]: https://hsqldb.org/ - DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") - // [IBM Db2] - // Stability: development - // - // [IBM Db2]: https://www.ibm.com/db2 - DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") - // [IBM Informix] - // Stability: development - // - // [IBM Informix]: https://www.ibm.com/products/informix - DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") - // [IBM Netezza] - // Stability: development - // - // [IBM Netezza]: 
https://www.ibm.com/products/netezza - DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") - // [InfluxDB] - // Stability: development - // - // [InfluxDB]: https://www.influxdata.com/ - DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") - // [Instant] - // Stability: development - // - // [Instant]: https://www.instantdb.com/ - DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") - // [MariaDB] - // Stability: stable - // - // [MariaDB]: https://mariadb.org/ - DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") - // [Memcached] - // Stability: development - // - // [Memcached]: https://memcached.org/ - DBSystemNameMemcached = DBSystemNameKey.String("memcached") - // [MongoDB] - // Stability: development - // - // [MongoDB]: https://www.mongodb.com/ - DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") - // [Microsoft SQL Server] - // Stability: stable - // - // [Microsoft SQL Server]: https://www.microsoft.com/sql-server - DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") - // [MySQL] - // Stability: stable - // - // [MySQL]: https://www.mysql.com/ - DBSystemNameMySQL = DBSystemNameKey.String("mysql") - // [Neo4j] - // Stability: development - // - // [Neo4j]: https://neo4j.com/ - DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") - // [OpenSearch] - // Stability: development - // - // [OpenSearch]: https://opensearch.org/ - DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") - // [Oracle Database] - // Stability: development - // - // [Oracle Database]: https://www.oracle.com/database/ - DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") - // [PostgreSQL] - // Stability: stable - // - // [PostgreSQL]: https://www.postgresql.org/ - DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") - // [Redis] - // Stability: development - // - // [Redis]: https://redis.io/ - DBSystemNameRedis = DBSystemNameKey.String("redis") - // [SAP HANA] - // Stability: development - // - // [SAP 
HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html - DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") - // [SAP MaxDB] - // Stability: development - // - // [SAP MaxDB]: https://maxdb.sap.com/ - DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") - // [SQLite] - // Stability: development - // - // [SQLite]: https://www.sqlite.org/ - DBSystemNameSQLite = DBSystemNameKey.String("sqlite") - // [Teradata] - // Stability: development - // - // [Teradata]: https://www.teradata.com/ - DBSystemNameTeradata = DBSystemNameKey.String("teradata") - // [Trino] - // Stability: development - // - // [Trino]: https://trino.io/ - DBSystemNameTrino = DBSystemNameKey.String("trino") -) - -// Namespace: deployment -const ( - // DeploymentEnvironmentNameKey is the attribute Key conforming to the - // "deployment.environment.name" semantic conventions. It represents the name of - // the [deployment environment] (aka deployment tier). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "staging", "production" - // Note: `deployment.environment.name` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` resource - // attributes. - // This implies that resources carrying the following attribute combinations - // MUST be - // considered to be identifying the same service: - // - // - `service.name=frontend`, `deployment.environment.name=production` - // - `service.name=frontend`, `deployment.environment.name=staging`. - // - // - // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment - DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") - - // DeploymentIDKey is the attribute Key conforming to the "deployment.id" - // semantic conventions. It represents the id of the deployment. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1208" - DeploymentIDKey = attribute.Key("deployment.id") - - // DeploymentNameKey is the attribute Key conforming to the "deployment.name" - // semantic conventions. It represents the name of the deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "deploy my app", "deploy-frontend" - DeploymentNameKey = attribute.Key("deployment.name") - - // DeploymentStatusKey is the attribute Key conforming to the - // "deployment.status" semantic conventions. It represents the status of the - // deployment. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - DeploymentStatusKey = attribute.Key("deployment.status") -) - -// DeploymentEnvironmentName returns an attribute KeyValue conforming to the -// "deployment.environment.name" semantic conventions. It represents the name of -// the [deployment environment] (aka deployment tier). -// -// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment -func DeploymentEnvironmentName(val string) attribute.KeyValue { - return DeploymentEnvironmentNameKey.String(val) -} - -// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" -// semantic conventions. It represents the id of the deployment. -func DeploymentID(val string) attribute.KeyValue { - return DeploymentIDKey.String(val) -} - -// DeploymentName returns an attribute KeyValue conforming to the -// "deployment.name" semantic conventions. It represents the name of the -// deployment. 
-func DeploymentName(val string) attribute.KeyValue { - return DeploymentNameKey.String(val) -} - -// Enum values for deployment.status -var ( - // failed - // Stability: development - DeploymentStatusFailed = DeploymentStatusKey.String("failed") - // succeeded - // Stability: development - DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") -) - -// Namespace: destination -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the destination - // address - domain name if available without reverse DNS lookup; otherwise, IP - // address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the source side, and when communicating through an - // intermediary, `destination.address` SHOULD represent the destination address - // behind any intermediaries, for example proxies, if it's available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the "destination.port" - // semantic conventions. It represents the destination port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number. 
-func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Namespace: device -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123456789012345", "01:23:45:67:89:AB" - // Note: Its value SHOULD be identical for all apps on a device and it SHOULD - // NOT change if an app is uninstalled and re-installed. - // However, it might be resettable by the user for all apps on a device. - // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be - // used as values. - // - // More information about Android identifier best practices can be found in the - // [Android user data IDs guide]. - // - // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution - // > should be taken when storing personal data or anything which can identify a - // > user. GDPR and data protection laws may apply, - // > ensure you do your own due diligence.> Due to these reasons, this - // > identifier is not recommended for consumer applications and will likely - // > result in rejection from both Google Play and App Store. - // > However, it may be appropriate for specific enterprise scenarios, such as - // > kiosk devices or enterprise-managed devices, with appropriate compliance - // > clearance. - // > Any instrumentation providing this identifier MUST implement it as an - // > opt-in feature.> See [`app.installation.id`]> for a more - // > privacy-preserving alternative. 
- // - // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids - // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of the - // device manufacturer. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Apple", "Samsung" - // Note: The Android OS provides this field via [Build]. iOS apps SHOULD - // hardcode the value `Apple`. - // - // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iPhone3,4", "SM-G920F" - // Note: It's recommended this value represents a machine-readable version of - // the model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" - // semantic conventions. It represents the marketing name for the device model. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" - // Note: It's recommended this value represents a human-readable version of the - // device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic -// conventions. 
It represents a unique identifier representing the device. -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer. -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device. -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name for -// the device model. -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// Namespace: disk -const ( - // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" - // semantic conventions. It represents the disk IO operation direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "read" - DiskIODirectionKey = attribute.Key("disk.io.direction") -) - -// Enum values for disk.io.direction -var ( - // read - // Stability: development - DiskIODirectionRead = DiskIODirectionKey.String("read") - // write - // Stability: development - DiskIODirectionWrite = DiskIODirectionKey.String("write") -) - -// Namespace: dns -const ( - // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic - // conventions. It represents the list of IPv4 or IPv6 addresses resolved during - // DNS lookup. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" - DNSAnswersKey = attribute.Key("dns.answers") - - // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" - // semantic conventions. It represents the name being queried. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "www.example.com", "opentelemetry.io" - // Note: If the name field contains non-printable characters (below 32 or above - // 126), those characters should be represented as escaped base 10 integers - // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, - // and line feeds should be converted to \t, \r, and \n respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers" -// semantic conventions. It represents the list of IPv4 or IPv6 addresses -// resolved during DNS lookup. -func DNSAnswers(val ...string) attribute.KeyValue { - return DNSAnswersKey.StringSlice(val) -} - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Namespace: elasticsearch -const ( - // ElasticsearchNodeNameKey is the attribute Key conforming to the - // "elasticsearch.node.name" semantic conventions. It represents the represents - // the human-readable identifier of the node/instance to which a request was - // routed. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "instance-0000000001" - ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") -) - -// ElasticsearchNodeName returns an attribute KeyValue conforming to the -// "elasticsearch.node.name" semantic conventions. It represents the represents -// the human-readable identifier of the node/instance to which a request was -// routed. -func ElasticsearchNodeName(val string) attribute.KeyValue { - return ElasticsearchNodeNameKey.String(val) -} - -// Namespace: enduser -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic - // conventions. It represents the unique identifier of an end user in the - // system. It maybe a username, email address, or other identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "username" - // Note: Unique identifier of an end user in the system. - // - // > [!Warning] - // > This field contains sensitive (PII) information. - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id" - // semantic conventions. It represents the pseudonymous identifier of an end - // user. This identifier should be a random value that is not directly linked or - // associated with the end user's actual identity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "QdH5CAWJgqVT4rOr0qtumf" - // Note: Pseudonymous identifier of an end user. - // - // > [!Warning] - // > This field contains sensitive (linkable PII) information. - EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the unique identifier of an end user in -// the system. It maybe a username, email address, or other identifier. 
-func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserPseudoID returns an attribute KeyValue conforming to the -// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous -// identifier of an end user. This identifier should be a random value that is -// not directly linked or associated with the end user's actual identity. -func EnduserPseudoID(val string) attribute.KeyValue { - return EnduserPseudoIDKey.String(val) -} - -// Namespace: error -const ( - // ErrorMessageKey is the attribute Key conforming to the "error.message" - // semantic conventions. It represents a message providing more detail about an - // error in human-readable form. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Unexpected input type: string", "The user has exceeded their - // storage quota" - // Note: `error.message` should provide additional context and detail about an - // error. - // It is NOT RECOMMENDED to duplicate the value of `error.type` in - // `error.message`. - // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in - // `error.message`. - // - // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded - // cardinality and overlap with span status. - ErrorMessageKey = attribute.Key("error.message") - - // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic - // conventions. It represents the describes a class of error the operation ended - // with. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "timeout", "java.net.UnknownHostException", - // "server_certificate_invalid", "500" - // Note: The `error.type` SHOULD be predictable, and SHOULD have low - // cardinality. - // - // When `error.type` is set to a type (e.g., an exception type), its - // canonical class name identifying the type within the artifact SHOULD be used. 
- // - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library SHOULD be - // low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query time - // when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT set - // `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as HTTP - // or gRPC status codes), - // it's RECOMMENDED to: - // - // - Use a domain-specific attribute - // - Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -// ErrorMessage returns an attribute KeyValue conforming to the "error.message" -// semantic conventions. It represents a message providing more detail about an -// error in human-readable form. -func ErrorMessage(val string) attribute.KeyValue { - return ErrorMessageKey.String(val) -} - -// Enum values for error.type -var ( - // A fallback error value to be used when the instrumentation doesn't define a - // custom value. - // - // Stability: stable - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// Namespace: exception -const ( - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. 
It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: Exception in thread "main" java.lang.RuntimeException: Test - // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at - // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at - // com.example.GenerateTrace.main(GenerateTrace.java:5) - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the exception - // should be preferred over the static type in languages that support it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "java.net.ConnectException", "OSError" - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the "exception.type" -// semantic conventions. It represents the type of the exception (its -// fully-qualified class name, if applicable). 
The dynamic type of the exception -// should be preferred over the static type in languages that support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// Namespace: faas -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the serverless - // function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSColdstartKey = attribute.Key("faas.coldstart") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron Expression]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0/5 * * * ? * - // - // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name of - // the source on which the triggering operation was performed. For example, in - // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the - // database name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myBucketName", "myDbName" - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or S3 is - // the name of the file, and in Cosmos DB the table name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myFile.txt", "myTableName" - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the describes - // the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string containing - // the time when the data was accessed in the [ISO 8601] format expressed in - // [UTC]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 2020-01-23T13:47:06Z - // - // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html - // [UTC]: https://www.w3.org/TR/NOTE-datetime - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a string, - // that will be potentially reused for other invocations to the same - // function/function version. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" - // Note: - **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation ID of - // the current function invocation. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") - - // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" - // semantic conventions. It represents the name of the invoked function. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: my-function - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud region of - // the invoked function. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: eu-central-1 - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" - // semantic conventions. It represents the amount of memory available to the - // serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. 
On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information (which must be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this runtime - // instance executes. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-function", "myazurefunctionapp/some-function-name" - // Note: This is the name of the function as configured/deployed on the FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function.name`] - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud providers/products: - // - // - **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - // - // - // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes - FaaSNameKey = attribute.Key("faas.name") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation time - // in the [ISO 8601] format expressed in [UTC]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 2020-01-23T13:47:06Z - // - // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html - // [UTC]: https://www.w3.org/TR/NOTE-datetime - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic - // conventions. It represents the type of the trigger which caused this function - // invocation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic - // conventions. It represents the immutable version of the function being - // executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "26", "pinkfroid-00002" - // Note: Depending on the cloud provider and platform, use: - // - // - **AWS Lambda:** The [function version] - // (an integer represented as a decimal string). - // - **Google Cloud Run (Services):** The [revision] - // (i.e., the function name plus the revision suffix). - // - **Google Cloud Functions:** The value of the - // [`K_REVISION` environment variable]. - // - **Azure Functions:** Not applicable. Do not set this attribute. - // - // - // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html - // [revision]: https://cloud.google.com/run/docs/managing/revisions - // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically - FaaSVersionKey = attribute.Key("faas.version") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" -// semantic conventions. It represents a boolean that is true if the serverless -// function is executed for the first time (aka cold-start). 
-func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic -// conventions. It represents a string containing the schedule period as -// [Cron Expression]. -// -// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of the -// source on which the triggering operation was performed. For example, in Cloud -// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database -// name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 is -// the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO 8601] format expressed in -// [UTC]. -// -// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html -// [UTC]: https://www.w3.org/TR/NOTE-datetime -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" -// semantic conventions. 
It represents the execution environment ID as a string, -// that will be potentially reused for other invocations to the same -// function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID of -// the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region of -// the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic -// conventions. It represents the name of the single function that this runtime -// instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic -// conventions. It represents a string containing the function invocation time in -// the [ISO 8601] format expressed in [UTC]. 
-// -// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html -// [UTC]: https://www.w3.org/TR/NOTE-datetime -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" -// semantic conventions. It represents the immutable version of the function -// being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// Enum values for faas.document.operation -var ( - // When a new object is created. - // Stability: development - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified. - // Stability: development - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted. - // Stability: development - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Enum values for faas.invoked_provider -var ( - // Alibaba Cloud - // Stability: development - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - // Stability: development - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - // Stability: development - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - // Stability: development - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - // Stability: development - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// Enum values for faas.trigger -var ( - // A response to some data source operation such as a database or filesystem - // read/write - // Stability: development - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - // Stability: development - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed 
when messages are sent to a messaging system - // Stability: development - FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - // Stability: development - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - // Stability: development - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Namespace: feature_flag -const ( - // FeatureFlagContextIDKey is the attribute Key conforming to the - // "feature_flag.context.id" semantic conventions. It represents the unique - // identifier for the flag evaluation context. For example, the targeting key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" - FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") - - // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" - // semantic conventions. It represents the lookup key of the feature flag. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "logo-color" - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider.name" semantic conventions. It represents the - // identifies the feature flag provider. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "Flag Manager" - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") - - // FeatureFlagResultReasonKey is the attribute Key conforming to the - // "feature_flag.result.reason" semantic conventions. It represents the reason - // code which shows how a feature flag value was determined. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "static", "targeting_match", "error", "default" - FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") - - // FeatureFlagResultValueKey is the attribute Key conforming to the - // "feature_flag.result.value" semantic conventions. It represents the evaluated - // value of the feature flag. - // - // Type: any - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "#ff0000", true, 3 - // Note: With some feature flag providers, feature flag results can be quite - // large or contain private or sensitive details. - // Because of this, `feature_flag.result.variant` is often the preferred - // attribute if it is available. - // - // It may be desirable to redact or otherwise limit the size and scope of - // `feature_flag.result.value` if possible. - // Because the evaluated flag value is unstructured and may be any type, it is - // left to the instrumentation author to determine how best to achieve this. - FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value") - - // FeatureFlagResultVariantKey is the attribute Key conforming to the - // "feature_flag.result.variant" semantic conventions. It represents a semantic - // identifier for an evaluated flag value. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "red", "true", "on" - // Note: A semantic identifier, commonly referred to as a variant, provides a - // means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant") - - // FeatureFlagSetIDKey is the attribute Key conforming to the - // "feature_flag.set.id" semantic conventions. 
It represents the identifier of - // the [flag set] to which the feature flag belongs. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "proj-1", "ab98sgs", "service1/dev" - // - // [flag set]: https://openfeature.dev/specification/glossary/#flag-set - FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") - - // FeatureFlagVersionKey is the attribute Key conforming to the - // "feature_flag.version" semantic conventions. It represents the version of the - // ruleset used during the evaluation. This may be any stable value which - // uniquely identifies the ruleset. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "1", "01ABCDEF" - FeatureFlagVersionKey = attribute.Key("feature_flag.version") -) - -// FeatureFlagContextID returns an attribute KeyValue conforming to the -// "feature_flag.context.id" semantic conventions. It represents the unique -// identifier for the flag evaluation context. For example, the targeting key. -func FeatureFlagContextID(val string) attribute.KeyValue { - return FeatureFlagContextIDKey.String(val) -} - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the lookup key of the -// feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider.name" semantic conventions. It represents the -// identifies the feature flag provider. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagResultVariant returns an attribute KeyValue conforming to the -// "feature_flag.result.variant" semantic conventions. It represents a semantic -// identifier for an evaluated flag value. 
-func FeatureFlagResultVariant(val string) attribute.KeyValue { - return FeatureFlagResultVariantKey.String(val) -} - -// FeatureFlagSetID returns an attribute KeyValue conforming to the -// "feature_flag.set.id" semantic conventions. It represents the identifier of -// the [flag set] to which the feature flag belongs. -// -// [flag set]: https://openfeature.dev/specification/glossary/#flag-set -func FeatureFlagSetID(val string) attribute.KeyValue { - return FeatureFlagSetIDKey.String(val) -} - -// FeatureFlagVersion returns an attribute KeyValue conforming to the -// "feature_flag.version" semantic conventions. It represents the version of the -// ruleset used during the evaluation. This may be any stable value which -// uniquely identifies the ruleset. -func FeatureFlagVersion(val string) attribute.KeyValue { - return FeatureFlagVersionKey.String(val) -} - -// Enum values for feature_flag.result.reason -var ( - // The resolved value is static (no dynamic evaluation). - // Stability: release_candidate - FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") - // The resolved value fell back to a pre-configured value (no dynamic evaluation - // occurred or dynamic evaluation yielded no result). - // Stability: release_candidate - FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") - // The resolved value was the result of a dynamic evaluation, such as a rule or - // specific user-targeting. - // Stability: release_candidate - FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") - // The resolved value was the result of pseudorandom assignment. - // Stability: release_candidate - FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") - // The resolved value was retrieved from cache. 
- // Stability: release_candidate - FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") - // The resolved value was the result of the flag being disabled in the - // management system. - // Stability: release_candidate - FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") - // The reason for the resolved value could not be determined. - // Stability: release_candidate - FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") - // The resolved value is non-authoritative or possibly out of date - // Stability: release_candidate - FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") - // The resolved value was the result of an error. - // Stability: release_candidate - FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") -) - -// Namespace: file -const ( - // FileAccessedKey is the attribute Key conforming to the "file.accessed" - // semantic conventions. It represents the time when the file was last accessed, - // in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: This attribute might not be supported by some file systems — NFS, - // FAT32, in embedded OS, etc. - FileAccessedKey = attribute.Key("file.accessed") - - // FileAttributesKey is the attribute Key conforming to the "file.attributes" - // semantic conventions. It represents the array of file attributes. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "readonly", "hidden" - // Note: Attributes names depend on the OS or file system. Here’s a - // non-exhaustive list of values expected for this attribute: `archive`, - // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, - // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, - // `write`. 
- FileAttributesKey = attribute.Key("file.attributes") - - // FileChangedKey is the attribute Key conforming to the "file.changed" semantic - // conventions. It represents the time when the file attributes or metadata was - // last changed, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: `file.changed` captures the time when any of the file's properties or - // attributes (including the content) are changed, while `file.modified` - // captures the timestamp when the file content is modified. - FileChangedKey = attribute.Key("file.changed") - - // FileCreatedKey is the attribute Key conforming to the "file.created" semantic - // conventions. It represents the time when the file was created, in ISO 8601 - // format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: This attribute might not be supported by some file systems — NFS, - // FAT32, in embedded OS, etc. - FileCreatedKey = attribute.Key("file.created") - - // FileDirectoryKey is the attribute Key conforming to the "file.directory" - // semantic conventions. It represents the directory where the file is located. - // It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/home/user", "C:\Program Files\MyApp" - FileDirectoryKey = attribute.Key("file.directory") - - // FileExtensionKey is the attribute Key conforming to the "file.extension" - // semantic conventions. It represents the file extension, excluding the leading - // dot. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "png", "gz" - // Note: When the file name has multiple extensions (example.tar.gz), only the - // last one should be captured ("gz", not "tar.gz"). 
- FileExtensionKey = attribute.Key("file.extension") - - // FileForkNameKey is the attribute Key conforming to the "file.fork_name" - // semantic conventions. It represents the name of the fork. A fork is - // additional data associated with a filesystem object. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Zone.Identifier" - // Note: On Linux, a resource fork is used to store additional data with a - // filesystem object. A file always has at least one fork for the data portion, - // and additional forks may exist. - // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default - // data stream for a file is just called $DATA. Zone.Identifier is commonly used - // by Windows to track contents downloaded from the Internet. An ADS is - // typically of the form: C:\path\to\filename.extension:some_fork_name, and - // some_fork_name is the value that should populate `fork_name`. - // `filename.extension` should populate `file.name`, and `extension` should - // populate `file.extension`. The full path, `file.path`, will include the fork - // name. - FileForkNameKey = attribute.Key("file.fork_name") - - // FileGroupIDKey is the attribute Key conforming to the "file.group.id" - // semantic conventions. It represents the primary Group ID (GID) of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1000" - FileGroupIDKey = attribute.Key("file.group.id") - - // FileGroupNameKey is the attribute Key conforming to the "file.group.name" - // semantic conventions. It represents the primary group name of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "users" - FileGroupNameKey = attribute.Key("file.group.name") - - // FileInodeKey is the attribute Key conforming to the "file.inode" semantic - // conventions. 
It represents the inode representing the file in the filesystem. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "256383" - FileInodeKey = attribute.Key("file.inode") - - // FileModeKey is the attribute Key conforming to the "file.mode" semantic - // conventions. It represents the mode of the file in octal representation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0640" - FileModeKey = attribute.Key("file.mode") - - // FileModifiedKey is the attribute Key conforming to the "file.modified" - // semantic conventions. It represents the time when the file content was last - // modified, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - FileModifiedKey = attribute.Key("file.modified") - - // FileNameKey is the attribute Key conforming to the "file.name" semantic - // conventions. It represents the name of the file including the extension, - // without the directory. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.png" - FileNameKey = attribute.Key("file.name") - - // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" - // semantic conventions. It represents the user ID (UID) or security identifier - // (SID) of the file owner. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1000" - FileOwnerIDKey = attribute.Key("file.owner.id") - - // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" - // semantic conventions. It represents the username of the file owner. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - FileOwnerNameKey = attribute.Key("file.owner.name") - - // FilePathKey is the attribute Key conforming to the "file.path" semantic - // conventions. It represents the full path to the file, including the file - // name. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" - FilePathKey = attribute.Key("file.path") - - // FileSizeKey is the attribute Key conforming to the "file.size" semantic - // conventions. It represents the file size in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FileSizeKey = attribute.Key("file.size") - - // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the - // "file.symbolic_link.target_path" semantic conventions. It represents the path - // to the target of a symbolic link. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/usr/bin/python3" - // Note: This attribute is only applicable to symbolic links. - FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") -) - -// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" -// semantic conventions. It represents the time when the file was last accessed, -// in ISO 8601 format. -func FileAccessed(val string) attribute.KeyValue { - return FileAccessedKey.String(val) -} - -// FileAttributes returns an attribute KeyValue conforming to the -// "file.attributes" semantic conventions. It represents the array of file -// attributes. 
-func FileAttributes(val ...string) attribute.KeyValue { - return FileAttributesKey.StringSlice(val) -} - -// FileChanged returns an attribute KeyValue conforming to the "file.changed" -// semantic conventions. It represents the time when the file attributes or -// metadata was last changed, in ISO 8601 format. -func FileChanged(val string) attribute.KeyValue { - return FileChangedKey.String(val) -} - -// FileCreated returns an attribute KeyValue conforming to the "file.created" -// semantic conventions. It represents the time when the file was created, in ISO -// 8601 format. -func FileCreated(val string) attribute.KeyValue { - return FileCreatedKey.String(val) -} - -// FileDirectory returns an attribute KeyValue conforming to the "file.directory" -// semantic conventions. It represents the directory where the file is located. -// It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the "file.extension" -// semantic conventions. It represents the file extension, excluding the leading -// dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" -// semantic conventions. It represents the name of the fork. A fork is additional -// data associated with a filesystem object. -func FileForkName(val string) attribute.KeyValue { - return FileForkNameKey.String(val) -} - -// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" -// semantic conventions. It represents the primary Group ID (GID) of the file. -func FileGroupID(val string) attribute.KeyValue { - return FileGroupIDKey.String(val) -} - -// FileGroupName returns an attribute KeyValue conforming to the -// "file.group.name" semantic conventions. It represents the primary group name -// of the file. 
-func FileGroupName(val string) attribute.KeyValue { - return FileGroupNameKey.String(val) -} - -// FileInode returns an attribute KeyValue conforming to the "file.inode" -// semantic conventions. It represents the inode representing the file in the -// filesystem. -func FileInode(val string) attribute.KeyValue { - return FileInodeKey.String(val) -} - -// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic -// conventions. It represents the mode of the file in octal representation. -func FileMode(val string) attribute.KeyValue { - return FileModeKey.String(val) -} - -// FileModified returns an attribute KeyValue conforming to the "file.modified" -// semantic conventions. It represents the time when the file content was last -// modified, in ISO 8601 format. -func FileModified(val string) attribute.KeyValue { - return FileModifiedKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" semantic -// conventions. It represents the name of the file including the extension, -// without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" -// semantic conventions. It represents the user ID (UID) or security identifier -// (SID) of the file owner. -func FileOwnerID(val string) attribute.KeyValue { - return FileOwnerIDKey.String(val) -} - -// FileOwnerName returns an attribute KeyValue conforming to the -// "file.owner.name" semantic conventions. It represents the username of the file -// owner. -func FileOwnerName(val string) attribute.KeyValue { - return FileOwnerNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" semantic -// conventions. It represents the full path to the file, including the file name. -// It should include the drive letter, when appropriate. 
-func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" semantic -// conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the -// "file.symbolic_link.target_path" semantic conventions. It represents the path -// to the target of a symbolic link. -func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { - return FileSymbolicLinkTargetPathKey.String(val) -} - -// Namespace: gcp -const ( - // GCPAppHubApplicationContainerKey is the attribute Key conforming to the - // "gcp.apphub.application.container" semantic conventions. It represents the - // container within GCP where the AppHub application is defined. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "projects/my-container-project" - GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container") - - // GCPAppHubApplicationIDKey is the attribute Key conforming to the - // "gcp.apphub.application.id" semantic conventions. It represents the name of - // the application as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-application" - GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id") - - // GCPAppHubApplicationLocationKey is the attribute Key conforming to the - // "gcp.apphub.application.location" semantic conventions. It represents the GCP - // zone or region where the application is defined. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1" - GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location") - - // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the - // "gcp.apphub.service.criticality_type" semantic conventions. It represents the - // criticality of a service indicates its importance to the business. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub type enum] - // - // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type - GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type") - - // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the - // "gcp.apphub.service.environment_type" semantic conventions. It represents the - // environment of a service is the stage of a software lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub environment type] - // - // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 - GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type") - - // GCPAppHubServiceIDKey is the attribute Key conforming to the - // "gcp.apphub.service.id" semantic conventions. It represents the name of the - // service as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-service" - GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id") - - // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the - // "gcp.apphub.workload.criticality_type" semantic conventions. It represents - // the criticality of a workload indicates its importance to the business. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub type enum] - // - // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type - GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type") - - // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the - // "gcp.apphub.workload.environment_type" semantic conventions. It represents - // the environment of a workload is the stage of a software lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub environment type] - // - // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 - GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type") - - // GCPAppHubWorkloadIDKey is the attribute Key conforming to the - // "gcp.apphub.workload.id" semantic conventions. It represents the name of the - // workload as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-workload" - GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id") - - // GCPClientServiceKey is the attribute Key conforming to the - // "gcp.client.service" semantic conventions. It represents the identifies the - // Google Cloud service for which the official client library is intended. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "appengine", "run", "firestore", "alloydb", "spanner" - // Note: Intended to be a stable identifier for Google Cloud client libraries - // that is uniform across implementation languages. The value should be derived - // from the canonical service domain for the service; for example, - // 'foo.googleapis.com' should result in a value of 'foo'. 
- GCPClientServiceKey = attribute.Key("gcp.client.service") - - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of - // the Cloud Run [execution] being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`] environment variable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "job-name-xxxx", "sample-job-mdw84" - // - // [execution]: https://cloud.google.com/run/docs/managing/job-executions - // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index - // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] - // environment variable. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 1 - // - // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") - - // GCPGCEInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname - // of a GCE instance. This is the full value of the default or [custom hostname] - // . - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-host1234.example.com", - // "sample-vm.us-west1-b.c.my-project.internal" - // - // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm - GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGCEInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. 
It represents the instance name - // of a GCE instance. This is the value provided by `host.name`, the visible - // name of the instance in the Cloud Console UI, and the prefix for the default - // hostname of the instance as defined by the [default internal DNS name]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "instance-1", "my-vm-name" - // - // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names - GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the -// "gcp.apphub.application.container" semantic conventions. It represents the -// container within GCP where the AppHub application is defined. -func GCPAppHubApplicationContainer(val string) attribute.KeyValue { - return GCPAppHubApplicationContainerKey.String(val) -} - -// GCPAppHubApplicationID returns an attribute KeyValue conforming to the -// "gcp.apphub.application.id" semantic conventions. It represents the name of -// the application as configured in AppHub. -func GCPAppHubApplicationID(val string) attribute.KeyValue { - return GCPAppHubApplicationIDKey.String(val) -} - -// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the -// "gcp.apphub.application.location" semantic conventions. It represents the GCP -// zone or region where the application is defined. -func GCPAppHubApplicationLocation(val string) attribute.KeyValue { - return GCPAppHubApplicationLocationKey.String(val) -} - -// GCPAppHubServiceID returns an attribute KeyValue conforming to the -// "gcp.apphub.service.id" semantic conventions. It represents the name of the -// service as configured in AppHub. 
-func GCPAppHubServiceID(val string) attribute.KeyValue { - return GCPAppHubServiceIDKey.String(val) -} - -// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the -// "gcp.apphub.workload.id" semantic conventions. It represents the name of the -// workload as configured in AppHub. -func GCPAppHubWorkloadID(val string) attribute.KeyValue { - return GCPAppHubWorkloadIDKey.String(val) -} - -// GCPClientService returns an attribute KeyValue conforming to the -// "gcp.client.service" semantic conventions. It represents the identifies the -// Google Cloud service for which the official client library is intended. -func GCPClientService(val string) attribute.KeyValue { - return GCPClientServiceKey.String(val) -} - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of -// the Cloud Run [execution] being run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`] environment variable. -// -// [execution]: https://cloud.google.com/run/docs/managing/job-executions -// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index -// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] -// environment variable. -// -// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. 
This is the full value of the default or [custom hostname] -// . -// -// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm -func GCPGCEInstanceHostname(val string) attribute.KeyValue { - return GCPGCEInstanceHostnameKey.String(val) -} - -// GCPGCEInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance name -// of a GCE instance. This is the value provided by `host.name`, the visible name -// of the instance in the Cloud Console UI, and the prefix for the default -// hostname of the instance as defined by the [default internal DNS name]. -// -// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names -func GCPGCEInstanceName(val string) attribute.KeyValue { - return GCPGCEInstanceNameKey.String(val) -} - -// Enum values for gcp.apphub.service.criticality_type -var ( - // Mission critical service. - // Stability: development - GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL") - // High impact. - // Stability: development - GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH") - // Medium impact. - // Stability: development - GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") - // Low impact. - // Stability: development - GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") -) - -// Enum values for gcp.apphub.service.environment_type -var ( - // Production environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") - // Staging environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") - // Test environment. 
- // Stability: development - GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") - // Development environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") -) - -// Enum values for gcp.apphub.workload.criticality_type -var ( - // Mission critical service. - // Stability: development - GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") - // High impact. - // Stability: development - GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") - // Medium impact. - // Stability: development - GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") - // Low impact. - // Stability: development - GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") -) - -// Enum values for gcp.apphub.workload.environment_type -var ( - // Production environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") - // Staging environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") - // Test environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") - // Development environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") -) - -// Namespace: gen_ai -const ( - // GenAIAgentDescriptionKey is the attribute Key conforming to the - // "gen_ai.agent.description" semantic conventions. It represents the free-form - // description of the GenAI agent provided by the application. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Helps with math problems", "Generates fiction stories" - GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") - - // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" - // semantic conventions. It represents the unique identifier of the GenAI agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" - GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") - - // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" - // semantic conventions. It represents the human-readable name of the GenAI - // agent provided by the application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Math Tutor", "Fiction Writer" - GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") - - // GenAIConversationIDKey is the attribute Key conforming to the - // "gen_ai.conversation.id" semantic conventions. It represents the unique - // identifier for a conversation (session, thread), used to store and correlate - // messages within this conversation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" - GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") - - // GenAIDataSourceIDKey is the attribute Key conforming to the - // "gen_ai.data_source.id" semantic conventions. It represents the data source - // identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "H7STPQYOND" - // Note: Data sources are used by AI agents and RAG applications to store - // grounding data. 
A data source may be an external database, object store, - // document collection, website, or any other storage system used by the GenAI - // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier - // used by the GenAI system rather than a name specific to the external storage, - // such as a database or object store. Semantic conventions referencing - // `gen_ai.data_source.id` MAY also leverage additional attributes, such as - // `db.*`, to further identify and describe the data source. - GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") - - // GenAIInputMessagesKey is the attribute Key conforming to the - // "gen_ai.input.messages" semantic conventions. It represents the chat history - // provided to the model as an input. - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n - // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n - // "parts": [\n {\n "type": "tool_call",\n "id": - // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n - // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n - // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n - // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" - // Note: Instrumentations MUST follow [Input messages JSON schema]. - // When the attribute is recorded on events, it MUST be recorded in structured - // form. When recorded on spans, it MAY be recorded as a JSON string if - // structured - // format is not supported and SHOULD be recorded in structured form otherwise. - // - // Messages MUST be provided in the order they were sent to the model. - // Instrumentations MAY provide a way for users to filter or truncate - // input messages. - // - // > [!Warning] - // > This attribute is likely to contain sensitive information including - // > user/PII data. 
- // - // See [Recording content on attributes] - // section for more details. - // - // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json - // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes - GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") - - // GenAIOperationNameKey is the attribute Key conforming to the - // "gen_ai.operation.name" semantic conventions. It represents the name of the - // operation being performed. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: If one of the predefined values applies, but specific system uses a - // different name it's RECOMMENDED to document it in the semantic conventions - // for specific GenAI system and use system-specific name in the - // instrumentation. If a different name is not documented, instrumentation - // libraries SHOULD use applicable predefined value. - GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") - - // GenAIOutputMessagesKey is the attribute Key conforming to the - // "gen_ai.output.messages" semantic conventions. It represents the messages - // returned by the model where each message represents a specific model response - // (choice, candidate). - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n - // "content": "The weather in Paris is currently rainy with a temperature of - // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n" - // Note: Instrumentations MUST follow [Output messages JSON schema] - // - // Each message represents a single output choice/candidate generated by - // the model. Each message corresponds to exactly one generation - // (choice/candidate) and vice versa - one choice cannot be split across - // multiple messages or one message cannot contain parts from multiple choices. 
- // - // When the attribute is recorded on events, it MUST be recorded in structured - // form. When recorded on spans, it MAY be recorded as a JSON string if - // structured - // format is not supported and SHOULD be recorded in structured form otherwise. - // - // Instrumentations MAY provide a way for users to filter or truncate - // output messages. - // - // > [!Warning] - // > This attribute is likely to contain sensitive information including - // > user/PII data. - // - // See [Recording content on attributes] - // section for more details. - // - // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json - // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes - GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages") - - // GenAIOutputTypeKey is the attribute Key conforming to the - // "gen_ai.output.type" semantic conventions. It represents the represents the - // content type requested by the client. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This attribute SHOULD be used when the client requests output of a - // specific type. The model may return zero or more outputs of this type. - // This attribute specifies the output modality and not the actual output - // format. For example, if an image is requested, the actual output could be a - // URL pointing to an image file. - // Additional output format details may be recorded in the future in the - // `gen_ai.output.{type}.*` attributes. - GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") - - // GenAIProviderNameKey is the attribute Key conforming to the - // "gen_ai.provider.name" semantic conventions. It represents the Generative AI - // provider as identified by the client or server instrumentation. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The attribute SHOULD be set based on the instrumentation's best - // knowledge and may differ from the actual model provider. - // - // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms - // are accessible using the OpenAI REST API and corresponding client libraries, - // but may proxy or host models from different providers. - // - // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` - // attributes may help identify the actual system in use. - // - // The `gen_ai.provider.name` attribute acts as a discriminator that - // identifies the GenAI telemetry format flavor specific to that provider - // within GenAI semantic conventions. - // It SHOULD be set consistently with provider-specific attributes and signals. - // For example, GenAI spans, metrics, and events related to AWS Bedrock - // should have the `gen_ai.provider.name` set to `aws.bedrock` and include - // applicable `aws.bedrock.*` attributes and are not expected to include - // `openai.*` attributes. - GenAIProviderNameKey = attribute.Key("gen_ai.provider.name") - - // GenAIRequestChoiceCountKey is the attribute Key conforming to the - // "gen_ai.request.choice.count" semantic conventions. It represents the target - // number of candidate completions to return. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3 - GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count") - - // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the - // "gen_ai.request.encoding_formats" semantic conventions. It represents the - // encoding formats requested in an embeddings operation, if specified. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "base64"], ["float", "binary" - // Note: In some GenAI systems the encoding formats are called embedding types. - // Also, some GenAI systems only accept a single format per request. - GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats") - - // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the - // "gen_ai.request.frequency_penalty" semantic conventions. It represents the - // frequency penalty setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.1 - GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty") - - // GenAIRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum - // number of tokens the model generates for a request. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAIRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of the - // GenAI model a request is being made to. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: gpt-4 - GenAIRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the - // "gen_ai.request.presence_penalty" semantic conventions. It represents the - // presence penalty setting for the GenAI request. 
- // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.1 - GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty") - - // GenAIRequestSeedKey is the attribute Key conforming to the - // "gen_ai.request.seed" semantic conventions. It represents the requests with - // same seed value more likely to return same result. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed") - - // GenAIRequestStopSequencesKey is the attribute Key conforming to the - // "gen_ai.request.stop_sequences" semantic conventions. It represents the list - // of sequences that the model will use to stop generating further tokens. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "forest", "lived" - GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences") - - // GenAIRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.0 - GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAIRequestTopKKey is the attribute Key conforming to the - // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling - // setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k") - - // GenAIRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling - // setting for the GenAI request. 
- // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAIResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to each - // generation received. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "stop"], ["stop", "length" - GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAIResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "chatcmpl-123" - GenAIResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAIResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of the - // model that generated the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "gpt-4-0613" - GenAIResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAISystemInstructionsKey is the attribute Key conforming to the - // "gen_ai.system_instructions" semantic conventions. It represents the system - // message or instructions provided to the GenAI model separately from the chat - // history. 
- // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet - // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type": - // "text",\n "content": "You are a language translator."\n },\n {\n "type": - // "text",\n "content": "Your mission is to translate text in English to - // French."\n }\n]\n" - // Note: This attribute SHOULD be used when the corresponding provider or API - // allows to provide system instructions or messages separately from the - // chat history. - // - // Instructions that are part of the chat history SHOULD be recorded in - // `gen_ai.input.messages` attribute instead. - // - // Instrumentations MUST follow [System instructions JSON schema]. - // - // When recorded on spans, it MAY be recorded as a JSON string if structured - // format is not supported and SHOULD be recorded in structured form otherwise. - // - // Instrumentations MAY provide a way for users to filter or truncate - // system instructions. - // - // > [!Warning] - // > This attribute may contain sensitive information. - // - // See [Recording content on attributes] - // section for more details. - // - // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json - // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes - GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions") - - // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" - // semantic conventions. It represents the type of token being counted. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "input", "output" - GenAITokenTypeKey = attribute.Key("gen_ai.token.type") - - // GenAIToolCallIDKey is the attribute Key conforming to the - // "gen_ai.tool.call.id" semantic conventions. It represents the tool call - // identifier. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4" - GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id") - - // GenAIToolDescriptionKey is the attribute Key conforming to the - // "gen_ai.tool.description" semantic conventions. It represents the tool - // description. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Multiply two numbers" - GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description") - - // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name" - // semantic conventions. It represents the name of the tool utilized by the - // agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Flights" - GenAIToolNameKey = attribute.Key("gen_ai.tool.name") - - // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type" - // semantic conventions. It represents the type of the tool utilized by the - // agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "function", "extension", "datastore" - // Note: Extension: A tool executed on the agent-side to directly call external - // APIs, bridging the gap between the agent and real-world systems. - // Agent-side operations involve actions that are performed by the agent on the - // server or within the agent's controlled environment. - // Function: A tool executed on the client-side, where the agent generates - // parameters for a predefined function, and the client executes the logic. - // Client-side operations are actions taken on the user's end or within the - // client application. - // Datastore: A tool used by the agent to access and query structured or - // unstructured external data for retrieval-augmented tasks or knowledge - // updates. 
- GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") - - // GenAIUsageInputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of - // tokens used in the GenAI input (prompt). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") - - // GenAIUsageOutputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.output_tokens" semantic conventions. It represents the number - // of tokens used in the GenAI response (completion). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 180 - GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") -) - -// GenAIAgentDescription returns an attribute KeyValue conforming to the -// "gen_ai.agent.description" semantic conventions. It represents the free-form -// description of the GenAI agent provided by the application. -func GenAIAgentDescription(val string) attribute.KeyValue { - return GenAIAgentDescriptionKey.String(val) -} - -// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" -// semantic conventions. It represents the unique identifier of the GenAI agent. -func GenAIAgentID(val string) attribute.KeyValue { - return GenAIAgentIDKey.String(val) -} - -// GenAIAgentName returns an attribute KeyValue conforming to the -// "gen_ai.agent.name" semantic conventions. It represents the human-readable -// name of the GenAI agent provided by the application. -func GenAIAgentName(val string) attribute.KeyValue { - return GenAIAgentNameKey.String(val) -} - -// GenAIConversationID returns an attribute KeyValue conforming to the -// "gen_ai.conversation.id" semantic conventions. It represents the unique -// identifier for a conversation (session, thread), used to store and correlate -// messages within this conversation. 
-func GenAIConversationID(val string) attribute.KeyValue { - return GenAIConversationIDKey.String(val) -} - -// GenAIDataSourceID returns an attribute KeyValue conforming to the -// "gen_ai.data_source.id" semantic conventions. It represents the data source -// identifier. -func GenAIDataSourceID(val string) attribute.KeyValue { - return GenAIDataSourceIDKey.String(val) -} - -// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the -// "gen_ai.request.choice.count" semantic conventions. It represents the target -// number of candidate completions to return. -func GenAIRequestChoiceCount(val int) attribute.KeyValue { - return GenAIRequestChoiceCountKey.Int(val) -} - -// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the -// "gen_ai.request.encoding_formats" semantic conventions. It represents the -// encoding formats requested in an embeddings operation, if specified. -func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { - return GenAIRequestEncodingFormatsKey.StringSlice(val) -} - -// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the -// "gen_ai.request.frequency_penalty" semantic conventions. It represents the -// frequency penalty setting for the GenAI request. -func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { - return GenAIRequestFrequencyPenaltyKey.Float64(val) -} - -// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the model generates for a request. -func GenAIRequestMaxTokens(val int) attribute.KeyValue { - return GenAIRequestMaxTokensKey.Int(val) -} - -// GenAIRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// GenAI model a request is being made to. 
-func GenAIRequestModel(val string) attribute.KeyValue { - return GenAIRequestModelKey.String(val) -} - -// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the -// "gen_ai.request.presence_penalty" semantic conventions. It represents the -// presence penalty setting for the GenAI request. -func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { - return GenAIRequestPresencePenaltyKey.Float64(val) -} - -// GenAIRequestSeed returns an attribute KeyValue conforming to the -// "gen_ai.request.seed" semantic conventions. It represents the requests with -// same seed value more likely to return same result. -func GenAIRequestSeed(val int) attribute.KeyValue { - return GenAIRequestSeedKey.Int(val) -} - -// GenAIRequestStopSequences returns an attribute KeyValue conforming to the -// "gen_ai.request.stop_sequences" semantic conventions. It represents the list -// of sequences that the model will use to stop generating further tokens. -func GenAIRequestStopSequences(val ...string) attribute.KeyValue { - return GenAIRequestStopSequencesKey.StringSlice(val) -} - -// GenAIRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the GenAI request. -func GenAIRequestTemperature(val float64) attribute.KeyValue { - return GenAIRequestTemperatureKey.Float64(val) -} - -// GenAIRequestTopK returns an attribute KeyValue conforming to the -// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling -// setting for the GenAI request. -func GenAIRequestTopK(val float64) attribute.KeyValue { - return GenAIRequestTopKKey.Float64(val) -} - -// GenAIRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling -// setting for the GenAI request. 
-func GenAIRequestTopP(val float64) attribute.KeyValue { - return GenAIRequestTopPKey.Float64(val) -} - -// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the -// "gen_ai.response.finish_reasons" semantic conventions. It represents the array -// of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAIResponseFinishReasonsKey.StringSlice(val) -} - -// GenAIResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique identifier -// for the completion. -func GenAIResponseID(val string) attribute.KeyValue { - return GenAIResponseIDKey.String(val) -} - -// GenAIResponseModel returns an attribute KeyValue conforming to the -// "gen_ai.response.model" semantic conventions. It represents the name of the -// model that generated the response. -func GenAIResponseModel(val string) attribute.KeyValue { - return GenAIResponseModelKey.String(val) -} - -// GenAIToolCallID returns an attribute KeyValue conforming to the -// "gen_ai.tool.call.id" semantic conventions. It represents the tool call -// identifier. -func GenAIToolCallID(val string) attribute.KeyValue { - return GenAIToolCallIDKey.String(val) -} - -// GenAIToolDescription returns an attribute KeyValue conforming to the -// "gen_ai.tool.description" semantic conventions. It represents the tool -// description. -func GenAIToolDescription(val string) attribute.KeyValue { - return GenAIToolDescriptionKey.String(val) -} - -// GenAIToolName returns an attribute KeyValue conforming to the -// "gen_ai.tool.name" semantic conventions. It represents the name of the tool -// utilized by the agent. -func GenAIToolName(val string) attribute.KeyValue { - return GenAIToolNameKey.String(val) -} - -// GenAIToolType returns an attribute KeyValue conforming to the -// "gen_ai.tool.type" semantic conventions. 
It represents the type of the tool -// utilized by the agent. -func GenAIToolType(val string) attribute.KeyValue { - return GenAIToolTypeKey.String(val) -} - -// GenAIUsageInputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of -// tokens used in the GenAI input (prompt). -func GenAIUsageInputTokens(val int) attribute.KeyValue { - return GenAIUsageInputTokensKey.Int(val) -} - -// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of -// tokens used in the GenAI response (completion). -func GenAIUsageOutputTokens(val int) attribute.KeyValue { - return GenAIUsageOutputTokensKey.Int(val) -} - -// Enum values for gen_ai.operation.name -var ( - // Chat completion operation such as [OpenAI Chat API] - // Stability: development - // - // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat - GenAIOperationNameChat = GenAIOperationNameKey.String("chat") - // Multimodal content generation operation such as [Gemini Generate Content] - // Stability: development - // - // [Gemini Generate Content]: https://ai.google.dev/api/generate-content - GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") - // Text completions operation such as [OpenAI Completions API (Legacy)] - // Stability: development - // - // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions - GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") - // Embeddings operation such as [OpenAI Create embeddings API] - // Stability: development - // - // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create - GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") - // Create GenAI agent - // Stability: development - GenAIOperationNameCreateAgent = 
GenAIOperationNameKey.String("create_agent") - // Invoke GenAI agent - // Stability: development - GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") - // Execute a tool - // Stability: development - GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") -) - -// Enum values for gen_ai.output.type -var ( - // Plain text - // Stability: development - GenAIOutputTypeText = GenAIOutputTypeKey.String("text") - // JSON object with known or unknown schema - // Stability: development - GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") - // Image - // Stability: development - GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") - // Speech - // Stability: development - GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") -) - -// Enum values for gen_ai.provider.name -var ( - // [OpenAI] - // Stability: development - // - // [OpenAI]: https://openai.com/ - GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") - // Any Google generative AI endpoint - // Stability: development - GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") - // [Vertex AI] - // Stability: development - // - // [Vertex AI]: https://cloud.google.com/vertex-ai - GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") - // [Gemini] - // Stability: development - // - // [Gemini]: https://cloud.google.com/products/gemini - GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") - // [Anthropic] - // Stability: development - // - // [Anthropic]: https://www.anthropic.com/ - GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") - // [Cohere] - // Stability: development - // - // [Cohere]: https://cohere.com/ - GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") - // Azure AI Inference - // Stability: development - GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") - // [Azure OpenAI] - // Stability: development - // - // 
[Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ - GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") - // [IBM Watsonx AI] - // Stability: development - // - // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai - GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai") - // [AWS Bedrock] - // Stability: development - // - // [AWS Bedrock]: https://aws.amazon.com/bedrock - GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock") - // [Perplexity] - // Stability: development - // - // [Perplexity]: https://www.perplexity.ai/ - GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity") - // [xAI] - // Stability: development - // - // [xAI]: https://x.ai/ - GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai") - // [DeepSeek] - // Stability: development - // - // [DeepSeek]: https://www.deepseek.com/ - GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek") - // [Groq] - // Stability: development - // - // [Groq]: https://groq.com/ - GenAIProviderNameGroq = GenAIProviderNameKey.String("groq") - // [Mistral AI] - // Stability: development - // - // [Mistral AI]: https://mistral.ai/ - GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai") -) - -// Enum values for gen_ai.token.type -var ( - // Input tokens (prompt, input, etc.) - // Stability: development - GenAITokenTypeInput = GenAITokenTypeKey.String("input") - // Output tokens (completion, response, etc.) - // Stability: development - GenAITokenTypeOutput = GenAITokenTypeKey.String("output") -) - -// Namespace: geo -const ( - // GeoContinentCodeKey is the attribute Key conforming to the - // "geo.continent.code" semantic conventions. It represents the two-letter code - // representing continent’s name. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - GeoContinentCodeKey = attribute.Key("geo.continent.code") - - // GeoCountryISOCodeKey is the attribute Key conforming to the - // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO - // Country Code ([ISO 3166-1 alpha2]). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CA" - // - // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes - GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") - - // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" - // semantic conventions. It represents the locality name. Represents the name of - // a city, town, village, or similar populated place. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Montreal", "Berlin" - GeoLocalityNameKey = attribute.Key("geo.locality.name") - - // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" - // semantic conventions. It represents the latitude of the geo location in - // [WGS84]. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 45.505918 - // - // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 - GeoLocationLatKey = attribute.Key("geo.location.lat") - - // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" - // semantic conventions. It represents the longitude of the geo location in - // [WGS84]. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: -73.61483 - // - // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 - GeoLocationLonKey = attribute.Key("geo.location.lon") - - // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" - // semantic conventions. 
It represents the postal code associated with the - // location. Values appropriate for this field may also be known as a postcode - // or ZIP code and will vary widely from country to country. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "94040" - GeoPostalCodeKey = attribute.Key("geo.postal_code") - - // GeoRegionISOCodeKey is the attribute Key conforming to the - // "geo.region.iso_code" semantic conventions. It represents the region ISO code - // ([ISO 3166-2]). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CA-QC" - // - // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 - GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") -) - -// GeoCountryISOCode returns an attribute KeyValue conforming to the -// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO -// Country Code ([ISO 3166-1 alpha2]). -// -// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes -func GeoCountryISOCode(val string) attribute.KeyValue { - return GeoCountryISOCodeKey.String(val) -} - -// GeoLocalityName returns an attribute KeyValue conforming to the -// "geo.locality.name" semantic conventions. It represents the locality name. -// Represents the name of a city, town, village, or similar populated place. -func GeoLocalityName(val string) attribute.KeyValue { - return GeoLocalityNameKey.String(val) -} - -// GeoLocationLat returns an attribute KeyValue conforming to the -// "geo.location.lat" semantic conventions. It represents the latitude of the geo -// location in [WGS84]. -// -// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 -func GeoLocationLat(val float64) attribute.KeyValue { - return GeoLocationLatKey.Float64(val) -} - -// GeoLocationLon returns an attribute KeyValue conforming to the -// "geo.location.lon" semantic conventions. It represents the longitude of the -// geo location in [WGS84]. 
-// -// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 -func GeoLocationLon(val float64) attribute.KeyValue { - return GeoLocationLonKey.Float64(val) -} - -// GeoPostalCode returns an attribute KeyValue conforming to the -// "geo.postal_code" semantic conventions. It represents the postal code -// associated with the location. Values appropriate for this field may also be -// known as a postcode or ZIP code and will vary widely from country to country. -func GeoPostalCode(val string) attribute.KeyValue { - return GeoPostalCodeKey.String(val) -} - -// GeoRegionISOCode returns an attribute KeyValue conforming to the -// "geo.region.iso_code" semantic conventions. It represents the region ISO code -// ([ISO 3166-2]). -// -// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 -func GeoRegionISOCode(val string) attribute.KeyValue { - return GeoRegionISOCodeKey.String(val) -} - -// Enum values for geo.continent.code -var ( - // Africa - // Stability: development - GeoContinentCodeAf = GeoContinentCodeKey.String("AF") - // Antarctica - // Stability: development - GeoContinentCodeAn = GeoContinentCodeKey.String("AN") - // Asia - // Stability: development - GeoContinentCodeAs = GeoContinentCodeKey.String("AS") - // Europe - // Stability: development - GeoContinentCodeEu = GeoContinentCodeKey.String("EU") - // North America - // Stability: development - GeoContinentCodeNa = GeoContinentCodeKey.String("NA") - // Oceania - // Stability: development - GeoContinentCodeOc = GeoContinentCodeKey.String("OC") - // South America - // Stability: development - GeoContinentCodeSa = GeoContinentCodeKey.String("SA") -) - -// Namespace: go -const ( - // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" - // semantic conventions. It represents the type of memory. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "other", "stack" - GoMemoryTypeKey = attribute.Key("go.memory.type") -) - -// Enum values for go.memory.type -var ( - // Memory allocated from the heap that is reserved for stack space, whether or - // not it is currently in-use. - // Stability: development - GoMemoryTypeStack = GoMemoryTypeKey.String("stack") - // Memory used by the Go runtime, excluding other categories of memory usage - // described in this enumeration. - // Stability: development - GoMemoryTypeOther = GoMemoryTypeKey.String("other") -) - -// Namespace: graphql -const ( - // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" - // semantic conventions. It represents the GraphQL document being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: query findBookById { bookById(id: ?) { name } } - // Note: The value may be sanitized to exclude sensitive information. - GraphQLDocumentKey = attribute.Key("graphql.document") - - // GraphQLOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of the - // operation being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: findBookById - GraphQLOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphQLOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of the - // operation being executed. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "query", "mutation", "subscription" - GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") -) - -// GraphQLDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. 
It represents the GraphQL document -// being executed. -func GraphQLDocument(val string) attribute.KeyValue { - return GraphQLDocumentKey.String(val) -} - -// GraphQLOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphQLOperationName(val string) attribute.KeyValue { - return GraphQLOperationNameKey.String(val) -} - -// Enum values for graphql.operation.type -var ( - // GraphQL query - // Stability: development - GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") - // GraphQL mutation - // Stability: development - GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") - // GraphQL subscription - // Stability: development - GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") -) - -// Namespace: heroku -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit hash - // for the current release. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents the - // time and date the release was created. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2022-10-23T18:00:42Z" - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" -// semantic conventions. It represents the unique identifier for the application. -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release. -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the -// "heroku.release.creation_timestamp" semantic conventions. It represents the -// time and date the release was created. -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// Namespace: host -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is running - // on. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of - // level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" - // semantic conventions. It represents the family or generation of the CPU. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6", "PA-RISC 1.1e" - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" - // semantic conventions. It represents the model identifier. It provides more - // granular information about the CPU, distinguishing it from other CPUs within - // the same family. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6", "9000/778/B180L" - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" - // semantic conventions. It represents the stepping or core revisions. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1", "r1p1" - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "GenuineIntel" - // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX - // registers. Writing these to memory in this order results in a 12-character - // string. 
- // - // [CPUID]: https://wiki.osdev.org/CPUID - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be the - // instance_id assigned by the cloud provider. For non-containerized systems, - // this should be the `machine-id`. See the table below for the sources to use - // to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "fdbf79e8af94cb7f9e8df36789187052" - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the VM image ID or host OS image ID. For - // Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ami-07b06b442921831e5" - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the "host.image.name" - // semantic conventions. It represents the name of the VM image or OS install - // the host was instantiated from. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version string - // of the VM image or host OS as defined in [Version Attributes]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0.1" - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, excluding - // loopback interfaces. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC 5952] format. - // - // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, excluding - // loopback interfaces. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as - // hyphen-separated octets in uppercase hexadecimal form from most to least - // significant. - // - // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified hostname, - // or another name specified by the user. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-test" - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "n1-standard-1" - HostTypeKey = attribute.Key("host.type") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or generation -// of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model identifier. -// It provides more granular information about the CPU, distinguishing it from -// other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. 
-func HostCPUStepping(val string) attribute.KeyValue { - return HostCPUSteppingKey.String(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. -func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use to -// determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the "host.image.id" -// semantic conventions. It represents the VM image ID or host OS image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM image -// or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string of -// the VM image or host OS as defined in [Version Attributes]. -// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. 
It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic -// conventions. It represents the available MAC addresses of the host, excluding -// loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" semantic -// conventions. It represents the name of the host. On Unix systems, it may -// contain what the hostname command returns, or the fully qualified hostname, or -// another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" semantic -// conventions. It represents the type of host. For Cloud, this must be the -// machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Enum values for host.arch -var ( - // AMD64 - // Stability: development - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - // Stability: development - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - // Stability: development - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - // Stability: development - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - // Stability: development - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - // Stability: development - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - // Stability: development - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - // Stability: development - HostArchX86 = HostArchKey.String("x86") -) - -// Namespace: http -const ( - // HTTPConnectionStateKey is the attribute Key conforming to the - // "http.connection.state" semantic conventions. 
It represents the state of the - // HTTP connection in the HTTP connection pool. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "active", "idle" - HTTPConnectionStateKey = attribute.Key("http.connection.state") - - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of the - // request payload body in bytes. This is the number of bytes transferred - // excluding headers and is often, but not always, present as the - // [Content-Length] header. For requests using transport encoding, this should - // be the compressed size. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the HTTP request - // method. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GET", "POST", "HEAD" - // Note: HTTP request method value SHOULD be "known" to the instrumentation. - // By default, this convention defines "known" methods as the ones listed in - // [RFC9110] - // and the PATCH method defined in [RFC5789]. - // - // If the HTTP request method is not known to instrumentation, it MUST set the - // `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. 
If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of - // case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is not a - // list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods to be - // case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - // - // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods - // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GeT", "ACL", "foo" - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including redirects). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending (e.g. - // redirection, authorization failure, 503 Server Unavailable, network issues, - // or any other). 
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" - // semantic conventions. It represents the total size of the request in bytes. - // This should be the total number of bytes sent over the wire, including the - // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request - // body if any. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - HTTPRequestSizeKey = attribute.Key("http.request.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size of the - // response payload body in bytes. This is the number of bytes transferred - // excluding headers and is often, but not always, present as the - // [Content-Length] header. For requests using transport encoding, this should - // be the compressed size. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseSizeKey is the attribute Key conforming to the - // "http.response.size" semantic conventions. It represents the total size of - // the response in bytes. This should be the total number of bytes sent over the - // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), - // headers, and response body and trailers if any. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - HTTPResponseSizeKey = attribute.Key("http.response.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status code]. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 200 - // - // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic - // conventions. It represents the matched route, that is, the path template in - // the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "/users/:userID?", "{controller}/{action}/{id?}" - // Note: MUST NOT be populated when this is not supported by the HTTP server - // framework as the route attribute should have low-cardinality and the URI path - // can NOT substitute it. - // SHOULD include the [application root] if there is one. - // - // [application root]: /docs/http/http-spans.md#http-server-definitions - HTTPRouteKey = attribute.Key("http.route") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length] header. For requests using transport encoding, this should be -// the compressed size. -// -// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestHeader returns an attribute KeyValue conforming to the -// "http.request.header" semantic conventions. It represents the HTTP request -// headers, `` being the normalized HTTP Header name (lowercase), the value -// being the header values. 
-func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("http.request.header."+key, val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of the -// request in bytes. This should be the total number of bytes sent over the wire, -// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, -// and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of the -// response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length] header. For requests using transport encoding, this should be -// the compressed size. -// -// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseHeader returns an attribute KeyValue conforming to the -// "http.response.header" semantic conventions. 
It represents the HTTP response -// headers, `` being the normalized HTTP Header name (lowercase), the value -// being the header values. -func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("http.response.header."+key, val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. It represents the total size of the -// response in bytes. This should be the total number of bytes sent over the -// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the -// [HTTP response status code]. -// -// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Enum values for http.connection.state -var ( - // active state. - // Stability: development - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state. - // Stability: development - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -// Enum values for http.request.method -var ( - // CONNECT method. - // Stability: stable - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method. - // Stability: stable - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method. 
- // Stability: stable - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method. - // Stability: stable - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method. - // Stability: stable - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method. - // Stability: stable - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method. - // Stability: stable - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method. - // Stability: stable - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method. - // Stability: stable - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of. - // Stability: stable - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// Namespace: hw -const ( - // HwBatteryCapacityKey is the attribute Key conforming to the - // "hw.battery.capacity" semantic conventions. It represents the design capacity - // in Watts-hours or Amper-hours. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9.3Ah", "50Wh" - HwBatteryCapacityKey = attribute.Key("hw.battery.capacity") - - // HwBatteryChemistryKey is the attribute Key conforming to the - // "hw.battery.chemistry" semantic conventions. It represents the battery - // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Li-ion", "NiMH" - // - // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html - HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") - - // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" - // semantic conventions. It represents the current state of the battery. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwBatteryStateKey = attribute.Key("hw.battery.state") - - // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" - // semantic conventions. It represents the BIOS version of the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2.3" - HwBiosVersionKey = attribute.Key("hw.bios_version") - - // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" - // semantic conventions. It represents the driver version for the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10.2.1-3" - HwDriverVersionKey = attribute.Key("hw.driver_version") - - // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" - // semantic conventions. It represents the type of the enclosure (useful for - // modular systems). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Computer", "Storage", "Switch" - HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") - - // HwFirmwareVersionKey is the attribute Key conforming to the - // "hw.firmware_version" semantic conventions. It represents the firmware - // version of the hardware component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2.0.1" - HwFirmwareVersionKey = attribute.Key("hw.firmware_version") - - // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic - // conventions. It represents the type of task the GPU is performing. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwGpuTaskKey = attribute.Key("hw.gpu.task") - - // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. 
- // It represents an identifier for the hardware component, unique within the - // monitored host. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "win32battery_battery_testsysa33_1" - HwIDKey = attribute.Key("hw.id") - - // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" - // semantic conventions. It represents the type of limit for hardware - // components. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwLimitTypeKey = attribute.Key("hw.limit_type") - - // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the - // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID - // Level of the logical disk. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "RAID0+1", "RAID5", "RAID10" - HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") - - // HwLogicalDiskStateKey is the attribute Key conforming to the - // "hw.logical_disk.state" semantic conventions. It represents the state of the - // logical disk space usage. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") - - // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" - // semantic conventions. It represents the type of the memory module. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "DDR4", "DDR5", "LPDDR5" - HwMemoryTypeKey = attribute.Key("hw.memory.type") - - // HwModelKey is the attribute Key conforming to the "hw.model" semantic - // conventions. It represents the descriptive model name of the hardware - // component. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" - HwModelKey = attribute.Key("hw.model") - - // HwNameKey is the attribute Key conforming to the "hw.name" semantic - // conventions. It represents an easily-recognizable name for the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "eth0" - HwNameKey = attribute.Key("hw.name") - - // HwNetworkLogicalAddressesKey is the attribute Key conforming to the - // "hw.network.logical_addresses" semantic conventions. It represents the - // logical addresses of the adapter (e.g. IP address, or WWPN). - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "172.16.8.21", "57.11.193.42" - HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") - - // HwNetworkPhysicalAddressKey is the attribute Key conforming to the - // "hw.network.physical_address" semantic conventions. It represents the - // physical address of the adapter (e.g. MAC address, or WWNN). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "00-90-F5-E9-7B-36" - HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") - - // HwParentKey is the attribute Key conforming to the "hw.parent" semantic - // conventions. It represents the unique identifier of the parent component - // (typically the `hw.id` attribute of the enclosure, or disk controller). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "dellStorage_perc_0" - HwParentKey = attribute.Key("hw.parent") - - // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the - // "hw.physical_disk.smart_attribute" semantic conventions. It represents the - // [S.M.A.R.T.] 
(Self-Monitoring, Analysis, and Reporting Technology) attribute - // of the physical disk. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" - // - // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. - HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") - - // HwPhysicalDiskStateKey is the attribute Key conforming to the - // "hw.physical_disk.state" semantic conventions. It represents the state of the - // physical disk endurance utilization. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") - - // HwPhysicalDiskTypeKey is the attribute Key conforming to the - // "hw.physical_disk.type" semantic conventions. It represents the type of the - // physical disk. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "HDD", "SSD", "10K" - HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") - - // HwSensorLocationKey is the attribute Key conforming to the - // "hw.sensor_location" semantic conventions. It represents the location of the - // sensor. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 - // V3_3", "MAIN_12V", "CPU_VCORE" - HwSensorLocationKey = attribute.Key("hw.sensor_location") - - // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" - // semantic conventions. It represents the serial number of the hardware - // component. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CNFCP0123456789" - HwSerialNumberKey = attribute.Key("hw.serial_number") - - // HwStateKey is the attribute Key conforming to the "hw.state" semantic - // conventions. It represents the current state of the component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwStateKey = attribute.Key("hw.state") - - // HwTapeDriveOperationTypeKey is the attribute Key conforming to the - // "hw.tape_drive.operation_type" semantic conventions. It represents the type - // of tape drive operation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") - - // HwTypeKey is the attribute Key conforming to the "hw.type" semantic - // conventions. It represents the type of the component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: Describes the category of the hardware component for which `hw.state` - // is being reported. For example, `hw.type=temperature` along with - // `hw.state=degraded` would indicate that the temperature of the hardware - // component has been reported as `degraded`. - HwTypeKey = attribute.Key("hw.type") - - // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic - // conventions. It represents the vendor name of the hardware component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" - HwVendorKey = attribute.Key("hw.vendor") -) - -// HwBatteryCapacity returns an attribute KeyValue conforming to the -// "hw.battery.capacity" semantic conventions. It represents the design capacity -// in Watts-hours or Amper-hours. 
-func HwBatteryCapacity(val string) attribute.KeyValue { - return HwBatteryCapacityKey.String(val) -} - -// HwBatteryChemistry returns an attribute KeyValue conforming to the -// "hw.battery.chemistry" semantic conventions. It represents the battery -// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. -// -// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html -func HwBatteryChemistry(val string) attribute.KeyValue { - return HwBatteryChemistryKey.String(val) -} - -// HwBiosVersion returns an attribute KeyValue conforming to the -// "hw.bios_version" semantic conventions. It represents the BIOS version of the -// hardware component. -func HwBiosVersion(val string) attribute.KeyValue { - return HwBiosVersionKey.String(val) -} - -// HwDriverVersion returns an attribute KeyValue conforming to the -// "hw.driver_version" semantic conventions. It represents the driver version for -// the hardware component. -func HwDriverVersion(val string) attribute.KeyValue { - return HwDriverVersionKey.String(val) -} - -// HwEnclosureType returns an attribute KeyValue conforming to the -// "hw.enclosure.type" semantic conventions. It represents the type of the -// enclosure (useful for modular systems). -func HwEnclosureType(val string) attribute.KeyValue { - return HwEnclosureTypeKey.String(val) -} - -// HwFirmwareVersion returns an attribute KeyValue conforming to the -// "hw.firmware_version" semantic conventions. It represents the firmware version -// of the hardware component. -func HwFirmwareVersion(val string) attribute.KeyValue { - return HwFirmwareVersionKey.String(val) -} - -// HwID returns an attribute KeyValue conforming to the "hw.id" semantic -// conventions. It represents an identifier for the hardware component, unique -// within the monitored host. 
-func HwID(val string) attribute.KeyValue { - return HwIDKey.String(val) -} - -// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the -// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID -// Level of the logical disk. -func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { - return HwLogicalDiskRaidLevelKey.String(val) -} - -// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" -// semantic conventions. It represents the type of the memory module. -func HwMemoryType(val string) attribute.KeyValue { - return HwMemoryTypeKey.String(val) -} - -// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic -// conventions. It represents the descriptive model name of the hardware -// component. -func HwModel(val string) attribute.KeyValue { - return HwModelKey.String(val) -} - -// HwName returns an attribute KeyValue conforming to the "hw.name" semantic -// conventions. It represents an easily-recognizable name for the hardware -// component. -func HwName(val string) attribute.KeyValue { - return HwNameKey.String(val) -} - -// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the -// "hw.network.logical_addresses" semantic conventions. It represents the logical -// addresses of the adapter (e.g. IP address, or WWPN). -func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { - return HwNetworkLogicalAddressesKey.StringSlice(val) -} - -// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the -// "hw.network.physical_address" semantic conventions. It represents the physical -// address of the adapter (e.g. MAC address, or WWNN). -func HwNetworkPhysicalAddress(val string) attribute.KeyValue { - return HwNetworkPhysicalAddressKey.String(val) -} - -// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic -// conventions. 
It represents the unique identifier of the parent component -// (typically the `hw.id` attribute of the enclosure, or disk controller). -func HwParent(val string) attribute.KeyValue { - return HwParentKey.String(val) -} - -// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the -// "hw.physical_disk.smart_attribute" semantic conventions. It represents the -// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute -// of the physical disk. -// -// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. -func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { - return HwPhysicalDiskSmartAttributeKey.String(val) -} - -// HwPhysicalDiskType returns an attribute KeyValue conforming to the -// "hw.physical_disk.type" semantic conventions. It represents the type of the -// physical disk. -func HwPhysicalDiskType(val string) attribute.KeyValue { - return HwPhysicalDiskTypeKey.String(val) -} - -// HwSensorLocation returns an attribute KeyValue conforming to the -// "hw.sensor_location" semantic conventions. It represents the location of the -// sensor. -func HwSensorLocation(val string) attribute.KeyValue { - return HwSensorLocationKey.String(val) -} - -// HwSerialNumber returns an attribute KeyValue conforming to the -// "hw.serial_number" semantic conventions. It represents the serial number of -// the hardware component. -func HwSerialNumber(val string) attribute.KeyValue { - return HwSerialNumberKey.String(val) -} - -// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic -// conventions. It represents the vendor name of the hardware component. 
-func HwVendor(val string) attribute.KeyValue { - return HwVendorKey.String(val) -} - -// Enum values for hw.battery.state -var ( - // Charging - // Stability: development - HwBatteryStateCharging = HwBatteryStateKey.String("charging") - // Discharging - // Stability: development - HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") -) - -// Enum values for hw.gpu.task -var ( - // Decoder - // Stability: development - HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") - // Encoder - // Stability: development - HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") - // General - // Stability: development - HwGpuTaskGeneral = HwGpuTaskKey.String("general") -) - -// Enum values for hw.limit_type -var ( - // Critical - // Stability: development - HwLimitTypeCritical = HwLimitTypeKey.String("critical") - // Degraded - // Stability: development - HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") - // High Critical - // Stability: development - HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") - // High Degraded - // Stability: development - HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") - // Low Critical - // Stability: development - HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") - // Low Degraded - // Stability: development - HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") - // Maximum - // Stability: development - HwLimitTypeMax = HwLimitTypeKey.String("max") - // Throttled - // Stability: development - HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") - // Turbo - // Stability: development - HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") -) - -// Enum values for hw.logical_disk.state -var ( - // Used - // Stability: development - HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") - // Free - // Stability: development - HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") -) - -// Enum values for hw.physical_disk.state -var ( - // Remaining - // Stability: 
development - HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining") -) - -// Enum values for hw.state -var ( - // Degraded - // Stability: development - HwStateDegraded = HwStateKey.String("degraded") - // Failed - // Stability: development - HwStateFailed = HwStateKey.String("failed") - // Needs Cleaning - // Stability: development - HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") - // OK - // Stability: development - HwStateOk = HwStateKey.String("ok") - // Predicted Failure - // Stability: development - HwStatePredictedFailure = HwStateKey.String("predicted_failure") -) - -// Enum values for hw.tape_drive.operation_type -var ( - // Mount - // Stability: development - HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") - // Unmount - // Stability: development - HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") - // Clean - // Stability: development - HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") -) - -// Enum values for hw.type -var ( - // Battery - // Stability: development - HwTypeBattery = HwTypeKey.String("battery") - // CPU - // Stability: development - HwTypeCPU = HwTypeKey.String("cpu") - // Disk controller - // Stability: development - HwTypeDiskController = HwTypeKey.String("disk_controller") - // Enclosure - // Stability: development - HwTypeEnclosure = HwTypeKey.String("enclosure") - // Fan - // Stability: development - HwTypeFan = HwTypeKey.String("fan") - // GPU - // Stability: development - HwTypeGpu = HwTypeKey.String("gpu") - // Logical disk - // Stability: development - HwTypeLogicalDisk = HwTypeKey.String("logical_disk") - // Memory - // Stability: development - HwTypeMemory = HwTypeKey.String("memory") - // Network - // Stability: development - HwTypeNetwork = HwTypeKey.String("network") - // Physical disk - // Stability: development - HwTypePhysicalDisk = HwTypeKey.String("physical_disk") - // Power supply - // Stability: 
development - HwTypePowerSupply = HwTypeKey.String("power_supply") - // Tape drive - // Stability: development - HwTypeTapeDrive = HwTypeKey.String("tape_drive") - // Temperature - // Stability: development - HwTypeTemperature = HwTypeKey.String("temperature") - // Voltage - // Stability: development - HwTypeVoltage = HwTypeKey.String("voltage") -) - -// Namespace: ios -const ( - // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" - // semantic conventions. It represents the this attribute represents the state - // of the application. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The iOS lifecycle states are defined in the - // [UIApplicationDelegate documentation], and from which the `OS terminology` - // column values are derived. - // - // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate - IOSAppStateKey = attribute.Key("ios.app.state") -) - -// Enum values for ios.app.state -var ( - // The app has become `active`. Associated with UIKit notification - // `applicationDidBecomeActive`. - // - // Stability: development - IOSAppStateActive = IOSAppStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification - // `applicationWillResignActive`. - // - // Stability: development - IOSAppStateInactive = IOSAppStateKey.String("inactive") - // The app is now in the background. This value is associated with UIKit - // notification `applicationDidEnterBackground`. - // - // Stability: development - IOSAppStateBackground = IOSAppStateKey.String("background") - // The app is now in the foreground. This value is associated with UIKit - // notification `applicationWillEnterForeground`. - // - // Stability: development - IOSAppStateForeground = IOSAppStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification - // `applicationWillTerminate`. 
- // - // Stability: development - IOSAppStateTerminate = IOSAppStateKey.String("terminate") -) - -// Namespace: k8s -const ( - // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" - // semantic conventions. It represents the name of the cluster. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-cluster" - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" - // semantic conventions. It represents a pseudo-ID for the cluster, set to the - // UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8s cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8s ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T X.667]. - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // > different from all other UUIDs generated before 3603 A.D., or is - // > extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. 
- // - // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "redis" - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the number - // of times the container was restarted. This attribute can be used to identify - // a particular container (running or stopped) within a container spec. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to - // the "k8s.container.status.last_terminated_reason" semantic conventions. It - // represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Evicted", "Error" - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SContainerStatusReasonKey is the attribute Key conforming to the - // "k8s.container.status.reason" semantic conventions. It represents the reason - // for the container state. Corresponds to the `reason` field of the: - // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ContainerCreating", "CrashLoopBackOff", - // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", - // "OOMKilled", "Completed", "Error", "ContainerCannotRun" - // - // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core - // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core - K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") - - // K8SContainerStatusStateKey is the attribute Key conforming to the - // "k8s.container.status.state" semantic conventions. It represents the state of - // the container. [K8s ContainerState]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "terminated", "running", "waiting" - // - // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core - K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") - - // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" - // semantic conventions. It represents the name of the CronJob. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" - // semantic conventions. It represents the UID of the CronJob. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" - // semantic conventions. It represents the UID of the DaemonSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of the - // Deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SHPAMetricTypeKey is the attribute Key conforming to the - // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric - // source for the horizontal pod autoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Resource", "ContainerResource" - // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. - K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") - - // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic - // conventions. It represents the name of the horizontal pod autoscaler. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SHPANameKey = attribute.Key("k8s.hpa.name") - - // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the - // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the - // API version of the target resource to scale for the HorizontalPodAutoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "apps/v1", "autoscaling/v2" - // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA - // spec. - K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") - - // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the - // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of - // the target resource to scale for the HorizontalPodAutoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Deployment", "StatefulSet" - // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. - K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") - - // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the - // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of - // the target resource to scale for the HorizontalPodAutoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-deployment", "my-statefulset" - // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. - K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") - - // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic - // conventions. It represents the UID of the horizontal pod autoscaler. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") - - // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" - // semantic conventions. It represents the size (identifier) of the K8s huge - // page. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2Mi" - K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic - // conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic - // conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "default" - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNamespacePhaseKey is the attribute Key conforming to the - // "k8s.namespace.phase" semantic conventions. It represents the phase of the - // K8s namespace. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "active", "terminating" - // Note: This attribute aligns with the `phase` field of the - // [K8s NamespaceStatus] - // - // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core - K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") - - // K8SNodeConditionStatusKey is the attribute Key conforming to the - // "k8s.node.condition.status" semantic conventions. It represents the status of - // the condition, one of True, False, Unknown. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "true", "false", "unknown" - // Note: This attribute aligns with the `status` field of the - // [NodeCondition] - // - // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core - K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") - - // K8SNodeConditionTypeKey is the attribute Key conforming to the - // "k8s.node.condition.type" semantic conventions. It represents the condition - // type of a K8s Node. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Ready", "DiskPressure" - // Note: K8s Node conditions as described - // by [K8s documentation]. - // - // This attribute aligns with the `type` field of the - // [NodeCondition] - // - // The set of possible values is not limited to those listed here. Managed - // Kubernetes environments, - // or custom controllers MAY introduce additional node condition types. - // When this occurs, the exact value as reported by the Kubernetes API SHOULD be - // used. 
- // - // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition - // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core - K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "node-1" - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic - // conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic - // conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-pod-autoconf" - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic - // conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of the - // ReplicaSet. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicationControllerNameKey is the attribute Key conforming to the - // "k8s.replicationcontroller.name" semantic conventions. It represents the name - // of the replication controller. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") - - // K8SReplicationControllerUIDKey is the attribute Key conforming to the - // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID - // of the replication controller. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") - - // K8SResourceQuotaNameKey is the attribute Key conforming to the - // "k8s.resourcequota.name" semantic conventions. It represents the name of the - // resource quota. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") - - // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the - // "k8s.resourcequota.resource_name" semantic conventions. It represents the - // name of the K8s resource a resource quota defines. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "count/replicationcontrollers" - // Note: The value for this attribute can be either the full - // `count/[.]` string (e.g., count/deployments.apps, - // count/pods), or, for certain core Kubernetes resources, just the resource - // name (e.g., pods, services, configmaps). Both forms are supported by - // Kubernetes for object count quotas. See - // [Kubernetes Resource Quotas documentation] for more details. - // - // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota - K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name") - - // K8SResourceQuotaUIDKey is the attribute Key conforming to the - // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the - // resource quota. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SStorageclassNameKey is the attribute Key conforming to the - // "k8s.storageclass.name" semantic conventions. 
It represents the name of K8s - // [StorageClass] object. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "gold.storageclass.storage.k8s.io" - // - // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io - K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name") - - // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" - // semantic conventions. It represents the name of the K8s volume. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "volume0" - K8SVolumeNameKey = attribute.Key("k8s.volume.name") - - // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" - // semantic conventions. It represents the type of the K8s volume. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "emptyDir", "persistentVolumeClaim" - K8SVolumeTypeKey = attribute.Key("k8s.volume.type") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). 
-func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify a -// particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobAnnotation returns an attribute KeyValue conforming to the -// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob -// annotation placed on the CronJob, the `` being the annotation name, the -// value being the annotation value. -func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.cronjob.annotation."+key, val) -} - -// K8SCronJobLabel returns an attribute KeyValue conforming to the -// "k8s.cronjob.label" semantic conventions. It represents the label placed on -// the CronJob, the `` being the label name, the value being the label -// value. -func K8SCronJobLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.cronjob.label."+key, val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. 
-func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.daemonset.annotation" semantic conventions. It represents the annotation -// placed on the DaemonSet, the `` being the annotation name, the value -// being the annotation value, even if the value is empty. -func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.daemonset.annotation."+key, val) -} - -// K8SDaemonSetLabel returns an attribute KeyValue conforming to the -// "k8s.daemonset.label" semantic conventions. It represents the label placed on -// the DaemonSet, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.daemonset.label."+key, val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the -// "k8s.deployment.annotation" semantic conventions. It represents the annotation -// placed on the Deployment, the `` being the annotation name, the value -// being the annotation value, even if the value is empty. 
-func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.deployment.annotation."+key, val) -} - -// K8SDeploymentLabel returns an attribute KeyValue conforming to the -// "k8s.deployment.label" semantic conventions. It represents the label placed on -// the Deployment, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SDeploymentLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.deployment.label."+key, val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SHPAMetricType returns an attribute KeyValue conforming to the -// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric -// source for the horizontal pod autoscaler. -func K8SHPAMetricType(val string) attribute.KeyValue { - return K8SHPAMetricTypeKey.String(val) -} - -// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" -// semantic conventions. It represents the name of the horizontal pod autoscaler. -func K8SHPAName(val string) attribute.KeyValue { - return K8SHPANameKey.String(val) -} - -// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the -// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the -// API version of the target resource to scale for the HorizontalPodAutoscaler. 
-func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue { - return K8SHPAScaletargetrefAPIVersionKey.String(val) -} - -// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the -// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of -// the target resource to scale for the HorizontalPodAutoscaler. -func K8SHPAScaletargetrefKind(val string) attribute.KeyValue { - return K8SHPAScaletargetrefKindKey.String(val) -} - -// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the -// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of -// the target resource to scale for the HorizontalPodAutoscaler. -func K8SHPAScaletargetrefName(val string) attribute.KeyValue { - return K8SHPAScaletargetrefNameKey.String(val) -} - -// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" -// semantic conventions. It represents the UID of the horizontal pod autoscaler. -func K8SHPAUID(val string) attribute.KeyValue { - return K8SHPAUIDKey.String(val) -} - -// K8SHugepageSize returns an attribute KeyValue conforming to the -// "k8s.hugepage.size" semantic conventions. It represents the size (identifier) -// of the K8s huge page. -func K8SHugepageSize(val string) attribute.KeyValue { - return K8SHugepageSizeKey.String(val) -} - -// K8SJobAnnotation returns an attribute KeyValue conforming to the -// "k8s.job.annotation" semantic conventions. It represents the annotation placed -// on the Job, the `` being the annotation name, the value being the -// annotation value, even if the value is empty. -func K8SJobAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.job.annotation."+key, val) -} - -// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" -// semantic conventions. It represents the label placed on the Job, the `` -// being the label name, the value being the label value, even if the value is -// empty. 
-func K8SJobLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.job.label."+key, val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the -// "k8s.namespace.annotation" semantic conventions. It represents the annotation -// placed on the Namespace, the `` being the annotation name, the value -// being the annotation value, even if the value is empty. -func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.namespace.annotation."+key, val) -} - -// K8SNamespaceLabel returns an attribute KeyValue conforming to the -// "k8s.namespace.label" semantic conventions. It represents the label placed on -// the Namespace, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SNamespaceLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.namespace.label."+key, val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeAnnotation returns an attribute KeyValue conforming to the -// "k8s.node.annotation" semantic conventions. It represents the annotation -// placed on the Node, the `` being the annotation name, the value being the -// annotation value, even if the value is empty. 
-func K8SNodeAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.node.annotation."+key, val) -} - -// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" -// semantic conventions. It represents the label placed on the Node, the `` -// being the label name, the value being the label value, even if the value is -// empty. -func K8SNodeLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.node.label."+key, val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" -// semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodAnnotation returns an attribute KeyValue conforming to the -// "k8s.pod.annotation" semantic conventions. It represents the annotation placed -// on the Pod, the `` being the annotation name, the value being the -// annotation value. -func K8SPodAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.pod.annotation."+key, val) -} - -// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" -// semantic conventions. It represents the label placed on the Pod, the `` -// being the label name, the value being the label value. -func K8SPodLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.pod.label."+key, val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. 
-func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.replicaset.annotation" semantic conventions. It represents the annotation -// placed on the ReplicaSet, the `` being the annotation name, the value -// being the annotation value, even if the value is empty. -func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.replicaset.annotation."+key, val) -} - -// K8SReplicaSetLabel returns an attribute KeyValue conforming to the -// "k8s.replicaset.label" semantic conventions. It represents the label placed on -// the ReplicaSet, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.replicaset.label."+key, val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicationControllerName returns an attribute KeyValue conforming to the -// "k8s.replicationcontroller.name" semantic conventions. It represents the name -// of the replication controller. 
-func K8SReplicationControllerName(val string) attribute.KeyValue { - return K8SReplicationControllerNameKey.String(val) -} - -// K8SReplicationControllerUID returns an attribute KeyValue conforming to the -// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of -// the replication controller. -func K8SReplicationControllerUID(val string) attribute.KeyValue { - return K8SReplicationControllerUIDKey.String(val) -} - -// K8SResourceQuotaName returns an attribute KeyValue conforming to the -// "k8s.resourcequota.name" semantic conventions. It represents the name of the -// resource quota. -func K8SResourceQuotaName(val string) attribute.KeyValue { - return K8SResourceQuotaNameKey.String(val) -} - -// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the -// "k8s.resourcequota.resource_name" semantic conventions. It represents the name -// of the K8s resource a resource quota defines. -func K8SResourceQuotaResourceName(val string) attribute.KeyValue { - return K8SResourceQuotaResourceNameKey.String(val) -} - -// K8SResourceQuotaUID returns an attribute KeyValue conforming to the -// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the -// resource quota. -func K8SResourceQuotaUID(val string) attribute.KeyValue { - return K8SResourceQuotaUIDKey.String(val) -} - -// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.statefulset.annotation" semantic conventions. It represents the -// annotation placed on the StatefulSet, the `` being the annotation name, -// the value being the annotation value, even if the value is empty. -func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.statefulset.annotation."+key, val) -} - -// K8SStatefulSetLabel returns an attribute KeyValue conforming to the -// "k8s.statefulset.label" semantic conventions. 
It represents the label placed -// on the StatefulSet, the `` being the label name, the value being the -// label value, even if the value is empty. -func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.statefulset.label."+key, val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SStorageclassName returns an attribute KeyValue conforming to the -// "k8s.storageclass.name" semantic conventions. It represents the name of K8s -// [StorageClass] object. -// -// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io -func K8SStorageclassName(val string) attribute.KeyValue { - return K8SStorageclassNameKey.String(val) -} - -// K8SVolumeName returns an attribute KeyValue conforming to the -// "k8s.volume.name" semantic conventions. It represents the name of the K8s -// volume. -func K8SVolumeName(val string) attribute.KeyValue { - return K8SVolumeNameKey.String(val) -} - -// Enum values for k8s.container.status.reason -var ( - // The container is being created. - // Stability: development - K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating") - // The container is in a crash loop back off state. - // Stability: development - K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff") - // There was an error creating the container configuration. 
- // Stability: development - K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError") - // There was an error pulling the container image. - // Stability: development - K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull") - // The container image pull is in back off state. - // Stability: development - K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff") - // The container was killed due to out of memory. - // Stability: development - K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled") - // The container has completed execution. - // Stability: development - K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed") - // There was an error with the container. - // Stability: development - K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error") - // The container cannot run. - // Stability: development - K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun") -) - -// Enum values for k8s.container.status.state -var ( - // The container has terminated. - // Stability: development - K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated") - // The container is running. - // Stability: development - K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running") - // The container is waiting. 
- // Stability: development - K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") -) - -// Enum values for k8s.namespace.phase -var ( - // Active namespace phase as described by [K8s API] - // Stability: development - // - // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase - K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") - // Terminating namespace phase as described by [K8s API] - // Stability: development - // - // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase - K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") -) - -// Enum values for k8s.node.condition.status -var ( - // condition_true - // Stability: development - K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") - // condition_false - // Stability: development - K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") - // condition_unknown - // Stability: development - K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") -) - -// Enum values for k8s.node.condition.type -var ( - // The node is healthy and ready to accept pods - // Stability: development - K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") - // Pressure exists on the disk size—that is, if the disk capacity is low - // Stability: development - K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") - // Pressure exists on the node memory—that is, if the node memory is low - // Stability: development - K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") - // Pressure exists on the processes—that is, if there are too many processes - // on the node - // Stability: development - K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") - // The network for the node is not correctly configured - // Stability: development - 
K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") -) - -// Enum values for k8s.volume.type -var ( - // A [persistentVolumeClaim] volume - // Stability: development - // - // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim - K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") - // A [configMap] volume - // Stability: development - // - // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap - K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") - // A [downwardAPI] volume - // Stability: development - // - // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi - K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") - // An [emptyDir] volume - // Stability: development - // - // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir - K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") - // A [secret] volume - // Stability: development - // - // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret - K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") - // A [local] volume - // Stability: development - // - // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local - K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") -) - -// Namespace: linux -const ( - // LinuxMemorySlabStateKey is the attribute Key conforming to the - // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab - // memory state. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "reclaimable", "unreclaimable" - LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") -) - -// Enum values for linux.memory.slab.state -var ( - // reclaimable - // Stability: development - LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") - // unreclaimable - // Stability: development - LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") -) - -// Namespace: log -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "audit.log" - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the basename of - // the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "uuid.log" - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/var/log/mysql/audit.log" - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full path to - // the file, with symlinks resolved. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/var/lib/docker/uuid.log" - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") - - // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic - // conventions. It represents the stream associated with the log. See below for - // a list of well-known values. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - LogIostreamKey = attribute.Key("log.iostream") - - // LogRecordOriginalKey is the attribute Key conforming to the - // "log.record.original" semantic conventions. It represents the complete - // original Log Record. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - - // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" - // Note: This value MAY be added when processing a Log Record which was - // originally transmitted as a string or equivalent data type AND the Body field - // of the Log Record does not contain the same value. (e.g. a syslog or a log - // record read from a file.) - LogRecordOriginalKey = attribute.Key("log.record.original") - - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log Record. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an - // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other - // identifiers (e.g. UUID) may be used as needed. 
- // - // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogFileName returns an attribute KeyValue conforming to the "log.file.name" -// semantic conventions. It represents the basename of the file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" -// semantic conventions. It represents the full path to the file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path to -// the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// LogRecordOriginal returns an attribute KeyValue conforming to the -// "log.record.original" semantic conventions. It represents the complete -// original Log Record. -func LogRecordOriginal(val string) attribute.KeyValue { - return LogRecordOriginalKey.String(val) -} - -// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" -// semantic conventions. It represents a unique identifier for the Log Record. 
-func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Enum values for log.iostream -var ( - // Logs from stdout stream - // Stability: development - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - // Stability: development - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Namespace: mainframe -const ( - // MainframeLparNameKey is the attribute Key conforming to the - // "mainframe.lpar.name" semantic conventions. It represents the name of the - // logical partition that hosts a systems with a mainframe operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "LPAR01" - MainframeLparNameKey = attribute.Key("mainframe.lpar.name") -) - -// MainframeLparName returns an attribute KeyValue conforming to the -// "mainframe.lpar.name" semantic conventions. It represents the name of the -// logical partition that hosts a systems with a mainframe operating system. -func MainframeLparName(val string) attribute.KeyValue { - return MainframeLparNameKey.String(val) -} - -// Namespace: messaging -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the batching - // operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client library - // supports both batch and single-message API for the same operation, - // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs - // and SHOULD NOT use it for single-message APIs. 
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique identifier - // for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "client-5", "myhost@8742@s8083jm" - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingConsumerGroupNameKey is the attribute Key conforming to the - // "messaging.consumer.group.name" semantic conventions. It represents the name - // of the consumer group with which a consumer is associated. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-group", "indexer" - // Note: Semantic conventions for individual messaging systems SHOULD document - // whether `messaging.consumer.group.name` is applicable and what it means in - // the context of that system. - MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the message - // destination name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MyQueue", "MyTopic" - // Note: Destination name SHOULD uniquely identify a specific queue, topic or - // other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD uniquely - // identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to the - // "messaging.destination.partition.id" semantic conventions. It represents the - // identifier of the partition messages are sent to or received from, unique - // within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to - // the "messaging.destination.subscription.name" semantic conventions. It - // represents the name of the destination subscription from which a message is - // consumed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "subscription-a" - // Note: Semantic conventions for individual messaging systems SHOULD document - // whether `messaging.destination.subscription.name` is applicable and what it - // means in the context of that system. - MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the low - // cardinality representation of the messaging destination name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/customers/{customerId}" - // Note: Destination names could be constructed from templates. An example would - // be a destination name involving a user name or product id. Although the - // destination name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might not - // exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to - // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It - // represents the UTC epoch seconds at which the message has been accepted and - // stored in the entity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") - - // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to - // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It - // represents the ack deadline in seconds set for the modify ack deadline - // request. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the - // ack id for a given message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: ack_id - MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. - // It represents the delivery attempt for a given message. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to - // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It - // represents the ordering key for a given message. If the attribute is not - // present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: ordering_key - MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the message - // keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message.id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myKey - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents a - // boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - - // MessagingKafkaOffsetKey is the attribute Key conforming to the - // "messaging.kafka.offset" semantic conventions. It represents the offset of a - // record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the size of - // the message body in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: This can refer to both the compressed or uncompressed body size. If - // both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents the - // conversation ID identifying the conversation to which the message belongs, - // represented as a string. Sometimes called "Correlation ID". 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: MyConversationId - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents the - // size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: This can refer to both the compressed or uncompressed size. If both - // sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used by - // the messaging system as an identifier for the message, represented as a - // string. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 452a7c7c7c7048c2f887f61572b18fc2 - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ack", "nack", "send" - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: If a custom value is used, it MUST be of low cardinality. 
- MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to - // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It - // represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myKey - MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the - // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents - // the rabbitMQ message delivery tag. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") - - // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the - // "messaging.rocketmq.consumption_model" semantic conventions. It represents - // the model of message consumption. This only applies to consumer spans. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to - // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It - // represents the delay time level for delay message, which determines the - // message delay time. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming - // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. 
- // It represents the timestamp in milliseconds that the delay message is - // expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents the it - // is essential for FIFO message. Messages that belong to the same message group - // are always processed one by one within the same consumer group. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myMessageGroup - MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents the - // key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "keyA", "keyB" - MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketMQMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: tagA - MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents the - // type of message. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketMQNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myNamespace - MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to - // the "messaging.servicebus.disposition_status" semantic conventions. It - // represents the describes the [settlement type]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock - MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to - // the "messaging.servicebus.message.delivery_count" semantic conventions. It - // represents the number of deliveries that have been attempted for this - // message. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to - // the "messaging.servicebus.message.enqueued_time" semantic conventions. It - // represents the UTC epoch seconds at which the message has been accepted and - // stored in the entity. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") - - // MessagingSystemKey is the attribute Key conforming to the "messaging.system" - // semantic conventions. It represents the messaging system as identified by the - // client instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate with - // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the - // instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to the -// "messaging.batch.message_count" semantic conventions. It represents the number -// of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique identifier -// for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingConsumerGroupName returns an attribute KeyValue conforming to the -// "messaging.consumer.group.name" semantic conventions. It represents the name -// of the consumer group with which a consumer is associated. -func MessagingConsumerGroupName(val string) attribute.KeyValue { - return MessagingConsumerGroupNameKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the -// "messaging.destination.anonymous" semantic conventions. 
It represents a -// boolean that is true if the message destination is anonymous (could be unnamed -// or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name. -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming to -// the "messaging.destination.partition.id" semantic conventions. It represents -// the identifier of the partition messages are sent to or received from, unique -// within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming -// to the "messaging.destination.subscription.name" semantic conventions. It -// represents the name of the destination subscription from which a message is -// consumed. -func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingDestinationSubscriptionNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to the -// "messaging.destination.template" semantic conventions. It represents the low -// cardinality representation of the messaging destination name. -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to the -// "messaging.destination.temporary" semantic conventions. 
It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming -// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It -// represents the UTC epoch seconds at which the message has been accepted and -// stored in the entity. -func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) -} - -// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It -// represents the ack deadline in seconds set for the modify ack deadline -// request. -func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the -// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the -// ack id for a given message. -func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubSubMessageAckIDKey.String(val) -} - -// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It -// represents the ordering key for a given message. 
If the attribute is not -// present, the message does not have an ordering key. -func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubSubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the message -// keys in Kafka are used for grouping alike messages to ensure they're processed -// on the same partition. They differ from `messaging.message.id` in that they're -// not unique. If the key is `null`, the attribute MUST NOT be set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the -// "messaging.kafka.message.tombstone" semantic conventions. It represents a -// boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingKafkaOffset returns an attribute KeyValue conforming to the -// "messaging.kafka.offset" semantic conventions. It represents the offset of a -// record in the corresponding Kafka partition. -func MessagingKafkaOffset(val int) attribute.KeyValue { - return MessagingKafkaOffsetKey.Int(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size of -// the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming to the -// "messaging.message.conversation_id" semantic conventions. It represents the -// conversation ID identifying the conversation to which the message belongs, -// represented as a string. Sometimes called "Correlation ID". 
-func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the -// "messaging.message.envelope.size" semantic conventions. It represents the size -// of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by the -// messaging system as an identifier for the message, represented as a string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitMQDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming -// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It -// represents the rabbitMQ message delivery tag. -func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitMQMessageDeliveryTagKey.Int(val) -} - -// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. 
It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketMQMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketMQMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.group" semantic conventions. It represents the it -// is essential for FIFO message. Messages that belong to the same message group -// are always processed one by one within the same consumer group. -func MessagingRocketMQMessageGroup(val string) attribute.KeyValue { - return MessagingRocketMQMessageGroupKey.String(val) -} - -// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.keys" semantic conventions. It represents the -// key(s) of message, another way to mark message besides message id. -func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketMQMessageKeysKey.StringSlice(val) -} - -// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketMQMessageTag(val string) attribute.KeyValue { - return MessagingRocketMQMessageTagKey.String(val) -} - -// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the -// "messaging.rocketmq.namespace" semantic conventions. 
It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketMQNamespace(val string) attribute.KeyValue { - return MessagingRocketMQNamespaceKey.String(val) -} - -// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message. -func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServiceBusMessageDeliveryCountKey.Int(val) -} - -// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has been -// accepted and stored in the entity. -func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) -} - -// Enum values for messaging.operation.type -var ( - // A message is created. "Create" spans always refer to a single message and are - // used to provide a unique creation context for messages in batch sending - // scenarios. - // - // Stability: development - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are provided for sending to an intermediary. If a single - // message is sent, the context of the "Send" span can be used as the creation - // context and no "Create" span needs to be created. - // - // Stability: development - MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") - // One or more messages are requested by a consumer. This operation refers to - // pull-based scenarios, where consumers explicitly call methods of messaging - // SDKs to receive messages. 
- // - // Stability: development - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are processed by a consumer. - // - // Stability: development - MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") - // One or more messages are settled. - // - // Stability: development - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -// Enum values for messaging.rocketmq.consumption_model -var ( - // Clustering consumption model - // Stability: development - MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") - // Broadcasting consumption model - // Stability: development - MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") -) - -// Enum values for messaging.rocketmq.message.type -var ( - // Normal message - // Stability: development - MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") - // FIFO message - // Stability: development - MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") - // Delay message - // Stability: development - MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") - // Transaction message - // Stability: development - MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") -) - -// Enum values for messaging.servicebus.disposition_status -var ( - // Message is completed - // Stability: development - MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") - // Message is abandoned - // Stability: development - MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - // Stability: development - MessagingServiceBusDispositionStatusDeadLetter = 
MessagingServiceBusDispositionStatusKey.String("dead_letter") - // Message is deferred - // Stability: development - MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") -) - -// Enum values for messaging.system -var ( - // Apache ActiveMQ - // Stability: development - MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") - // Amazon Simple Notification Service (SNS) - // Stability: development - MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") - // Amazon Simple Queue Service (SQS) - // Stability: development - MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - // Stability: development - MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - // Stability: development - MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - // Stability: development - MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - // Stability: development - MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - // Stability: development - MessagingSystemJMS = MessagingSystemKey.String("jms") - // Apache Kafka - // Stability: development - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - // Stability: development - MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - // Stability: development - MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") - // Apache Pulsar - // Stability: development - MessagingSystemPulsar = MessagingSystemKey.String("pulsar") -) - -// Namespace: network -const ( - // NetworkCarrierICCKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier network. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: DE - NetworkCarrierICCKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMCCKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile carrier - // country code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 310 - NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMNCKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile carrier - // network code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 001 - NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of the - // mobile carrier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: sprint - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionStateKey is the attribute Key conforming to the - // "network.connection.state" semantic conventions. It represents the state of - // network connection. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "close_wait" - // Note: Connection states are defined as part of the [rfc9293] - // - // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2 - NetworkConnectionStateKey = attribute.Key("network.connection.state") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the this - // describes more details regarding the connection.type. 
It may be the type of - // cell technology connection, but it could be used for describing details about - // a wifi connection. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: LTE - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the internet - // connection type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: wifi - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkInterfaceNameKey is the attribute Key conforming to the - // "network.interface.name" semantic conventions. It represents the network - // interface name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "lo", "eth0" - NetworkInterfaceNameKey = attribute.Key("network.interface.name") - - // NetworkIODirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "transmit" - NetworkIODirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local address - // of the network connection - IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "10.1.2.80", "/tmp/my.sock" - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer address - // of the network connection - IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "10.1.2.80", "/tmp/my.sock" - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" - // semantic conventions. It represents the peer port number of the network - // connection. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the - // [OSI application layer] or non-OSI equivalent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "amqp", "http", "mqtt" - // Note: The value SHOULD be normalized to lowercase. - // - // [OSI application layer]: https://wikipedia.org/wiki/Application_layer - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the actual - // version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.1", "2" - // Note: If protocol version is subject to negotiation (for example using [ALPN] - // ), this attribute SHOULD be set to the negotiated version. If the actual - // protocol version is not known, this attribute SHOULD NOT be set. 
- // - // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the - // [OSI transport layer] or [inter-process communication method]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "tcp", "udp" - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port 12345. - // - // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer - // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic - // conventions. It represents the [OSI network layer] or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "ipv4", "ipv6" - // Note: The value SHOULD be normalized to lowercase. - // - // [OSI network layer]: https://wikipedia.org/wiki/Network_layer - NetworkTypeKey = attribute.Key("network.type") -) - -// NetworkCarrierICC returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierICC(val string) attribute.KeyValue { - return NetworkCarrierICCKey.String(val) -} - -// NetworkCarrierMCC returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. 
-func NetworkCarrierMCC(val string) attribute.KeyValue { - return NetworkCarrierMCCKey.String(val) -} - -// NetworkCarrierMNC returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMNC(val string) attribute.KeyValue { - return NetworkCarrierMNCKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkInterfaceName returns an attribute KeyValue conforming to the -// "network.interface.name" semantic conventions. It represents the network -// interface name. -func NetworkInterfaceName(val string) attribute.KeyValue { - return NetworkInterfaceNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local address -// of the network connection - IP address or Unix domain socket name. -func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port number -// of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address of -// the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. 
It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the -// [OSI application layer] or non-OSI equivalent. -// -// [OSI application layer]: https://wikipedia.org/wiki/Application_layer -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Enum values for network.connection.state -var ( - // closed - // Stability: development - NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") - // close_wait - // Stability: development - NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") - // closing - // Stability: development - NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") - // established - // Stability: development - NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") - // fin_wait_1 - // Stability: development - NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") - // fin_wait_2 - // Stability: development - NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") - // last_ack - // Stability: development - NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") - // listen - // Stability: development - NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") - // syn_received - // Stability: development - NetworkConnectionStateSynReceived = 
NetworkConnectionStateKey.String("syn_received") - // syn_sent - // Stability: development - NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") - // time_wait - // Stability: development - NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") -) - -// Enum values for network.connection.subtype -var ( - // GPRS - // Stability: development - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - // Stability: development - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - // Stability: development - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - // Stability: development - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - // Stability: development - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - // Stability: development - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - // Stability: development - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - // Stability: development - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - // Stability: development - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - // Stability: development - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - // Stability: development - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - // Stability: development - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - // Stability: development - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - // Stability: development - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - // Stability: development - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - // Stability: development - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - // Stability: development - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - // Stability: development - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - // Stability: development - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - // Stability: development - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - // Stability: development - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -// Enum values for network.connection.type -var ( - // wifi - // Stability: development - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - // Stability: development - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - // Stability: development - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - // Stability: development - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - // Stability: development - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -// Enum values for network.io.direction -var ( - // transmit - // Stability: development - NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") - // 
receive - // Stability: development - NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") -) - -// Enum values for network.transport -var ( - // TCP - // Stability: stable - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - // Stability: stable - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe. - // Stability: stable - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - // Stability: stable - NetworkTransportUnix = NetworkTransportKey.String("unix") - // QUIC - // Stability: stable - NetworkTransportQUIC = NetworkTransportKey.String("quic") -) - -// Enum values for network.type -var ( - // IPv4 - // Stability: stable - NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") - // IPv6 - // Stability: stable - NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") -) - -// Namespace: oci -const ( - // OCIManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of the - // OCI image manifest. For container images specifically is the digest by which - // the container image is known. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" - // Note: Follows [OCI Image Manifest Specification], and specifically the - // [Digest property]. - // An example can be found in [Example Image Manifest]. 
- // - // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md - // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests - // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest - OCIManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OCIManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OCIManifestDigest(val string) attribute.KeyValue { - return OCIManifestDigestKey.String(val) -} - -// Namespace: openai -const ( - // OpenAIRequestServiceTierKey is the attribute Key conforming to the - // "openai.request.service_tier" semantic conventions. It represents the service - // tier requested. May be a specific tier, default, or auto. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "auto", "default" - OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") - - // OpenAIResponseServiceTierKey is the attribute Key conforming to the - // "openai.response.service_tier" semantic conventions. It represents the - // service tier used for the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "scale", "default" - OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") - - // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the - // "openai.response.system_fingerprint" semantic conventions. It represents a - // fingerprint to track any eventual change in the Generative AI environment. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "fp_44709d6fcb" - OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") -) - -// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the -// "openai.response.service_tier" semantic conventions. It represents the service -// tier used for the response. -func OpenAIResponseServiceTier(val string) attribute.KeyValue { - return OpenAIResponseServiceTierKey.String(val) -} - -// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to -// the "openai.response.system_fingerprint" semantic conventions. It represents a -// fingerprint to track any eventual change in the Generative AI environment. -func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { - return OpenAIResponseSystemFingerprintKey.String(val) -} - -// Enum values for openai.request.service_tier -var ( - // The system will utilize scale tier credits until they are exhausted. - // Stability: development - OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") - // The system will utilize the default scale tier. - // Stability: development - OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") -) - -// Namespace: opentracing -const ( - // OpenTracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the parent-child - // Reference type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The causal relationship between a child Span and a parent Span. 
- OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -// Enum values for opentracing.ref_type -var ( - // The parent Span depends on the child Span in some capacity - // Stability: development - OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - // Stability: development - OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") -) - -// Namespace: os -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic - // conventions. It represents the unique identifier for a particular build or - // compilation of the operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TQ3C.230805.001.B2", "20E247", "22621" - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to be - // parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iOS", "Android", "Ubuntu" - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" semantic - // conventions. It represents the version string of the operating system as - // defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.2.1", "18.04.1" - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - OSVersionKey = attribute.Key("os.version") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the "os.description" -// semantic conventions. It represents the human readable (not intended to be -// parsed) OS version information, like e.g. reported by `ver` or -// `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating system -// as defined in [Version Attributes]. 
-// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Enum values for os.type -var ( - // Microsoft Windows - // Stability: development - OSTypeWindows = OSTypeKey.String("windows") - // Linux - // Stability: development - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - // Stability: development - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - // Stability: development - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - // Stability: development - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - // Stability: development - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - // Stability: development - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - // Stability: development - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - // Stability: development - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - // Stability: development - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - // Stability: development - OSTypeZOS = OSTypeKey.String("zos") -) - -// Namespace: otel -const ( - // OTelComponentNameKey is the attribute Key conforming to the - // "otel.component.name" semantic conventions. It represents a name uniquely - // identifying the instance of the OpenTelemetry component within its containing - // SDK instance. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otlp_grpc_span_exporter/0", "custom-name" - // Note: Implementations SHOULD ensure a low cardinality for this attribute, - // even across application or SDK restarts. - // E.g. implementations MUST NOT use UUIDs as values for this attribute. - // - // Implementations MAY achieve these goals by following a - // `/` pattern, e.g. - // `batching_span_processor/0`. 
- // Hereby `otel.component.type` refers to the corresponding attribute value of - // the component. - // - // The value of `instance-counter` MAY be automatically assigned by the - // component and uniqueness within the enclosing SDK instance MUST be - // guaranteed. - // For example, `` MAY be implemented by using a monotonically - // increasing counter (starting with `0`), which is incremented every time an - // instance of the given component type is started. - // - // With this implementation, for example the first Batching Span Processor would - // have `batching_span_processor/0` - // as `otel.component.name`, the second one `batching_span_processor/1` and so - // on. - // These values will therefore be reused in the case of an application restart. - OTelComponentNameKey = attribute.Key("otel.component.name") - - // OTelComponentTypeKey is the attribute Key conforming to the - // "otel.component.type" semantic conventions. It represents a name identifying - // the type of the OpenTelemetry component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "batching_span_processor", "com.example.MySpanExporter" - // Note: If none of the standardized values apply, implementations SHOULD use - // the language-defined name of the type. - // E.g. for Java the fully qualified classname SHOULD be used in this case. - OTelComponentTypeKey = attribute.Key("otel.component.type") - - // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" - // semantic conventions. It represents the name of the instrumentation scope - ( - // `InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "io.opentelemetry.contrib.mongodb" - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeSchemaURLKey is the attribute Key conforming to the - // "otel.scope.schema_url" semantic conventions. 
It represents the schema URL of - // the instrumentation scope. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://opentelemetry.io/schemas/1.31.0" - OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of the - // instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.0.0" - OTelScopeVersionKey = attribute.Key("otel.scope.version") - - // OTelSpanParentOriginKey is the attribute Key conforming to the - // "otel.span.parent.origin" semantic conventions. It represents the determines - // whether the span has a parent span, and if so, - // [whether it is a remote parent]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote - OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") - - // OTelSpanSamplingResultKey is the attribute Key conforming to the - // "otel.span.sampling_result" semantic conventions. It represents the result - // value of the sampler for this span. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") - - // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" - // semantic conventions. It represents the name of the code, either "OK" or - // "ERROR". MUST NOT be set if the status code is UNSET. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the description - // of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "resource not found" - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -// OTelComponentName returns an attribute KeyValue conforming to the -// "otel.component.name" semantic conventions. It represents a name uniquely -// identifying the instance of the OpenTelemetry component within its containing -// SDK instance. -func OTelComponentName(val string) attribute.KeyValue { - return OTelComponentNameKey.String(val) -} - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeSchemaURL returns an attribute KeyValue conforming to the -// "otel.scope.schema_url" semantic conventions. It represents the schema URL of -// the instrumentation scope. -func OTelScopeSchemaURL(val string) attribute.KeyValue { - return OTelScopeSchemaURLKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. 
It represents the description -// of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Enum values for otel.component.type -var ( - // The builtin SDK batching span processor - // - // Stability: development - OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") - // The builtin SDK simple span processor - // - // Stability: development - OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") - // The builtin SDK batching log record processor - // - // Stability: development - OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") - // The builtin SDK simple log record processor - // - // Stability: development - OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") - // OTLP span exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") - // OTLP span exporter over HTTP with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") - // OTLP span exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") - // Zipkin span exporter over HTTP - // - // Stability: development - OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") - // OTLP log record exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") - // OTLP log record exporter over HTTP with protobuf serialization - // - // Stability: development - 
OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") - // OTLP log record exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") - // The builtin SDK periodically exporting metric reader - // - // Stability: development - OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") - // OTLP metric exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") - // OTLP metric exporter over HTTP with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") - // OTLP metric exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") - // Prometheus metric exporter over HTTP with the default text-based format - // - // Stability: development - OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") -) - -// Enum values for otel.span.parent.origin -var ( - // The span does not have a parent, it is a root span - // Stability: development - OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") - // The span has a parent and the parent's span context [isRemote()] is false - // Stability: development - // - // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote - OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") - // The span has a parent and the parent's span context [isRemote()] is true - // Stability: development - // - // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote - 
OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") -) - -// Enum values for otel.span.sampling_result -var ( - // The span is not sampled and not recording - // Stability: development - OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") - // The span is not sampled, but recording - // Stability: development - OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") - // The span is sampled and recording - // Stability: development - OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") -) - -// Enum values for otel.status_code -var ( - // The operation has been validated by an Application developer or Operator to - // have completed successfully. - // Stability: stable - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error. - // Stability: stable - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// Namespace: peer -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic - // conventions. It represents the [`service.name`] of the remote service. SHOULD - // be equal to the actual `service.name` resource attribute of the remote - // service if any. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: AuthTokenCache - // - // [`service.name`]: /docs/resource/README.md#service - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the "peer.service" -// semantic conventions. It represents the [`service.name`] of the remote -// service. SHOULD be equal to the actual `service.name` resource attribute of -// the remote service if any. 
-// -// [`service.name`]: /docs/resource/README.md#service -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// Namespace: process -const ( - // ProcessArgsCountKey is the attribute Key conforming to the - // "process.args_count" semantic conventions. It represents the length of the - // process.command_args array. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 4 - // Note: This field can be useful for querying or performing bucket analysis on - // how many arguments were provided to start a process. More arguments may be an - // indication of suspicious activity. - ProcessArgsCountKey = attribute.Key("process.args_count") - - // ProcessCommandKey is the attribute Key conforming to the "process.command" - // semantic conventions. It represents the command used to launch the process - // (i.e. the command name). On Linux based systems, can be set to the zeroth - // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter - // extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cmd/otelcol" - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received by - // the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this - // would be the full argv vector passed to `main`. SHOULD NOT be collected by - // default unless there is sanitization that excludes sensitive data. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cmd/otecol", "--config=config.yaml" - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full command - // used to launch the process as a single string representing the full command. - // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if - // you have to assemble it just for monitoring; use `process.command_args` - // instead. SHOULD NOT be collected by default unless there is sanitization that - // excludes sensitive data. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch_type" semantic conventions. It represents the - // specifies whether the context switches for this data point were voluntary or - // involuntary. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and time - // the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2023-11-21T09:25:34.853Z" - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the - // "process.executable.build_id.gnu" semantic conventions. 
It represents the GNU - // build ID as found in the `.note.gnu.build-id` ELF section (hex string). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" - ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") - - // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the - // "process.executable.build_id.go" semantic conventions. It represents the Go - // build ID as retrieved by `go tool buildid `. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" - ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") - - // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the - // "process.executable.build_id.htlhash" semantic conventions. It represents the - // profiling specific build ID for executables. See the OTel specification for - // Profiles for more information. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "600DCAFE4A110000F2BF38C493F5FB92" - ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name of the - // process executable. On Linux based systems, this SHOULD be set to the base - // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to - // the base name of `GetProcessImageFileNameW`. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcol" - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full path - // to the process executable. On Linux based systems, can be set to the target - // of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/usr/bin/cmd/otelcol" - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" - // semantic conventions. It represents the exit code of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" - // semantic conventions. It represents the date and time the process exited, in - // ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2023-11-21T09:26:12.315Z" - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID of the - // process's group leader. This is also the process group ID (PGID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. 
It represents the whether the - // process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessLinuxCgroupKey is the attribute Key conforming to the - // "process.linux.cgroup" semantic conventions. It represents the control group - // associated with the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", - // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" - // Note: Control groups (cgroups) are a kernel feature used to organize and - // manage process resources. This attribute provides the path(s) to the - // cgroup(s) associated with the process, which should match the contents of the - // [/proc/[PID]/cgroup] file. - // - // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html - ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns the - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessPagingFaultTypeKey is the attribute Key conforming to the - // "process.paging.fault_type" semantic conventions. It represents the type of - // page fault for this data point. Type `major` is for major/hard page faults, - // and `minor` is for minor/soft page faults. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent Process - // identifier (PPID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic - // conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user ID - // (RUID) of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the username of - // the real user of the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "operator" - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of the - // runtime of this process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "OpenJDK Runtime Environment" - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the version of - // the runtime of this process, as returned by the runtime without modification. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 14.0.2 - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved user ID - // (SUID) of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the username of - // the saved user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "operator" - ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") - - // ProcessSessionLeaderPIDKey is the attribute Key conforming to the - // "process.session_leader.pid" semantic conventions. It represents the PID of - // the process's session leader. This is also the session ID (SID) of the - // process. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 14 - ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") - - // ProcessTitleKey is the attribute Key conforming to the "process.title" - // semantic conventions. It represents the process title (proctitle). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cat /etc/hostname", "xfce4-session", "bash" - // Note: In many Unix-like systems, process title (proctitle), is the string - // that represents the name or command line of a running process, displayed by - // system monitoring tools like ps, top, and htop. - ProcessTitleKey = attribute.Key("process.title") - - // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" - // semantic conventions. It represents the effective user ID (EUID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1001 - ProcessUserIDKey = attribute.Key("process.user.id") - - // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" - // semantic conventions. It represents the username of the effective user of the - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - ProcessUserNameKey = attribute.Key("process.user.name") - - // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic - // conventions. It represents the virtual process identifier. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12 - // Note: The process ID within a PID namespace. This is not necessarily unique - // across all processes on the host but it is unique within the process - // namespace that the process exists within. 
- ProcessVpidKey = attribute.Key("process.vpid") - - // ProcessWorkingDirectoryKey is the attribute Key conforming to the - // "process.working_directory" semantic conventions. It represents the working - // directory of the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/root" - ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") -) - -// ProcessArgsCount returns an attribute KeyValue conforming to the -// "process.args_count" semantic conventions. It represents the length of the -// process.command_args array. -func ProcessArgsCount(val int) attribute.KeyValue { - return ProcessArgsCountKey.Int(val) -} - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be set -// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the -// first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the command -// arguments (including the command/executable itself) as received by the -// process. On Linux-based systems (and some other Unixoid systems supporting -// procfs), can be set according to the list of null-delimited strings extracted -// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full -// argv vector passed to `main`. SHOULD NOT be collected by default unless there -// is sanitization that excludes sensitive data. 
-func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if -// you have to assemble it just for monitoring; use `process.command_args` -// instead. SHOULD NOT be collected by default unless there is sanitization that -// excludes sensitive data. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCreationTime returns an attribute KeyValue conforming to the -// "process.creation.time" semantic conventions. It represents the date and time -// the process was created, in ISO 8601 format. -func ProcessCreationTime(val string) attribute.KeyValue { - return ProcessCreationTimeKey.String(val) -} - -// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the -// "process.environment_variable" semantic conventions. It represents the process -// environment variables, `` being the environment variable name, the value -// being the environment variable value. -func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { - return attribute.String("process.environment_variable."+key, val) -} - -// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the -// "process.executable.build_id.gnu" semantic conventions. It represents the GNU -// build ID as found in the `.note.gnu.build-id` ELF section (hex string). -func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { - return ProcessExecutableBuildIDGNUKey.String(val) -} - -// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the -// "process.executable.build_id.go" semantic conventions. 
It represents the Go -// build ID as retrieved by `go tool buildid `. -func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { - return ProcessExecutableBuildIDGoKey.String(val) -} - -// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to -// the "process.executable.build_id.htlhash" semantic conventions. It represents -// the profiling specific build ID for executables. See the OTel specification -// for Profiles for more information. -func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { - return ProcessExecutableBuildIDHtlhashKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of the -// process executable. On Linux based systems, this SHOULD be set to the base -// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the -// base name of `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path to -// the process executable. On Linux based systems, can be set to the target of -// `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessExitCode returns an attribute KeyValue conforming to the -// "process.exit.code" semantic conventions. It represents the exit code of the -// process. -func ProcessExitCode(val int) attribute.KeyValue { - return ProcessExitCodeKey.Int(val) -} - -// ProcessExitTime returns an attribute KeyValue conforming to the -// "process.exit.time" semantic conventions. It represents the date and time the -// process exited, in ISO 8601 format. 
-func ProcessExitTime(val string) attribute.KeyValue { - return ProcessExitTimeKey.String(val) -} - -// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the -// "process.group_leader.pid" semantic conventions. It represents the PID of the -// process's group leader. This is also the process group ID (PGID) of the -// process. -func ProcessGroupLeaderPID(val int) attribute.KeyValue { - return ProcessGroupLeaderPIDKey.Int(val) -} - -// ProcessInteractive returns an attribute KeyValue conforming to the -// "process.interactive" semantic conventions. It represents the whether the -// process is connected to an interactive shell. -func ProcessInteractive(val bool) attribute.KeyValue { - return ProcessInteractiveKey.Bool(val) -} - -// ProcessLinuxCgroup returns an attribute KeyValue conforming to the -// "process.linux.cgroup" semantic conventions. It represents the control group -// associated with the process. -func ProcessLinuxCgroup(val string) attribute.KeyValue { - return ProcessLinuxCgroupKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" -// semantic conventions. It represents the username of the user that owns the -// process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRealUserID returns an attribute KeyValue conforming to the -// "process.real_user.id" semantic conventions. It represents the real user ID -// (RUID) of the process. 
-func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user ID -// (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username of -// the saved user. 
-func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessTitle returns an attribute KeyValue conforming to the "process.title" -// semantic conventions. It represents the process title (proctitle). -func ProcessTitle(val string) attribute.KeyValue { - return ProcessTitleKey.String(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" -// semantic conventions. It represents the virtual process identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// ProcessWorkingDirectory returns an attribute KeyValue conforming to the -// "process.working_directory" semantic conventions. It represents the working -// directory of the process. 
-func ProcessWorkingDirectory(val string) attribute.KeyValue { - return ProcessWorkingDirectoryKey.String(val) -} - -// Enum values for process.context_switch_type -var ( - // voluntary - // Stability: development - ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") - // involuntary - // Stability: development - ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") -) - -// Enum values for process.paging.fault_type -var ( - // major - // Stability: development - ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") - // minor - // Stability: development - ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") -) - -// Namespace: profile -const ( - // ProfileFrameTypeKey is the attribute Key conforming to the - // "profile.frame.type" semantic conventions. It represents the describes the - // interpreter or compiler of a single frame. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cpython" - ProfileFrameTypeKey = attribute.Key("profile.frame.type") -) - -// Enum values for profile.frame.type -var ( - // [.NET] - // - // Stability: development - // - // [.NET]: https://wikipedia.org/wiki/.NET - ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") - // [JVM] - // - // Stability: development - // - // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine - ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") - // [Kernel] - // - // Stability: development - // - // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) - ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") - // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a - // more precise value MUST be used. 
- // - // Stability: development - // - // [C]: https://wikipedia.org/wiki/C_(programming_language) - // [C++]: https://wikipedia.org/wiki/C%2B%2B - // [Go]: https://wikipedia.org/wiki/Go_(programming_language) - // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) - ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") - // [Perl] - // - // Stability: development - // - // [Perl]: https://wikipedia.org/wiki/Perl - ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") - // [PHP] - // - // Stability: development - // - // [PHP]: https://wikipedia.org/wiki/PHP - ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") - // [Python] - // - // Stability: development - // - // [Python]: https://wikipedia.org/wiki/Python_(programming_language) - ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") - // [Ruby] - // - // Stability: development - // - // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) - ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") - // [V8JS] - // - // Stability: development - // - // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) - ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") - // [Erlang] - // - // Stability: development - // - // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) - ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") - // [Go], - // - // Stability: development - // - // [Go]: https://wikipedia.org/wiki/Go_(programming_language) - ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") - // [Rust] - // - // Stability: development - // - // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) - ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") -) - -// Namespace: rpc -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes] of the Connect request. Error codes are always string values. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [error codes]: https://connectrpc.com//docs/protocol/#error-codes - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the - // [numeric status code] of the gRPC request. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` - // property of response if it is an error response. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: -32700, 100 - RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Parse error", "User already exists" - RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJSONRPCRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, string, - // `null` or missing (for notifications), value is expected to be cast to string - // for simplicity. Use empty string in case of `null` value. Omit entirely if - // this is a notification. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10", "request-7", "" - RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJSONRPCVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2.0", "1.0" - RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. It MUST be calculated as two different counters - // starting from `1` one for sent messages and one for received message.. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" - // semantic conventions. It represents the whether this is a received or sent - // message. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. 
It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic - // conventions. It represents the name of the (logical) method being called, - // must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: exampleMethod - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function.name` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic - // conventions. It represents the full (logical) name of the service being - // called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myservice.EchoService - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing class. - // The `code.namespace` attribute may be used to store the latter (despite the - // attribute name, it may include a class name; e.g., class with method actually - // executing the call on the server side, RPC client stub class on the client - // side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic - // conventions. It represents a string identifying the remoting system. See - // below for a list of well-known identifiers. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCSystemKey = attribute.Key("rpc.system") -) - -// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the -// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the -// connect request metadata, `` being the normalized Connect Metadata key -// (lowercase), the value being the metadata values. -func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val) -} - -// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the -// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the -// connect response metadata, `` being the normalized Connect Metadata key -// (lowercase), the value being the metadata values. -func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val) -} - -// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the -// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC -// request metadata, `` being the normalized gRPC Metadata key (lowercase), -// the value being the metadata values. -func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.grpc.request.metadata."+key, val) -} - -// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the -// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC -// response metadata, `` being the normalized gRPC Metadata key (lowercase), -// the value being the metadata values. 
-func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.grpc.response.metadata."+key, val) -} - -// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` -// property of response if it is an error response. -func RPCJSONRPCErrorCode(val int) attribute.KeyValue { - return RPCJSONRPCErrorCodeKey.Int(val) -} - -// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJSONRPCErrorMessage(val string) attribute.KeyValue { - return RPCJSONRPCErrorMessageKey.String(val) -} - -// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property -// of request or response. Since protocol allows id to be int, string, `null` or -// missing (for notifications), value is expected to be cast to string for -// simplicity. Use empty string in case of `null` value. Omit entirely if this is -// a notification. -func RPCJSONRPCRequestID(val string) attribute.KeyValue { - return RPCJSONRPCRequestIDKey.String(val) -} - -// RPCJSONRPCVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version -// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't -// specify this, the value can be omitted. -func RPCJSONRPCVersion(val string) attribute.KeyValue { - return RPCJSONRPCVersionKey.String(val) -} - -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. 
-func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" -// semantic conventions. It MUST be calculated as two different counters starting -// from `1` one for sent messages and one for received message.. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. 
-func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// Enum values for rpc.connect_rpc.error_code -var ( - // cancelled - // Stability: development - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - // Stability: development - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - // Stability: development - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - // Stability: development - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - // Stability: development - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - // Stability: development - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - // Stability: development - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - // Stability: development - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - // Stability: development - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - // Stability: development - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - // Stability: development - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - // Stability: development - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - // Stability: development - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - // Stability: development - RPCConnectRPCErrorCodeUnavailable = 
RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - // Stability: development - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - // Stability: development - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -// Enum values for rpc.grpc.status_code -var ( - // OK - // Stability: development - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - // Stability: development - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - // Stability: development - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - // Stability: development - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - // Stability: development - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - // Stability: development - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - // Stability: development - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - // Stability: development - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - // Stability: development - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - // Stability: development - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - // Stability: development - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - // Stability: development - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - // Stability: development - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - // Stability: development - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - // Stability: development - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - 
// Stability: development - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - // Stability: development - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Enum values for rpc.message.type -var ( - // sent - // Stability: development - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - // Stability: development - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -// Enum values for rpc.system -var ( - // gRPC - // Stability: development - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - // Stability: development - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - // Stability: development - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - // Stability: development - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - // Stability: development - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// Namespace: security_rule -const ( - // SecurityRuleCategoryKey is the attribute Key conforming to the - // "security_rule.category" semantic conventions. It represents a categorization - // value keyword used by the entity using the rule for detection of this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Attempted Information Leak" - SecurityRuleCategoryKey = attribute.Key("security_rule.category") - - // SecurityRuleDescriptionKey is the attribute Key conforming to the - // "security_rule.description" semantic conventions. It represents the - // description of the rule generating the event. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Block requests to public DNS over HTTPS / TLS protocols" - SecurityRuleDescriptionKey = attribute.Key("security_rule.description") - - // SecurityRuleLicenseKey is the attribute Key conforming to the - // "security_rule.license" semantic conventions. It represents the name of the - // license under which the rule used to generate this event is made available. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Apache 2.0" - SecurityRuleLicenseKey = attribute.Key("security_rule.license") - - // SecurityRuleNameKey is the attribute Key conforming to the - // "security_rule.name" semantic conventions. It represents the name of the rule - // or signature generating the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "BLOCK_DNS_over_TLS" - SecurityRuleNameKey = attribute.Key("security_rule.name") - - // SecurityRuleReferenceKey is the attribute Key conforming to the - // "security_rule.reference" semantic conventions. It represents the reference - // URL to additional information about the rule used to generate this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" - // Note: The URL can point to the vendor’s documentation about the rule. If - // that’s not available, it can also be a link to a more general page - // describing this type of alert. - SecurityRuleReferenceKey = attribute.Key("security_rule.reference") - - // SecurityRuleRulesetNameKey is the attribute Key conforming to the - // "security_rule.ruleset.name" semantic conventions. It represents the name of - // the ruleset, policy, group, or parent category in which the rule used to - // generate this event is a member. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Standard_Protocol_Filters" - SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") - - // SecurityRuleUUIDKey is the attribute Key conforming to the - // "security_rule.uuid" semantic conventions. It represents a rule ID that is - // unique within the scope of a set or group of agents, observers, or other - // entities using the rule for detection of this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" - SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") - - // SecurityRuleVersionKey is the attribute Key conforming to the - // "security_rule.version" semantic conventions. It represents the version / - // revision of the rule being used for analysis. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.0.0" - SecurityRuleVersionKey = attribute.Key("security_rule.version") -) - -// SecurityRuleCategory returns an attribute KeyValue conforming to the -// "security_rule.category" semantic conventions. It represents a categorization -// value keyword used by the entity using the rule for detection of this event. -func SecurityRuleCategory(val string) attribute.KeyValue { - return SecurityRuleCategoryKey.String(val) -} - -// SecurityRuleDescription returns an attribute KeyValue conforming to the -// "security_rule.description" semantic conventions. It represents the -// description of the rule generating the event. -func SecurityRuleDescription(val string) attribute.KeyValue { - return SecurityRuleDescriptionKey.String(val) -} - -// SecurityRuleLicense returns an attribute KeyValue conforming to the -// "security_rule.license" semantic conventions. It represents the name of the -// license under which the rule used to generate this event is made available. 
-func SecurityRuleLicense(val string) attribute.KeyValue { - return SecurityRuleLicenseKey.String(val) -} - -// SecurityRuleName returns an attribute KeyValue conforming to the -// "security_rule.name" semantic conventions. It represents the name of the rule -// or signature generating the event. -func SecurityRuleName(val string) attribute.KeyValue { - return SecurityRuleNameKey.String(val) -} - -// SecurityRuleReference returns an attribute KeyValue conforming to the -// "security_rule.reference" semantic conventions. It represents the reference -// URL to additional information about the rule used to generate this event. -func SecurityRuleReference(val string) attribute.KeyValue { - return SecurityRuleReferenceKey.String(val) -} - -// SecurityRuleRulesetName returns an attribute KeyValue conforming to the -// "security_rule.ruleset.name" semantic conventions. It represents the name of -// the ruleset, policy, group, or parent category in which the rule used to -// generate this event is a member. -func SecurityRuleRulesetName(val string) attribute.KeyValue { - return SecurityRuleRulesetNameKey.String(val) -} - -// SecurityRuleUUID returns an attribute KeyValue conforming to the -// "security_rule.uuid" semantic conventions. It represents a rule ID that is -// unique within the scope of a set or group of agents, observers, or other -// entities using the rule for detection of this event. -func SecurityRuleUUID(val string) attribute.KeyValue { - return SecurityRuleUUIDKey.String(val) -} - -// SecurityRuleVersion returns an attribute KeyValue conforming to the -// "security_rule.version" semantic conventions. It represents the version / -// revision of the rule being used for analysis. -func SecurityRuleVersion(val string) attribute.KeyValue { - return SecurityRuleVersionKey.String(val) -} - -// Namespace: server -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. 
It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the client side, and when communicating through an - // intermediary, `server.address` SHOULD represent the server address behind any - // intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" semantic - // conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through an - // intermediary, `server.port` SHOULD represent the server port behind any - // intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the "server.address" -// semantic conventions. It represents the server domain name if available -// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// Namespace: service -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID of - // the service instance. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "627cc493-f310-47de-96bd-71410b7dec09" - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to - // distinguish instances of the same service that exist at the same time (e.g. - // instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random Version 1 - // or Version 4 [RFC - // 4122] UUID, but are free to use an inherent unique ID as - // the source of - // this value if stability is desirable. In that case, the ID SHOULD be used as - // source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the purposes of - // identifying a service instance is - // needed. Similar to what can be seen in the man page for the - // [`/etc/machine-id`] file, the underlying - // data, such as pod name and namespace should be treated as confidential, being - // the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we do - // not recommend using one identifier - // for all processes participating in the application. Instead, it's recommended - // each division (e.g. a worker - // thread in unicorn) to have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it can't - // unambiguously determine the - // service instance that is generating that telemetry. For instance, creating an - // UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container within - // that pod the telemetry originated. 
- // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, as - // they know the target address and - // port. - // - // [RFC - // 4122]: https://www.ietf.org/rfc/rfc4122.txt - // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" semantic - // conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "shoppingcart" - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. - // If `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - // - // [`process.executable.name`]: process.md - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Shop" - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace - // defined (so the empty/unspecified namespace is simply one more valid - // namespace). 
Zero-length namespace string is assumed equal to unspecified - // namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the "service.version" - // semantic conventions. It represents the version string of the service API or - // implementation. The format is not defined by these conventions. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "2.0.0", "a01dbef8a" - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of the -// service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the "service.name" -// semantic conventions. It represents the logical name of the service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Namespace: session -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" semantic - // conventions. It represents a unique id to identify a session. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 00112233-4455-6677-8899-aabbccddeeff - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 00112233-4455-6677-8899-aabbccddeeff - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// Namespace: signalr -const ( - // SignalRConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the signalR - // HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "app_shutdown", "timeout" - SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalRTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. It represents the - // [SignalR transport type]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "web_sockets", "long_polling" - // - // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md - SignalRTransportKey = attribute.Key("signalr.transport") -) - -// Enum values for signalr.connection.status -var ( - // The connection was closed normally. - // Stability: stable - SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout. - // Stability: stable - SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down. - // Stability: stable - SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") -) - -// Enum values for signalr.transport -var ( - // ServerSentEvents protocol - // Stability: stable - SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") - // LongPolling protocol - // Stability: stable - SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") - // WebSockets protocol - // Stability: stable - SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") -) - -// Namespace: source -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix domain - // socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the destination side, and when communicating through - // an intermediary, `source.address` SHOULD represent the source address behind - // any intermediaries, for example proxies, if it's available. 
- SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" semantic - // conventions. It represents the source port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the "source.address" -// semantic conventions. It represents the source address - domain name if -// available without reverse DNS lookup; otherwise, IP address or Unix domain -// socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number. -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Namespace: system -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // deprecated, use `cpu.logical_number` instead. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. It represents the device identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "(identifier)" - SystemDeviceKey = attribute.Key("system.device") - - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the filesystem - // mode. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "rw, ro" - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/mnt/data" - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the filesystem - // state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "used" - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the filesystem - // type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ext4" - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") - - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "free", "cached" - SystemMemoryStateKey = attribute.Key("system.memory.state") - - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "in" - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. It represents the memory paging - // state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "free" - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory paging - // type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "minor" - SystemPagingTypeKey = attribute.Key("system.paging.type") - - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. It represents the process - // state, e.g., [Linux Process State Codes]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "running" - // - // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the -// deprecated, use `cpu.logical_number` instead. -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// SystemDevice returns an attribute KeyValue conforming to the "system.device" -// semantic conventions. It represents the device identifier. -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode. -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the -// "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path. -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Enum values for system.filesystem.state -var ( - // used - // Stability: development - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - // Stability: development - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - // Stability: development - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -// Enum values for system.filesystem.type -var ( - // fat32 - // Stability: development - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - // Stability: development - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - // Stability: development - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - // Stability: development - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - // Stability: development - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - // Stability: development - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// Enum values for system.memory.state -var ( - // Actual used virtual memory in bytes. 
- // Stability: development - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - // Stability: development - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // buffers - // Stability: development - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - // Stability: development - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Enum values for system.paging.direction -var ( - // in - // Stability: development - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - // Stability: development - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -// Enum values for system.paging.state -var ( - // used - // Stability: development - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - // Stability: development - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -// Enum values for system.paging.type -var ( - // major - // Stability: development - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - // Stability: development - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Enum values for system.process.status -var ( - // running - // Stability: development - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - // Stability: development - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - // Stability: development - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - // Stability: development - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// Namespace: telemetry -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of the - // auto instrumentation agent or distribution, if used. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "parts-unlimited-java" - // Note: Official auto instrumentation agents and distributions SHOULD set the - // `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the version - // string of the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2.3" - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the language of - // the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "opentelemetry" - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to - // `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is used, - // this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module name of - // this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. 
- // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.2.3" - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version string -// of the telemetry SDK. 
-func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// Enum values for telemetry.sdk.language -var ( - // cpp - // Stability: stable - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - // Stability: stable - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - // Stability: stable - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - // Stability: stable - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - // Stability: stable - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - // Stability: stable - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - // Stability: stable - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - // Stability: stable - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - // Stability: stable - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - // Stability: stable - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - // Stability: stable - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - // Stability: stable - TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") -) - -// Namespace: test -const ( - // TestCaseNameKey is the attribute Key conforming to the "test.case.name" - // semantic conventions. It represents the fully qualified human readable name - // of the [test case]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", - // "ExampleTestCase1_test1" - // - // [test case]: https://wikipedia.org/wiki/Test_case - TestCaseNameKey = attribute.Key("test.case.name") - - // TestCaseResultStatusKey is the attribute Key conforming to the - // "test.case.result.status" semantic conventions. It represents the status of - // the actual test case result from test execution. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pass", "fail" - TestCaseResultStatusKey = attribute.Key("test.case.result.status") - - // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" - // semantic conventions. It represents the human readable name of a [test suite] - // . - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TestSuite1" - // - // [test suite]: https://wikipedia.org/wiki/Test_suite - TestSuiteNameKey = attribute.Key("test.suite.name") - - // TestSuiteRunStatusKey is the attribute Key conforming to the - // "test.suite.run.status" semantic conventions. It represents the status of the - // test suite run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "skipped", "aborted", "timed_out", - // "in_progress" - TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") -) - -// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" -// semantic conventions. It represents the fully qualified human readable name of -// the [test case]. -// -// [test case]: https://wikipedia.org/wiki/Test_case -func TestCaseName(val string) attribute.KeyValue { - return TestCaseNameKey.String(val) -} - -// TestSuiteName returns an attribute KeyValue conforming to the -// "test.suite.name" semantic conventions. 
It represents the human readable name -// of a [test suite]. -// -// [test suite]: https://wikipedia.org/wiki/Test_suite -func TestSuiteName(val string) attribute.KeyValue { - return TestSuiteNameKey.String(val) -} - -// Enum values for test.case.result.status -var ( - // pass - // Stability: development - TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") - // fail - // Stability: development - TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") -) - -// Enum values for test.suite.run.status -var ( - // success - // Stability: development - TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") - // failure - // Stability: development - TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") - // skipped - // Stability: development - TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") - // aborted - // Stability: development - TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") - // timed_out - // Stability: development - TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") - // in_progress - // Stability: development - TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") -) - -// Namespace: thread -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed to OS - // thread ID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic - // conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: main - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic -// conventions. 
It represents the current "managed" thread ID (as opposed to OS -// thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Namespace: tls -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic - // conventions. It represents the string indicating the [cipher] used during the - // current connection. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" - // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` - // of the [registered TLS Cipher Suits]. - // - // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 - // [registered TLS Cipher Suits]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the PEM-encoded - // stand-alone certificate offered by the client. This is usually - // mutually-exclusive of `client.certificate_chain` since this value also exists - // in that list. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII..." - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the array - // of PEM-encoded certificates that make up the certificate chain offered by the - // client. 
This is usually mutually-exclusive of `client.certificate` since that - // value should be the first certificate in the chain. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII...", "MI..." - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the certificate - // fingerprint using the MD5 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the certificate - // fingerprint using the SHA1 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the certificate - // fingerprint using the SHA256 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" - // semantic conventions. It represents the distinguished name of [subject] of - // the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" - // - // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based on - // how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "d4e5b18d6b55c71272893221c96ba240" - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T00:00:00.000Z" - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the date/Time - // indicating when client certificate is first considered valid. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1970-01-01T00:00:00.000Z" - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the distinguished - // name of subject of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the array - // of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the given - // cipher, when applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "secp256r1" - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the "tls.established" - // semantic conventions. It represents the boolean flag indicating if the TLS - // negotiation was successful and transitioned to an encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" - // semantic conventions. 
It represents the string indicating the protocol being - // tunneled. Per the values in the [IANA registry], this string should be lower - // case. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "http/1.1" - // - // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" - // semantic conventions. It represents the normalized lowercase protocol name - // parsed from original string of the negotiated [SSL/TLS protocol version]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric part - // of the version parsed from the original string of the negotiated - // [SSL/TLS protocol version]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2", "3" - // - // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic - // conventions. It represents the boolean flag indicating if this TLS connection - // was resumed from an existing TLS negotiation. 
- // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the PEM-encoded - // stand-alone certificate offered by the server. This is usually - // mutually-exclusive of `server.certificate_chain` since this value also exists - // in that list. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII..." - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the array - // of PEM-encoded certificates that make up the certificate chain offered by the - // server. This is usually mutually-exclusive of `server.certificate` since that - // value should be the first certificate in the chain. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII...", "MI..." - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the certificate - // fingerprint using the MD5 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. 
It represents the certificate - // fingerprint using the SHA1 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the certificate - // fingerprint using the SHA256 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" - // semantic conventions. It represents the distinguished name of [subject] of - // the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" - // - // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" - // semantic conventions. It represents a hash that identifies servers based on - // how they perform an SSL/TLS handshake. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "d4e5b18d6b55c71272893221c96ba240" - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T00:00:00.000Z" - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the date/Time - // indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1970-01-01T00:00:00.000Z" - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the distinguished - // name of subject of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the [cipher] used -// during the current connection. -// -// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. 
It represents the PEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also exists -// in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by the -// client. This is usually mutually-exclusive of `client.certificate` since that -// value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate offered -// by the client. For consistency with other hash values, this value should be -// formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. 
For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished name -// of [subject] of the issuer of the x.509 certificate presented by the client. -// -// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" -// semantic conventions. It represents a hash that identifies clients based on -// how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. 
-func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic -// conventions. It represents the string indicating the curve used for the given -// cipher, when applicable. -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string indicating -// the protocol being tunneled. Per the values in the [IANA registry], this -// string should be lower case. -// -// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part of -// the version parsed from the original string of the negotiated -// [SSL/TLS protocol version]. 
-// -// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also exists -// in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by the -// server. This is usually mutually-exclusive of `server.certificate` since that -// value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate offered -// by the server. For consistency with other hash values, this value should be -// formatted as an uppercase hash. 
-func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished name -// of [subject] of the issuer of the x.509 certificate presented by the client. -// -// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. 
-func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Enum values for tls.protocol.name -var ( - // ssl - // Stability: development - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - // Stability: development - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// Namespace: url -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" semantic - // conventions. It represents the domain extracted from the `url.full`, such as - // "opentelemetry.io". - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", - // "[1080:0:0:0:8:800:200C:417A]" - // Note: In some cases a URL may refer to an IP and/or port directly, without a - // domain name. In this case, the IP address would go to the domain field. If - // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` - // and `]` characters should also be captured in the domain field. - // - // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. 
It represents the file extension extracted from the - // `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "png", "gz" - // Note: The file extension is only set if it exists, as not every url has a - // file extension. When the file name has multiple extensions `example.tar.gz`, - // only the last one should be captured `gz`, not `tar.gz`. - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic - // conventions. It represents the [URI fragment] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SemConv" - // - // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network resource - // according to [RFC3986]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the fragment - // is not transmitted over HTTP, but if it is known, it SHOULD be included - // nevertheless. - // - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. - // In such case username and password SHOULD be redacted and attribute's value - // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. - // - // `url.full` SHOULD capture the absolute URL when it is available (or can be - // reconstructed). - // - // Sensitive content provided in `url.full` SHOULD be scrubbed when - // instrumentations can identify it. 
- // - // - // Query string values for the following keys SHOULD be redacted by default and - // replaced by the - // value `REDACTED`: - // - // - [`AWSAccessKeyId`] - // - [`Signature`] - // - [`sig`] - // - [`X-Goog-Signature`] - // - // This list is subject to change over time. - // - // When a query string value is redacted, the query string key SHOULD still be - // preserved, e.g. - // `https://www.example.com/path?color=blue&sig=REDACTED`. - // - // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 - // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token - // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" semantic - // conventions. It represents the unmodified original URL as seen in the event - // source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", - // "search?q=OpenTelemetry" - // Note: In network monitoring, the observed URL may be a full URL, whereas in - // access logs, the URL is often just represented as a path. This field is meant - // to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case password and - // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. - URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. 
It represents the [URI path] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "/search" - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - // - // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full`. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI query] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "q=OpenTelemetry" - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - // - // - // Query string values for the following keys SHOULD be redacted by default and - // replaced by the value `REDACTED`: - // - // - [`AWSAccessKeyId`] - // - [`Signature`] - // - [`sig`] - // - [`X-Goog-Signature`] - // - // This list is subject to change over time. - // - // When a query string value is redacted, the query string key SHOULD still be - // preserved, e.g. - // `q=OpenTelemetry&sig=REDACTED`. 
- // - // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 - // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token - // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered url domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.com", "foo.co.uk" - // Note: This value can be determined precisely with the [public suffix list]. - // For example, the registered domain for `foo.example.com` is `example.com`. - // Trying to approximate this by simply taking the last two labels will not work - // well for TLDs such as `co.uk`. - // - // [public suffix list]: https://publicsuffix.org/ - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic - // conventions. It represents the [URI scheme] component identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "https", "ftp", "telnet" - // - // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name includes all of the names except the host name under - // the registered_domain. 
In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain contains - // all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "east", "sub2.sub1" - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the - // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the - // subdomain field should contain `sub2.sub1`, with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" semantic - // conventions. It represents the low-cardinality template of an - // [absolute path reference]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/users/{id}", "/users/:id", "/users?id={id}" - // - // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective top - // level domain (eTLD), also known as the domain suffix, is the last part of the - // domain name. For example, the top level domain for example.com is `com`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "com", "co.uk" - // Note: This value can be determined precisely with the [public suffix list]. - // - // [public suffix list]: https://publicsuffix.org/ - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the `url.full`, -// such as "opentelemetry.io". 
-func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the "url.extension" -// semantic conventions. It represents the file extension extracted from the -// `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the "url.fragment" -// semantic conventions. It represents the [URI fragment] component. -// -// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" semantic -// conventions. It represents the absolute URL describing a network resource -// according to [RFC3986]. -// -// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the "url.original" -// semantic conventions. It represents the unmodified original URL as seen in the -// event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" semantic -// conventions. It represents the [URI path] component. -// -// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" semantic -// conventions. It represents the port extracted from the `url.full`. -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic -// conventions. It represents the [URI query] component. 
-// -// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered url domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI scheme] component identifying the -// used protocol. -// -// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" -// semantic conventions. It represents the subdomain portion of a fully qualified -// domain name includes all of the names except the host name under the -// registered_domain. In a partially qualified domain, or if the qualification -// level of the full name cannot be determined, subdomain contains all of the -// names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the "url.template" -// semantic conventions. It represents the low-cardinality template of an -// [absolute path reference]. -// -// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, is the last part of the -// domain name. 
For example, the top level domain for example.com is `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Namespace: user -const ( - // UserEmailKey is the attribute Key conforming to the "user.email" semantic - // conventions. It represents the user email address. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a.einstein@example.com" - UserEmailKey = attribute.Key("user.email") - - // UserFullNameKey is the attribute Key conforming to the "user.full_name" - // semantic conventions. It represents the user's full name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Albert Einstein" - UserFullNameKey = attribute.Key("user.full_name") - - // UserHashKey is the attribute Key conforming to the "user.hash" semantic - // conventions. It represents the unique user hash to correlate information for - // a user in anonymized form. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" - // Note: Useful if `user.id` or `user.name` contain confidential information and - // cannot be used. - UserHashKey = attribute.Key("user.hash") - - // UserIDKey is the attribute Key conforming to the "user.id" semantic - // conventions. It represents the unique identifier of the user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" - UserIDKey = attribute.Key("user.id") - - // UserNameKey is the attribute Key conforming to the "user.name" semantic - // conventions. It represents the short name or login/username of the user. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a.einstein" - UserNameKey = attribute.Key("user.name") - - // UserRolesKey is the attribute Key conforming to the "user.roles" semantic - // conventions. It represents the array of user roles at the time of the event. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "admin", "reporting_user" - UserRolesKey = attribute.Key("user.roles") -) - -// UserEmail returns an attribute KeyValue conforming to the "user.email" -// semantic conventions. It represents the user email address. -func UserEmail(val string) attribute.KeyValue { - return UserEmailKey.String(val) -} - -// UserFullName returns an attribute KeyValue conforming to the "user.full_name" -// semantic conventions. It represents the user's full name. -func UserFullName(val string) attribute.KeyValue { - return UserFullNameKey.String(val) -} - -// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic -// conventions. It represents the unique user hash to correlate information for a -// user in anonymized form. -func UserHash(val string) attribute.KeyValue { - return UserHashKey.String(val) -} - -// UserID returns an attribute KeyValue conforming to the "user.id" semantic -// conventions. It represents the unique identifier of the user. -func UserID(val string) attribute.KeyValue { - return UserIDKey.String(val) -} - -// UserName returns an attribute KeyValue conforming to the "user.name" semantic -// conventions. It represents the short name or login/username of the user. -func UserName(val string) attribute.KeyValue { - return UserNameKey.String(val) -} - -// UserRoles returns an attribute KeyValue conforming to the "user.roles" -// semantic conventions. It represents the array of user roles at the time of the -// event. 
-func UserRoles(val ...string) attribute.KeyValue { - return UserRolesKey.StringSlice(val) -} - -// Namespace: user_agent -const ( - // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" - // semantic conventions. It represents the name of the user-agent extracted from - // original. Usually refers to the browser's name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Safari", "YourApp" - // Note: [Example] of extracting browser's name from original string. In the - // case of using a user-agent for non-browser products, such as microservices - // with multiple names/versions inside the `user_agent.original`, the most - // significant name SHOULD be selected. In such a scenario it should align with - // `user_agent.version` - // - // [Example]: https://www.whatsmyua.info - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of the - // [HTTP User-Agent] header sent by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 - // grpc-java-okhttp/1.27.2" - // - // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentOSNameKey is the attribute Key conforming to the - // "user_agent.os.name" semantic conventions. It represents the human readable - // operating system name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iOS", "Android", "Ubuntu" - // Note: For mapping user agent strings to OS names, libraries such as - // [ua-parser] can be utilized. - // - // [ua-parser]: https://github.com/ua-parser - UserAgentOSNameKey = attribute.Key("user_agent.os.name") - - // UserAgentOSVersionKey is the attribute Key conforming to the - // "user_agent.os.version" semantic conventions. It represents the version - // string of the operating system as defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.2.1", "18.04.1" - // Note: For mapping user agent strings to OS versions, libraries such as - // [ua-parser] can be utilized. - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - // [ua-parser]: https://github.com/ua-parser - UserAgentOSVersionKey = attribute.Key("user_agent.os.version") - - // UserAgentSyntheticTypeKey is the attribute Key conforming to the - // "user_agent.synthetic.type" semantic conventions. It represents the specifies - // the category of synthetic traffic, such as tests or bots. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This attribute MAY be derived from the contents of the - // `user_agent.original` attribute. Components that populate the attribute are - // responsible for determining what they consider to be synthetic bot or test - // traffic. This attribute can either be set for self-identification purposes, - // or on telemetry detected to be generated as a result of a synthetic request. - // This attribute is useful for distinguishing between genuine client traffic - // and synthetic traffic generated by bots or tests. 
- UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of the - // user-agent extracted from original. Usually refers to the browser's version. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.1.2", "1.0.0" - // Note: [Example] of extracting browser's version from original string. In the - // case of using a user-agent for non-browser products, such as microservices - // with multiple names/versions inside the `user_agent.original`, the most - // significant version SHOULD be selected. In such a scenario it should align - // with `user_agent.name` - // - // [Example]: https://www.whatsmyua.info - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP User-Agent] header sent by the client. -// -// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentOSName returns an attribute KeyValue conforming to the -// "user_agent.os.name" semantic conventions. It represents the human readable -// operating system name. -func UserAgentOSName(val string) attribute.KeyValue { - return UserAgentOSNameKey.String(val) -} - -// UserAgentOSVersion returns an attribute KeyValue conforming to the -// "user_agent.os.version" semantic conventions. 
It represents the version string -// of the operating system as defined in [Version Attributes]. -// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func UserAgentOSVersion(val string) attribute.KeyValue { - return UserAgentOSVersionKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version. -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// Enum values for user_agent.synthetic.type -var ( - // Bot source. - // Stability: development - UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") - // Synthetic test source. - // Stability: development - UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") -) - -// Namespace: vcs -const ( - // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" - // semantic conventions. It represents the ID of the change (pull request/merge - // request/changelist) if applicable. This is usually a unique (within - // repository) identifier generated by the VCS system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123" - VCSChangeIDKey = attribute.Key("vcs.change.id") - - // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" - // semantic conventions. It represents the state of the change (pull - // request/merge request/changelist). - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "open", "closed", "merged" - VCSChangeStateKey = attribute.Key("vcs.change.state") - - // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" - // semantic conventions. It represents the human readable title of the change - // (pull request/merge request/changelist). 
This title is often a brief summary - // of the change and may get merged in to a ref as the commit summary. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update - // dependency" - VCSChangeTitleKey = attribute.Key("vcs.change.title") - - // VCSLineChangeTypeKey is the attribute Key conforming to the - // "vcs.line_change.type" semantic conventions. It represents the type of line - // change being measured on a branch or change. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "added", "removed" - VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") - - // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" - // semantic conventions. It represents the group owner within the version - // control system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-org", "myteam", "business-unit" - VCSOwnerNameKey = attribute.Key("vcs.owner.name") - - // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" - // semantic conventions. It represents the name of the version control system - // provider. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "github", "gitlab", "gitea", "bitbucket" - VCSProviderNameKey = attribute.Key("vcs.provider.name") - - // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" - // semantic conventions. It represents the name of the [reference] such as - // **branch** or **tag** in the repository. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-feature-branch", "tag-1-test" - // Note: `base` refers to the starting point of a change. 
For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") - - // VCSRefBaseRevisionKey is the attribute Key conforming to the - // "vcs.ref.base.revision" semantic conventions. It represents the revision, - // literally [revised version], The revision most often refers to a commit - // object in Git, or a revision number in SVN. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", - // "main", "123", "HEAD" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. The - // revision can be a full [hash value (see - // glossary)], - // of the recorded change to a ref within a repository pointing to a - // commit [commit] object. It does - // not necessarily have to be a hash; it can simply define a [revision - // number] - // which is an integer that is monotonically increasing. In cases where - // it is identical to the `ref.base.name`, it SHOULD still be included. - // It is up to the implementer to decide which value to set as the - // revision based on the VCS system and situational context. 
- // - // [revised version]: https://www.merriam-webster.com/dictionary/revision - // [hash value (see - // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [commit]: https://git-scm.com/docs/git-commit - // [revision - // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html - VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") - - // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" - // semantic conventions. It represents the type of the [reference] in the - // repository. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") - - // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" - // semantic conventions. It represents the name of the [reference] such as - // **branch** or **tag** in the repository. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-feature-branch", "tag-1-test" - // Note: `head` refers to where you are right now; the current reference at a - // given time. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") - - // VCSRefHeadRevisionKey is the attribute Key conforming to the - // "vcs.ref.head.revision" semantic conventions. It represents the revision, - // literally [revised version], The revision most often refers to a commit - // object in Git, or a revision number in SVN. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", - // "main", "123", "HEAD" - // Note: `head` refers to where you are right now; the current reference at a - // given time.The revision can be a full [hash value (see - // glossary)], - // of the recorded change to a ref within a repository pointing to a - // commit [commit] object. It does - // not necessarily have to be a hash; it can simply define a [revision - // number] - // which is an integer that is monotonically increasing. In cases where - // it is identical to the `ref.head.name`, it SHOULD still be included. - // It is up to the implementer to decide which value to set as the - // revision based on the VCS system and situational context. - // - // [revised version]: https://www.merriam-webster.com/dictionary/revision - // [hash value (see - // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [commit]: https://git-scm.com/docs/git-commit - // [revision - // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html - VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") - - // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" - // semantic conventions. It represents the type of the [reference] in the - // repository. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // Note: `head` refers to where you are right now; the current reference at a - // given time. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") - - // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic - // conventions. It represents the type of the [reference] in the repository. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefTypeKey = attribute.Key("vcs.ref.type") - - // VCSRepositoryNameKey is the attribute Key conforming to the - // "vcs.repository.name" semantic conventions. It represents the human readable - // name of the repository. It SHOULD NOT include any additional identifier like - // Group/SubGroup in GitLab or organization in GitHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "semantic-conventions", "my-cool-repo" - // Note: Due to it only being the name, it can clash with forks of the same - // repository if collecting telemetry across multiple orgs or groups in - // the same backends. - VCSRepositoryNameKey = attribute.Key("vcs.repository.name") - - // VCSRepositoryURLFullKey is the attribute Key conforming to the - // "vcs.repository.url.full" semantic conventions. It represents the - // [canonical URL] of the repository providing the complete HTTP(S) address in - // order to locate and identify the repository through a browser. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/opentelemetry/open-telemetry-collector-contrib", - // "https://gitlab.com/my-org/my-project/my-projects-project/repo" - // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include - // the `.git` extension. - // - // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. - VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") - - // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the - // "vcs.revision_delta.direction" semantic conventions. It represents the type - // of revision comparison. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ahead", "behind" - VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") -) - -// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" -// semantic conventions. It represents the ID of the change (pull request/merge -// request/changelist) if applicable. This is usually a unique (within -// repository) identifier generated by the VCS system. -func VCSChangeID(val string) attribute.KeyValue { - return VCSChangeIDKey.String(val) -} - -// VCSChangeTitle returns an attribute KeyValue conforming to the -// "vcs.change.title" semantic conventions. It represents the human readable -// title of the change (pull request/merge request/changelist). This title is -// often a brief summary of the change and may get merged in to a ref as the -// commit summary. -func VCSChangeTitle(val string) attribute.KeyValue { - return VCSChangeTitleKey.String(val) -} - -// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" -// semantic conventions. It represents the group owner within the version control -// system. -func VCSOwnerName(val string) attribute.KeyValue { - return VCSOwnerNameKey.String(val) -} - -// VCSRefBaseName returns an attribute KeyValue conforming to the -// "vcs.ref.base.name" semantic conventions. It represents the name of the -// [reference] such as **branch** or **tag** in the repository. -// -// [reference]: https://git-scm.com/docs/gitglossary#def_ref -func VCSRefBaseName(val string) attribute.KeyValue { - return VCSRefBaseNameKey.String(val) -} - -// VCSRefBaseRevision returns an attribute KeyValue conforming to the -// "vcs.ref.base.revision" semantic conventions. It represents the revision, -// literally [revised version], The revision most often refers to a commit object -// in Git, or a revision number in SVN. 
-// -// [revised version]: https://www.merriam-webster.com/dictionary/revision -func VCSRefBaseRevision(val string) attribute.KeyValue { - return VCSRefBaseRevisionKey.String(val) -} - -// VCSRefHeadName returns an attribute KeyValue conforming to the -// "vcs.ref.head.name" semantic conventions. It represents the name of the -// [reference] such as **branch** or **tag** in the repository. -// -// [reference]: https://git-scm.com/docs/gitglossary#def_ref -func VCSRefHeadName(val string) attribute.KeyValue { - return VCSRefHeadNameKey.String(val) -} - -// VCSRefHeadRevision returns an attribute KeyValue conforming to the -// "vcs.ref.head.revision" semantic conventions. It represents the revision, -// literally [revised version], The revision most often refers to a commit object -// in Git, or a revision number in SVN. -// -// [revised version]: https://www.merriam-webster.com/dictionary/revision -func VCSRefHeadRevision(val string) attribute.KeyValue { - return VCSRefHeadRevisionKey.String(val) -} - -// VCSRepositoryName returns an attribute KeyValue conforming to the -// "vcs.repository.name" semantic conventions. It represents the human readable -// name of the repository. It SHOULD NOT include any additional identifier like -// Group/SubGroup in GitLab or organization in GitHub. -func VCSRepositoryName(val string) attribute.KeyValue { - return VCSRepositoryNameKey.String(val) -} - -// VCSRepositoryURLFull returns an attribute KeyValue conforming to the -// "vcs.repository.url.full" semantic conventions. It represents the -// [canonical URL] of the repository providing the complete HTTP(S) address in -// order to locate and identify the repository through a browser. -// -// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
-func VCSRepositoryURLFull(val string) attribute.KeyValue { - return VCSRepositoryURLFullKey.String(val) -} - -// Enum values for vcs.change.state -var ( - // Open means the change is currently active and under review. It hasn't been - // merged into the target branch yet, and it's still possible to make changes or - // add comments. - // Stability: development - VCSChangeStateOpen = VCSChangeStateKey.String("open") - // WIP (work-in-progress, draft) means the change is still in progress and not - // yet ready for a full review. It might still undergo significant changes. - // Stability: development - VCSChangeStateWip = VCSChangeStateKey.String("wip") - // Closed means the merge request has been closed without merging. This can - // happen for various reasons, such as the changes being deemed unnecessary, the - // issue being resolved in another way, or the author deciding to withdraw the - // request. - // Stability: development - VCSChangeStateClosed = VCSChangeStateKey.String("closed") - // Merged indicates that the change has been successfully integrated into the - // target codebase. - // Stability: development - VCSChangeStateMerged = VCSChangeStateKey.String("merged") -) - -// Enum values for vcs.line_change.type -var ( - // How many lines were added. - // Stability: development - VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") - // How many lines were removed. 
- // Stability: development - VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") -) - -// Enum values for vcs.provider.name -var ( - // [GitHub] - // Stability: development - // - // [GitHub]: https://github.com - VCSProviderNameGithub = VCSProviderNameKey.String("github") - // [GitLab] - // Stability: development - // - // [GitLab]: https://gitlab.com - VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") - // [Gitea] - // Stability: development - // - // [Gitea]: https://gitea.io - VCSProviderNameGitea = VCSProviderNameKey.String("gitea") - // [Bitbucket] - // Stability: development - // - // [Bitbucket]: https://bitbucket.org - VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") -) - -// Enum values for vcs.ref.base.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") -) - -// Enum values for vcs.ref.head.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") -) - -// Enum values for vcs.ref.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefTypeBranch = VCSRefTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: 
https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefTypeTag = VCSRefTypeKey.String("tag") -) - -// Enum values for vcs.revision_delta.direction -var ( - // How many revisions the change is behind the target ref. - // Stability: development - VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") - // How many revisions the change is ahead of the target ref. - // Stability: development - VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") -) - -// Namespace: webengine -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the additional - // description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final" - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "WildFly" - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of the - // web engine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "21.0.0" - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition information). 
-func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" -// semantic conventions. It represents the name of the web engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the web -// engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Namespace: zos -const ( - // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic - // conventions. It represents the System Management Facility (SMF) Identifier - // uniquely identified a z/OS system within a SYSPLEX or mainframe environment - // and is used for system and performance analysis. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "SYS1" - ZOSSmfIDKey = attribute.Key("zos.smf.id") - - // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name" - // semantic conventions. It represents the name of the SYSPLEX to which the z/OS - // system belongs too. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "SYSPLEX1" - ZOSSysplexNameKey = attribute.Key("zos.sysplex.name") -) - -// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic -// conventions. It represents the System Management Facility (SMF) Identifier -// uniquely identified a z/OS system within a SYSPLEX or mainframe environment -// and is used for system and performance analysis. -func ZOSSmfID(val string) attribute.KeyValue { - return ZOSSmfIDKey.String(val) -} - -// ZOSSysplexName returns an attribute KeyValue conforming to the -// "zos.sysplex.name" semantic conventions. 
It represents the name of the SYSPLEX -// to which the z/OS system belongs too. -func ZOSSysplexName(val string) attribute.KeyValue { - return ZOSSysplexNameKey.String(val) -} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go deleted file mode 100644 index 1110103210..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.37.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go deleted file mode 100644 index 267979c051..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" - -import ( - "reflect" - - "go.opentelemetry.io/otel/attribute" -) - -// ErrorType returns an [attribute.KeyValue] identifying the error type of err. -// -// If err is nil, the returned attribute has the default value -// [ErrorTypeOther]. -// -// If err's type has the method -// -// ErrorType() string -// -// then the returned attribute has the value of err.ErrorType(). Otherwise, the -// returned attribute has a value derived from the concrete type of err. -// -// The key of the returned attribute is [ErrorTypeKey]. 
-func ErrorType(err error) attribute.KeyValue { - if err == nil { - return ErrorTypeOther - } - - return ErrorTypeKey.String(errorType(err)) -} - -func errorType(err error) string { - var s string - if et, ok := err.(interface{ ErrorType() string }); ok { - // Prioritize the ErrorType method if available. - s = et.ErrorType() - } - if s == "" { - // Fallback to reflection if the ErrorType method is not supported or - // returns an empty value. - - t := reflect.TypeOf(err) - pkg, name := t.PkgPath(), t.Name() - if pkg != "" && name != "" { - s = pkg + "." + name - } else { - // The type has no package path or name (predeclared, not-defined, - // or alias for a not-defined type). - // - // This is not guaranteed to be unique, but is a best effort. - s = t.String() - } - } - return s -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go deleted file mode 100644 index e67469a4f6..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go deleted file mode 100644 index f8a0b70441..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. 
Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.37.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md deleted file mode 100644 index e246b1692d..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/MIGRATION.md +++ /dev/null @@ -1,27 +0,0 @@ - -# Migration from v1.39.0 to v1.40.0 - -The `go.opentelemetry.io/otel/semconv/v1.40.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.39.0` with the following exceptions. - -## Removed - -The following declarations have been removed. -Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. - -If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. -If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. 
- -- `ErrorMessage` -- `ErrorMessageKey` -- `RPCMessageCompressedSize` -- `RPCMessageCompressedSizeKey` -- `RPCMessageID` -- `RPCMessageIDKey` -- `RPCMessageTypeKey` -- `RPCMessageTypeReceived` -- `RPCMessageTypeSent` -- `RPCMessageUncompressedSize` -- `RPCMessageUncompressedSizeKey` - -[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions -[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md deleted file mode 100644 index c51b7fb7b0..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.40.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.40.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.40.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go deleted file mode 100644 index ee6b1f79d6..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/attribute_group.go +++ /dev/null @@ -1,16861 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" - -import "go.opentelemetry.io/otel/attribute" - -// Namespace: android -const ( - // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" - // semantic conventions. It represents the this attribute represents the state - // of the application. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "created" - // Note: The Android lifecycle states are defined in - // [Activity lifecycle callbacks], and from which the `OS identifiers` are - // derived. - // - // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc - AndroidAppStateKey = attribute.Key("android.app.state") - - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version (`os.version`) of - // the android operating system. More information can be found in the - // [Android API levels documentation]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "33", "32" - // - // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found in the -// [Android API levels documentation]. -// -// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// Enum values for android.app.state -var ( - // Any time before Activity.onResume() or, if the app has no Activity, - // Context.startService() has been called in the app for the first time. 
- // - // Stability: development - AndroidAppStateCreated = AndroidAppStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, - // Context.stopService() has been called when the app was in the foreground - // state. - // - // Stability: development - AndroidAppStateBackground = AndroidAppStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, - // Context.startService() has been called when the app was in either the created - // or background states. - // - // Stability: development - AndroidAppStateForeground = AndroidAppStateKey.String("foreground") -) - -// Namespace: app -const ( - // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic - // conventions. It represents the unique identifier for a particular build or - // compilation of the application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0", - // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123" - AppBuildIDKey = attribute.Key("app.build_id") - - // AppInstallationIDKey is the attribute Key conforming to the - // "app.installation.id" semantic conventions. It represents a unique identifier - // representing the installation of an application on a specific device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" - // Note: Its value SHOULD persist across launches of the same application - // installation, including through application upgrades. - // It SHOULD change if the application is uninstalled or if all applications of - // the vendor are uninstalled. - // Additionally, users might be able to reset this value (e.g. by clearing - // application data). - // If an app is installed multiple times on the same device (e.g. 
in different - // accounts on Android), each `app.installation.id` SHOULD have a different - // value. - // If multiple OpenTelemetry SDKs are used within the same application, they - // SHOULD use the same value for `app.installation.id`. - // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the - // `app.installation.id`. - // - // For iOS, this value SHOULD be equal to the [vendor identifier]. - // - // For Android, examples of `app.installation.id` implementations include: - // - // - [Firebase Installation ID]. - // - A globally unique UUID which is persisted across sessions in your - // application. - // - [App set ID]. - // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. - // - // More information about Android identifier best practices can be found in the - // [Android user data IDs guide]. - // - // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor - // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations - // [App set ID]: https://developer.android.com/identity/app-set-id - // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID - // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids - AppInstallationIDKey = attribute.Key("app.installation.id") - - // AppJankFrameCountKey is the attribute Key conforming to the - // "app.jank.frame_count" semantic conventions. It represents a number of frame - // renders that experienced jank. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 9, 42 - // Note: Depending on platform limitations, the value provided MAY be - // approximation. - AppJankFrameCountKey = attribute.Key("app.jank.frame_count") - - // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period" - // semantic conventions. 
It represents the time period, in seconds, for which - // this jank is being reported. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 5.0, 10.24 - AppJankPeriodKey = attribute.Key("app.jank.period") - - // AppJankThresholdKey is the attribute Key conforming to the - // "app.jank.threshold" semantic conventions. It represents the minimum - // rendering threshold for this jank, in seconds. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.016, 0.7, 1.024 - AppJankThresholdKey = attribute.Key("app.jank.threshold") - - // AppScreenCoordinateXKey is the attribute Key conforming to the - // "app.screen.coordinate.x" semantic conventions. It represents the x - // (horizontal) coordinate of a screen coordinate, in screen pixels. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 131 - AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") - - // AppScreenCoordinateYKey is the attribute Key conforming to the - // "app.screen.coordinate.y" semantic conventions. It represents the y - // (vertical) component of a screen coordinate, in screen pixels. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12, 99 - AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") - - // AppScreenIDKey is the attribute Key conforming to the "app.screen.id" - // semantic conventions. It represents an identifier that uniquely - // differentiates this screen from other screens in the same application. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", - // "com.example.app.MainActivity", "com.example.shop.ProductDetailFragment", - // "MyApp.ProfileView", "MyApp.ProfileViewController" - // Note: A screen represents only the part of the device display drawn by the - // app. It typically contains multiple widgets or UI components and is larger in - // scope than individual widgets. Multiple screens can coexist on the same - // display simultaneously (e.g., split view on tablets). - AppScreenIDKey = attribute.Key("app.screen.id") - - // AppScreenNameKey is the attribute Key conforming to the "app.screen.name" - // semantic conventions. It represents the name of an application screen. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MainActivity", "ProductDetailFragment", "ProfileView", - // "ProfileViewController" - // Note: A screen represents only the part of the device display drawn by the - // app. It typically contains multiple widgets or UI components and is larger in - // scope than individual widgets. Multiple screens can coexist on the same - // display simultaneously (e.g., split view on tablets). - AppScreenNameKey = attribute.Key("app.screen.name") - - // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" - // semantic conventions. It represents an identifier that uniquely - // differentiates this widget from other widgets in the same application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" - // Note: A widget is an application component, typically an on-screen visual GUI - // element. - AppWidgetIDKey = attribute.Key("app.widget.id") - - // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" - // semantic conventions. 
It represents the name of an application widget. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "submit", "attack", "Clear Cart" - // Note: A widget is an application component, typically an on-screen visual GUI - // element. - AppWidgetNameKey = attribute.Key("app.widget.name") -) - -// AppBuildID returns an attribute KeyValue conforming to the "app.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the application. -func AppBuildID(val string) attribute.KeyValue { - return AppBuildIDKey.String(val) -} - -// AppInstallationID returns an attribute KeyValue conforming to the -// "app.installation.id" semantic conventions. It represents a unique identifier -// representing the installation of an application on a specific device. -func AppInstallationID(val string) attribute.KeyValue { - return AppInstallationIDKey.String(val) -} - -// AppJankFrameCount returns an attribute KeyValue conforming to the -// "app.jank.frame_count" semantic conventions. It represents a number of frame -// renders that experienced jank. -func AppJankFrameCount(val int) attribute.KeyValue { - return AppJankFrameCountKey.Int(val) -} - -// AppJankPeriod returns an attribute KeyValue conforming to the -// "app.jank.period" semantic conventions. It represents the time period, in -// seconds, for which this jank is being reported. -func AppJankPeriod(val float64) attribute.KeyValue { - return AppJankPeriodKey.Float64(val) -} - -// AppJankThreshold returns an attribute KeyValue conforming to the -// "app.jank.threshold" semantic conventions. It represents the minimum rendering -// threshold for this jank, in seconds. -func AppJankThreshold(val float64) attribute.KeyValue { - return AppJankThresholdKey.Float64(val) -} - -// AppScreenCoordinateX returns an attribute KeyValue conforming to the -// "app.screen.coordinate.x" semantic conventions. 
It represents the x -// (horizontal) coordinate of a screen coordinate, in screen pixels. -func AppScreenCoordinateX(val int) attribute.KeyValue { - return AppScreenCoordinateXKey.Int(val) -} - -// AppScreenCoordinateY returns an attribute KeyValue conforming to the -// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) -// component of a screen coordinate, in screen pixels. -func AppScreenCoordinateY(val int) attribute.KeyValue { - return AppScreenCoordinateYKey.Int(val) -} - -// AppScreenID returns an attribute KeyValue conforming to the "app.screen.id" -// semantic conventions. It represents an identifier that uniquely differentiates -// this screen from other screens in the same application. -func AppScreenID(val string) attribute.KeyValue { - return AppScreenIDKey.String(val) -} - -// AppScreenName returns an attribute KeyValue conforming to the -// "app.screen.name" semantic conventions. It represents the name of an -// application screen. -func AppScreenName(val string) attribute.KeyValue { - return AppScreenNameKey.String(val) -} - -// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" -// semantic conventions. It represents an identifier that uniquely differentiates -// this widget from other widgets in the same application. -func AppWidgetID(val string) attribute.KeyValue { - return AppWidgetIDKey.String(val) -} - -// AppWidgetName returns an attribute KeyValue conforming to the -// "app.widget.name" semantic conventions. It represents the name of an -// application widget. -func AppWidgetName(val string) attribute.KeyValue { - return AppWidgetNameKey.String(val) -} - -// Namespace: artifact -const ( - // ArtifactAttestationFilenameKey is the attribute Key conforming to the - // "artifact.attestation.filename" semantic conventions. It represents the - // provenance filename of the built attestation which directly relates to the - // build artifact filename. 
This filename SHOULD accompany the artifact at - // publish time. See the [SLSA Relationship] specification for more information. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "golang-binary-amd64-v0.1.0.attestation", - // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", - // "file-name-package.tar.gz.intoto.json1" - // - // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations - ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") - - // ArtifactAttestationHashKey is the attribute Key conforming to the - // "artifact.attestation.hash" semantic conventions. It represents the full - // [hash value (see glossary)], of the built attestation. Some envelopes in the - // [software attestation space] also refer to this as the **digest**. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" - // - // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec - ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") - - // ArtifactAttestationIDKey is the attribute Key conforming to the - // "artifact.attestation.id" semantic conventions. It represents the id of the - // build [software attestation]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123" - // - // [software attestation]: https://slsa.dev/attestation-model - ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") - - // ArtifactFilenameKey is the attribute Key conforming to the - // "artifact.filename" semantic conventions. 
It represents the human readable - // file name of the artifact, typically generated during build and release - // processes. Often includes the package name and version in the file name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", - // "release-1.tar.gz", "file-name-package.tar.gz" - // Note: This file name can also act as the [Package Name] - // in cases where the package ecosystem maps accordingly. - // Additionally, the artifact [can be published] - // for others, but that is not a guarantee. - // - // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model - // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain - ArtifactFilenameKey = attribute.Key("artifact.filename") - - // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" - // semantic conventions. It represents the full [hash value (see glossary)], - // often found in checksum.txt on a release of the artifact and used to verify - // package integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" - // Note: The specific algorithm used to create the cryptographic hash value is - // not defined. In situations where an artifact has multiple - // cryptographic hashes, it is up to the implementer to choose which - // hash value to set here; this should be the most secure hash algorithm - // that is suitable for the situation and consistent with the - // corresponding attestation. The implementer can then provide the other - // hash values through an additional set of attribute extensions as they - // deem necessary. 
- // - // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - ArtifactHashKey = attribute.Key("artifact.hash") - - // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" - // semantic conventions. It represents the [Package URL] of the - // [package artifact] provides a standard way to identify and locate the - // packaged artifact. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pkg:github/package-url/purl-spec@1209109710924", - // "pkg:npm/foo@12.12.3" - // - // [Package URL]: https://github.com/package-url/purl-spec - // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model - ArtifactPurlKey = attribute.Key("artifact.purl") - - // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" - // semantic conventions. It represents the version of the artifact. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "v0.1.0", "1.2.1", "122691-build" - ArtifactVersionKey = attribute.Key("artifact.version") -) - -// ArtifactAttestationFilename returns an attribute KeyValue conforming to the -// "artifact.attestation.filename" semantic conventions. It represents the -// provenance filename of the built attestation which directly relates to the -// build artifact filename. This filename SHOULD accompany the artifact at -// publish time. See the [SLSA Relationship] specification for more information. -// -// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations -func ArtifactAttestationFilename(val string) attribute.KeyValue { - return ArtifactAttestationFilenameKey.String(val) -} - -// ArtifactAttestationHash returns an attribute KeyValue conforming to the -// "artifact.attestation.hash" semantic conventions. It represents the full -// [hash value (see glossary)], of the built attestation. 
Some envelopes in the -// [software attestation space] also refer to this as the **digest**. -// -// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf -// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec -func ArtifactAttestationHash(val string) attribute.KeyValue { - return ArtifactAttestationHashKey.String(val) -} - -// ArtifactAttestationID returns an attribute KeyValue conforming to the -// "artifact.attestation.id" semantic conventions. It represents the id of the -// build [software attestation]. -// -// [software attestation]: https://slsa.dev/attestation-model -func ArtifactAttestationID(val string) attribute.KeyValue { - return ArtifactAttestationIDKey.String(val) -} - -// ArtifactFilename returns an attribute KeyValue conforming to the -// "artifact.filename" semantic conventions. It represents the human readable -// file name of the artifact, typically generated during build and release -// processes. Often includes the package name and version in the file name. -func ArtifactFilename(val string) attribute.KeyValue { - return ArtifactFilenameKey.String(val) -} - -// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" -// semantic conventions. It represents the full [hash value (see glossary)], -// often found in checksum.txt on a release of the artifact and used to verify -// package integrity. -// -// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf -func ArtifactHash(val string) attribute.KeyValue { - return ArtifactHashKey.String(val) -} - -// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" -// semantic conventions. It represents the [Package URL] of the -// [package artifact] provides a standard way to identify and locate the packaged -// artifact. 
-// -// [Package URL]: https://github.com/package-url/purl-spec -// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model -func ArtifactPurl(val string) attribute.KeyValue { - return ArtifactPurlKey.String(val) -} - -// ArtifactVersion returns an attribute KeyValue conforming to the -// "artifact.version" semantic conventions. It represents the version of the -// artifact. -func ArtifactVersion(val string) attribute.KeyValue { - return ArtifactVersionKey.String(val) -} - -// Namespace: aws -const ( - // AWSBedrockGuardrailIDKey is the attribute Key conforming to the - // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique - // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and - // prevent unwanted behavior from model responses or user messages. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "sgi5gkybzqak" - // - // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html - AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") - - // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the - // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the - // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a - // bank of information that can be queried by models to generate more relevant - // responses and augment prompts. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "XFWUPB9PAW" - // - // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html - AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") - - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the - // "aws.dynamodb.attribute_definitions" semantic conventions. 
It represents the - // JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "lives", "id" - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the value - // of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response field. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }" - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of the - // `Count` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the - // value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Users", "CatsTable" - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }" - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the - // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents - // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` - // request field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value of - // the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "name_to_group" - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the - // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents - // the JSON-serialized value of the `ItemCollectionMetrics` response field. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of the - // `Limit` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the - // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents - // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }" - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value of - // the `ProjectionExpression` request parameter. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, - // ProductReviews" - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents - // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents - // the value of the `ProvisionedThroughput.WriteCapacityUnits` request - // parameter. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of - // the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of - // the `ScannedCount` response parameter. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of the - // `Segment` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of the - // `Select` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ALL_ATTRIBUTES", "COUNT" - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the number of - // items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys in - // the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Users", "Cats" - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the value - // of the `TotalSegments` request parameter. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS cluster]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" - // - // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container instance]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" - // - // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" - // semantic conventions. It represents the ARN of a running [ECS task]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", - // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" - // - // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family name of - // the [ECS task definition] used to create the ECS task. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-family" - // - // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID MUST - // be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", - // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision for - // the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "8", "26" - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") - - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS - // cluster. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") - - // AWSExtendedRequestIDKey is the attribute Key conforming to the - // "aws.extended_request_id" semantic conventions. It represents the AWS - // extended request ID as returned in the response header `x-amz-id-2`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" - AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") - - // AWSKinesisStreamNameKey is the attribute Key conforming to the - // "aws.kinesis.stream_name" semantic conventions. It represents the name of the - // AWS Kinesis [stream] the request refers to. Corresponds to the - // `--stream-name` parameter of the Kinesis [describe-stream] operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "some-stream-name" - // - // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html - // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html - AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") - - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked - // ARN as provided on the `Context` passed to the function ( - // `Lambda-Runtime-Invoked-Function-Arn` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" - // Note: This may be different from `cloud.resource_id` if an alias is involved. 
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") - - // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the - // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID - // of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda - // function. It's contents are read by Lambda and used to trigger a function. - // This isn't available in the lambda execution context or the lambda runtime - // environtment. This is going to be populated by the AWS SDK for each language - // when that UUID is present. Some of these operations are - // Create/Delete/Get/List/Update EventSourceMapping. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" - // - // [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html - AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource - // Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" - // Note: See the [log group ARN format documentation]. - // - // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of the - // AWS log group(s) an application is writing to. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/aws/lambda/my-function", "opentelemetry-service" - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each - // write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the - // AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" - // Note: See the [log stream ARN format documentation]. One log group can - // contain several log streams, so these ARNs necessarily identify both a log - // group and a log stream. - // - // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) of the - // AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in the - // response headers `x-amzn-requestid`, `x-amzn-request-id` or - // `x-amz-request-id`. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" - AWSRequestIDKey = attribute.Key("aws.request_id") - - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request refers to. - // Corresponds to the `--bucket` parameter of the [S3 API] operations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "some-bucket-name" - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source object - // (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "someFile.yml" - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 API]. 
- // This applies in particular to the following operations: - // - // - [copy-object] - // - [upload-part-copy] - // - // - // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" - // Note: The `delete` attribute is only applicable to the [delete-object] - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 API]. - // - // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html - // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 API] operations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "someFile.yml" - // Note: The `key` attribute is applicable to all object-related S3 operations, - // i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - [copy-object] - // - [delete-object] - // - [get-object] - // - [head-object] - // - [put-object] - // - [restore-object] - // - [select-object-content] - // - [abort-multipart-upload] - // - [complete-multipart-upload] - // - [create-multipart-upload] - // - [list-parts] - // - [upload-part] - // - [upload-part-copy] - // - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html - // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html - // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html - // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html - // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html - // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html - // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html - // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html - // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html - // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. 
It represents the part number of - // the part being uploaded in a multipart-upload operation. This is a positive - // integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the [upload-part] - // and [upload-part-copy] operations. - // The `part_number` attribute corresponds to the `--part-number` parameter of - // the - // [upload-part operation within the S3 API]. - // - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" - // semantic conventions. It represents the upload ID that identifies the - // multipart upload. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" - // Note: The `upload_id` attribute applies to S3 multipart-upload operations and - // corresponds to the `--upload-id` parameter - // of the [S3 API] multipart operations. 
- // This applies in particular to the following operations: - // - // - [abort-multipart-upload] - // - [complete-multipart-upload] - // - [list-parts] - // - [upload-part] - // - [upload-part-copy] - // - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html - // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html - // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") - - // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the - // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN - // of the Secret stored in the Secrets Mangger. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters" - AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn") - - // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn" - // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon - // SNS [topic] is a logical access point that acts as a communication channel. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" - // - // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html - AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") - - // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" - // semantic conventions. It represents the URL of the AWS SQS Queue. It's a - // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is - // used to access the queue and perform actions on it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" - AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") - - // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the - // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN - // of the AWS Step Functions Activity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" - AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") - - // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the - // "aws.step_functions.state_machine.arn" semantic conventions. It represents - // the ARN of the AWS Step Functions State Machine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" - AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") -) - -// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the -// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique -// identifier of the AWS Bedrock Guardrail. 
A [guardrail] helps safeguard and -// prevent unwanted behavior from model responses or user messages. -// -// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html -func AWSBedrockGuardrailID(val string) attribute.KeyValue { - return AWSBedrockGuardrailIDKey.String(val) -} - -// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the -// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique -// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of -// information that can be queried by models to generate more relevant responses -// and augment prompts. -// -// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html -func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { - return AWSBedrockKnowledgeBaseIDKey.String(val) -} - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to -// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents -// the JSON-serialized value of each item in the `AttributeDefinitions` request -// field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the -// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value -// of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. 
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the -// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the -// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the -// value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to -// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field. 
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of the -// `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to -// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents -// the JSON-serialized value of the `ItemCollectionMetrics` response field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to -// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents -// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request -// field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of the -// `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. 
It -// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request -// parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming -// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It -// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request -// parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of -// the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. 
It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the -// `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value of -// the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an -// [ECS cluster]. -// -// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container instance]. -// -// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS task]. 
-// -// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task definition] used to create the ECS task. -// -// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" -// semantic conventions. It represents the ID of a running ECS task. The ID MUST -// be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// AWSExtendedRequestID returns an attribute KeyValue conforming to the -// "aws.extended_request_id" semantic conventions. It represents the AWS extended -// request ID as returned in the response header `x-amz-id-2`. -func AWSExtendedRequestID(val string) attribute.KeyValue { - return AWSExtendedRequestIDKey.String(val) -} - -// AWSKinesisStreamName returns an attribute KeyValue conforming to the -// "aws.kinesis.stream_name" semantic conventions. 
It represents the name of the -// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name` -// parameter of the Kinesis [describe-stream] operation. -// -// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html -// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html -func AWSKinesisStreamName(val string) attribute.KeyValue { - return AWSKinesisStreamNameKey.String(val) -} - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked -// ARN as provided on the `Context` passed to the function ( -// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` -// applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the -// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID -// of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda -// function. It's contents are read by Lambda and used to trigger a function. -// This isn't available in the lambda execution context or the lambda runtime -// environtment. This is going to be populated by the AWS SDK for each language -// when that UUID is present. Some of these operations are -// Create/Delete/Get/List/Update EventSourceMapping. -// -// [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html -func AWSLambdaResourceMappingID(val string) attribute.KeyValue { - return AWSLambdaResourceMappingIDKey.String(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). 
-func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of the -// AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" -// semantic conventions. It represents the AWS request ID as returned in the -// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id` -// . -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" -// semantic conventions. It represents the S3 bucket name the request refers to. -// Corresponds to the `--bucket` parameter of the [S3 API] operations. -// -// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object (in -// the form `bucket`/`key`) for the copy operation. 
-func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" -// semantic conventions. It represents the delete request container that -// specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic -// conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 API] operations. -// -// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the -// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of -// the Secret stored in the Secrets Mangger. -func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue { - return AWSSecretsmanagerSecretARNKey.String(val) -} - -// AWSSNSTopicARN returns an attribute KeyValue conforming to the -// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS -// Topic. An Amazon SNS [topic] is a logical access point that acts as a -// communication channel. 
-// -// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html -func AWSSNSTopicARN(val string) attribute.KeyValue { - return AWSSNSTopicARNKey.String(val) -} - -// AWSSQSQueueURL returns an attribute KeyValue conforming to the -// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS -// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service -// (SQS) and is used to access the queue and perform actions on it. -func AWSSQSQueueURL(val string) attribute.KeyValue { - return AWSSQSQueueURLKey.String(val) -} - -// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the -// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN -// of the AWS Step Functions Activity. -func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { - return AWSStepFunctionsActivityARNKey.String(val) -} - -// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to -// the "aws.step_functions.state_machine.arn" semantic conventions. It represents -// the ARN of the AWS Step Functions State Machine. -func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { - return AWSStepFunctionsStateMachineARNKey.String(val) -} - -// Enum values for aws.ecs.launchtype -var ( - // Amazon EC2 - // Stability: development - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // Amazon Fargate - // Stability: development - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Namespace: azure -const ( - // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" - // semantic conventions. It represents the unique identifier of the client - // instance. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" - AzureClientIDKey = attribute.Key("azure.client.id") - - // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the - // "azure.cosmosdb.connection.mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") - - // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the - // "azure.cosmosdb.consistency.level" semantic conventions. It represents the - // account or request [consistency level]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", - // "Session" - // - // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels - AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") - - // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to - // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It - // represents the list of regions contacted during operation in the order that - // they were contacted. If there is more than one region listed, it indicates - // that the operation was performed on multiple regions i.e. cross-regional - // call. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "North Central US", "Australia East", "Australia Southeast" - // Note: Region name matches the format of `displayName` in [Azure Location API] - // - // [Azure Location API]: https://learn.microsoft.com/rest/api/resources/subscriptions/list-locations - AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") - - // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the - // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents - // the number of request units consumed by the operation. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 46.18, 1.0 - AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") - - // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the - // "azure.cosmosdb.request.body.size" semantic conventions. It represents the - // request payload size in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") - - // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the - // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents - // the cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1000, 1002 - AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") - - // AzureResourceProviderNamespaceKey is the attribute Key conforming to the - // "azure.resource_provider.namespace" semantic conventions. It represents the - // [Azure Resource Provider Namespace] as recognized by the client. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" - // - // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers - AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") - - // AzureServiceRequestIDKey is the attribute Key conforming to the - // "azure.service.request.id" semantic conventions. It represents the unique - // identifier of the service request. It's generated by the Azure service and - // returned with the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "00000000-0000-0000-0000-000000000000" - AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") -) - -// AzureClientID returns an attribute KeyValue conforming to the -// "azure.client.id" semantic conventions. It represents the unique identifier of -// the client instance. -func AzureClientID(val string) attribute.KeyValue { - return AzureClientIDKey.String(val) -} - -// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue -// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic -// conventions. It represents the list of regions contacted during operation in -// the order that they were contacted. If there is more than one region listed, -// it indicates that the operation was performed on multiple regions i.e. -// cross-regional call. -func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { - return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) -} - -// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming -// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It -// represents the number of request units consumed by the operation. 
-func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { - return AzureCosmosDBOperationRequestChargeKey.Float64(val) -} - -// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the -// "azure.cosmosdb.request.body.size" semantic conventions. It represents the -// request payload size in bytes. -func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { - return AzureCosmosDBRequestBodySizeKey.Int(val) -} - -// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to -// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It -// represents the cosmos DB sub status code. -func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { - return AzureCosmosDBResponseSubStatusCodeKey.Int(val) -} - -// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the -// "azure.resource_provider.namespace" semantic conventions. It represents the -// [Azure Resource Provider Namespace] as recognized by the client. -// -// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers -func AzureResourceProviderNamespace(val string) attribute.KeyValue { - return AzureResourceProviderNamespaceKey.String(val) -} - -// AzureServiceRequestID returns an attribute KeyValue conforming to the -// "azure.service.request.id" semantic conventions. It represents the unique -// identifier of the service request. It's generated by the Azure service and -// returned with the response. -func AzureServiceRequestID(val string) attribute.KeyValue { - return AzureServiceRequestIDKey.String(val) -} - -// Enum values for azure.cosmosdb.connection.mode -var ( - // Gateway (HTTP) connection. - // Stability: development - AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") - // Direct connection. 
- // Stability: development - AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") -) - -// Enum values for azure.cosmosdb.consistency.level -var ( - // Strong - // Stability: development - AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") - // Bounded Staleness - // Stability: development - AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") - // Session - // Stability: development - AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") - // Eventual - // Stability: development - AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") - // Consistent Prefix - // Stability: development - AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") -) - -// Namespace: browser -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.brands`). - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the "browser.language" - // semantic conventions. It represents the preferred language of the user using - // the browser. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "en", "en-US", "fr", "fr-FR" - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. 
- BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the browser is - // running on a mobile device. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be - // left unset. - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" - // semantic conventions. It represents the platform on which the browser is - // running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Windows", "macOS", "Android" - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD - // be left unset in order for the values to be consistent. - // The list of possible values is defined in the - // [W3C User-Agent Client Hints specification]. Note that some (but not all) of - // these values can overlap with values in the - // [`os.type` and `os.name` attributes]. However, for consistency, the values in - // the `browser.platform` attribute should capture the exact value that the user - // agent provides. 
- // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform - // [`os.type` and `os.name` attributes]: ./os.md - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" -// semantic conventions. It represents the array of brand name and version -// separated by a space. -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred language -// of the user using the browser. -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" -// semantic conventions. It represents a boolean that is true if the browser is -// running on a mobile device. -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running. -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Namespace: cassandra -const ( - // CassandraConsistencyLevelKey is the attribute Key conforming to the - // "cassandra.consistency.level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from [CQL]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html - CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") - - // CassandraCoordinatorDCKey is the attribute Key conforming to the - // "cassandra.coordinator.dc" semantic conventions. It represents the data - // center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: us-west-2 - CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") - - // CassandraCoordinatorIDKey is the attribute Key conforming to the - // "cassandra.coordinator.id" semantic conventions. It represents the ID of the - // coordinating node for a query. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af - CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") - - // CassandraPageSizeKey is the attribute Key conforming to the - // "cassandra.page.size" semantic conventions. It represents the fetch size used - // for paging, i.e. how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 5000 - CassandraPageSizeKey = attribute.Key("cassandra.page.size") - - // CassandraQueryIdempotentKey is the attribute Key conforming to the - // "cassandra.query.idempotent" semantic conventions. It represents the whether - // or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") - - // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the - // "cassandra.speculative_execution.count" semantic conventions. 
It represents - // the number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 2 - CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") -) - -// CassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "cassandra.coordinator.dc" semantic conventions. It represents the data center -// of the coordinating node for a query. -func CassandraCoordinatorDC(val string) attribute.KeyValue { - return CassandraCoordinatorDCKey.String(val) -} - -// CassandraCoordinatorID returns an attribute KeyValue conforming to the -// "cassandra.coordinator.id" semantic conventions. It represents the ID of the -// coordinating node for a query. -func CassandraCoordinatorID(val string) attribute.KeyValue { - return CassandraCoordinatorIDKey.String(val) -} - -// CassandraPageSize returns an attribute KeyValue conforming to the -// "cassandra.page.size" semantic conventions. It represents the fetch size used -// for paging, i.e. how many rows will be returned at once. -func CassandraPageSize(val int) attribute.KeyValue { - return CassandraPageSizeKey.Int(val) -} - -// CassandraQueryIdempotent returns an attribute KeyValue conforming to the -// "cassandra.query.idempotent" semantic conventions. It represents the whether -// or not the query is idempotent. -func CassandraQueryIdempotent(val bool) attribute.KeyValue { - return CassandraQueryIdempotentKey.Bool(val) -} - -// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to -// the "cassandra.speculative_execution.count" semantic conventions. It -// represents the number of times a query was speculatively executed. Not set or -// `0` if the query was not executed speculatively. 
-func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return CassandraSpeculativeExecutionCountKey.Int(val) -} - -// Enum values for cassandra.consistency.level -var ( - // All - // Stability: development - CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") - // Each Quorum - // Stability: development - CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") - // Quorum - // Stability: development - CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") - // Local Quorum - // Stability: development - CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") - // One - // Stability: development - CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") - // Two - // Stability: development - CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") - // Three - // Stability: development - CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") - // Local One - // Stability: development - CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") - // Any - // Stability: development - CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") - // Serial - // Stability: development - CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") - // Local Serial - // Stability: development - CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") -) - -// Namespace: cicd -const ( - // CICDPipelineActionNameKey is the attribute Key conforming to the - // "cicd.pipeline.action.name" semantic conventions. It represents the kind of - // action a pipeline run is performing. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "BUILD", "RUN", "SYNC" - CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name") - - // CICDPipelineNameKey is the attribute Key conforming to the - // "cicd.pipeline.name" semantic conventions. It represents the human readable - // name of the pipeline within a CI/CD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Build and Test", "Lint", "Deploy Go Project", - // "deploy_to_environment" - CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") - - // CICDPipelineResultKey is the attribute Key conforming to the - // "cicd.pipeline.result" semantic conventions. It represents the result of a - // pipeline run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "timeout", "skipped" - CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") - - // CICDPipelineRunIDKey is the attribute Key conforming to the - // "cicd.pipeline.run.id" semantic conventions. It represents the unique - // identifier of a pipeline run within a CI/CD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "120912" - CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") - - // CICDPipelineRunStateKey is the attribute Key conforming to the - // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline - // run goes through these states during its lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pending", "executing", "finalizing" - CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") - - // CICDPipelineRunURLFullKey is the attribute Key conforming to the - // "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of - // the pipeline run, providing the complete address in order to locate and - // identify the pipeline run. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") - - // CICDPipelineTaskNameKey is the attribute Key conforming to the - // "cicd.pipeline.task.name" semantic conventions. It represents the human - // readable name of a task within a pipeline. Task here most closely aligns with - // a [computing process] in a pipeline. Other terms for tasks include commands, - // steps, and procedures. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" - // - // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) - CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") - - // CICDPipelineTaskRunIDKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique - // identifier of a task run within a pipeline. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "12097" - CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") - - // CICDPipelineTaskRunResultKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.result" semantic conventions. It represents the - // result of a task run. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "timeout", "skipped" - CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") - - // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the - // [URL] of the pipeline task run, providing the complete address in order to - // locate and identify the pipeline task run. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") - - // CICDPipelineTaskTypeKey is the attribute Key conforming to the - // "cicd.pipeline.task.type" semantic conventions. It represents the type of the - // task within a pipeline. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "build", "test", "deploy" - CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") - - // CICDSystemComponentKey is the attribute Key conforming to the - // "cicd.system.component" semantic conventions. It represents the name of a - // component of the CICD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "controller", "scheduler", "agent" - CICDSystemComponentKey = attribute.Key("cicd.system.component") - - // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" - // semantic conventions. It represents the unique identifier of a worker within - // a CICD system. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "abc123", "10.0.1.2", "controller" - CICDWorkerIDKey = attribute.Key("cicd.worker.id") - - // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" - // semantic conventions. It represents the name of a worker within a CICD - // system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "agent-abc", "controller", "Ubuntu LTS" - CICDWorkerNameKey = attribute.Key("cicd.worker.name") - - // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" - // semantic conventions. It represents the state of a CICD worker / agent. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "idle", "busy", "down" - CICDWorkerStateKey = attribute.Key("cicd.worker.state") - - // CICDWorkerURLFullKey is the attribute Key conforming to the - // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the - // worker, providing the complete address in order to locate and identify the - // worker. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://cicd.example.org/worker/abc123" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") -) - -// CICDPipelineName returns an attribute KeyValue conforming to the -// "cicd.pipeline.name" semantic conventions. It represents the human readable -// name of the pipeline within a CI/CD system. -func CICDPipelineName(val string) attribute.KeyValue { - return CICDPipelineNameKey.String(val) -} - -// CICDPipelineRunID returns an attribute KeyValue conforming to the -// "cicd.pipeline.run.id" semantic conventions. It represents the unique -// identifier of a pipeline run within a CI/CD system. 
-func CICDPipelineRunID(val string) attribute.KeyValue { - return CICDPipelineRunIDKey.String(val) -} - -// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the -// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of -// the pipeline run, providing the complete address in order to locate and -// identify the pipeline run. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDPipelineRunURLFull(val string) attribute.KeyValue { - return CICDPipelineRunURLFullKey.String(val) -} - -// CICDPipelineTaskName returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.name" semantic conventions. It represents the human -// readable name of a task within a pipeline. Task here most closely aligns with -// a [computing process] in a pipeline. Other terms for tasks include commands, -// steps, and procedures. -// -// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) -func CICDPipelineTaskName(val string) attribute.KeyValue { - return CICDPipelineTaskNameKey.String(val) -} - -// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique -// identifier of a task run within a pipeline. -func CICDPipelineTaskRunID(val string) attribute.KeyValue { - return CICDPipelineTaskRunIDKey.String(val) -} - -// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the -// [URL] of the pipeline task run, providing the complete address in order to -// locate and identify the pipeline task run. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { - return CICDPipelineTaskRunURLFullKey.String(val) -} - -// CICDSystemComponent returns an attribute KeyValue conforming to the -// "cicd.system.component" semantic conventions. 
It represents the name of a -// component of the CICD system. -func CICDSystemComponent(val string) attribute.KeyValue { - return CICDSystemComponentKey.String(val) -} - -// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" -// semantic conventions. It represents the unique identifier of a worker within a -// CICD system. -func CICDWorkerID(val string) attribute.KeyValue { - return CICDWorkerIDKey.String(val) -} - -// CICDWorkerName returns an attribute KeyValue conforming to the -// "cicd.worker.name" semantic conventions. It represents the name of a worker -// within a CICD system. -func CICDWorkerName(val string) attribute.KeyValue { - return CICDWorkerNameKey.String(val) -} - -// CICDWorkerURLFull returns an attribute KeyValue conforming to the -// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the -// worker, providing the complete address in order to locate and identify the -// worker. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDWorkerURLFull(val string) attribute.KeyValue { - return CICDWorkerURLFullKey.String(val) -} - -// Enum values for cicd.pipeline.action.name -var ( - // The pipeline run is executing a build. - // Stability: development - CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") - // The pipeline run is executing. - // Stability: development - CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") - // The pipeline run is executing a sync. - // Stability: development - CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") -) - -// Enum values for cicd.pipeline.result -var ( - // The pipeline run finished successfully. - // Stability: development - CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") - // The pipeline run did not finish successfully, eg. due to a compile error or a - // failing test. Such failures are usually detected by non-zero exit codes of - // the tools executed in the pipeline run. 
- // Stability: development - CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") - // The pipeline run failed due to an error in the CICD system, eg. due to the - // worker being killed. - // Stability: development - CICDPipelineResultError = CICDPipelineResultKey.String("error") - // A timeout caused the pipeline run to be interrupted. - // Stability: development - CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") - // The pipeline run was cancelled, eg. by a user manually cancelling the - // pipeline run. - // Stability: development - CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") - // The pipeline run was skipped, eg. due to a precondition not being met. - // Stability: development - CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") -) - -// Enum values for cicd.pipeline.run.state -var ( - // The run pending state spans from the event triggering the pipeline run until - // the execution of the run starts (eg. time spent in a queue, provisioning - // agents, creating run resources). - // - // Stability: development - CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") - // The executing state spans the execution of any run tasks (eg. build, test). - // Stability: development - CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") - // The finalizing state spans from when the run has finished executing (eg. - // cleanup of run resources). - // Stability: development - CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") -) - -// Enum values for cicd.pipeline.task.run.result -var ( - // The task run finished successfully. - // Stability: development - CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") - // The task run did not finish successfully, eg. due to a compile error or a - // failing test. 
Such failures are usually detected by non-zero exit codes of - // the tools executed in the task run. - // Stability: development - CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") - // The task run failed due to an error in the CICD system, eg. due to the worker - // being killed. - // Stability: development - CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") - // A timeout caused the task run to be interrupted. - // Stability: development - CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") - // The task run was cancelled, eg. by a user manually cancelling the task run. - // Stability: development - CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") - // The task run was skipped, eg. due to a precondition not being met. - // Stability: development - CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") -) - -// Enum values for cicd.pipeline.task.type -var ( - // build - // Stability: development - CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") - // test - // Stability: development - CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") - // deploy - // Stability: development - CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") -) - -// Enum values for cicd.worker.state -var ( - // The worker is not performing work for the CICD system. It is available to the - // CICD system to perform work on (online / idle). - // Stability: development - CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") - // The worker is performing work for the CICD system. - // Stability: development - CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") - // The worker is not available to the CICD system (disconnected / down). 
- // Stability: development - CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") -) - -// Namespace: client -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix domain - // socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the server side, and when communicating through an - // intermediary, `client.address` SHOULD represent the client address behind any - // intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" semantic - // conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - // Note: When observed from the server side, and when communicating through an - // intermediary, `client.port` SHOULD represent the client port behind any - // intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the "client.address" -// semantic conventions. It represents the client address - domain name if -// available without reverse DNS lookup; otherwise, IP address or Unix domain -// socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. 
-func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// Namespace: cloud -const ( - // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" - // semantic conventions. It represents the cloud account ID the resource is - // assigned to. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "111111111111", "opentelemetry" - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to increase - // availability. Availability zone represents the zone where the resource is - // running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-east-1c" - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic - // conventions. It represents the geographical region within a cloud provider. 
- // When associated with a resource, this attribute specifies the region where - // the resource operates. When calling services or APIs deployed on a cloud, - // this attribute identifies the region where the called destination is - // deployed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1", "us-east-1" - // Note: Refer to your provider's docs to see the available regions, for example - // [Alibaba Cloud regions], [AWS regions], [Azure regions], - // [Google Cloud regions], or [Tencent Cloud regions]. - // - // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm - // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ - // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ - // [Google Cloud regions]: https://cloud.google.com/about/locations - // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" - // semantic conventions. It represents the cloud provider-specific native - // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a - // [fully qualified resource ID] on Azure, a [full resource name] on GCP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", - // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", - // "/subscriptions//resourceGroups/ - // /providers/Microsoft.Web/sites//functions/" - // Note: On some cloud providers, it may not be possible to determine the full - // ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud provider. 
- // The following well-known definitions MUST be used if you set this attribute - // and they apply: - // - // - **AWS Lambda:** The function [ARN]. - // Take care not to use the "invoked ARN" directly but replace any - // [alias suffix] - // with the resolved function version, as the same runtime instance may be - // invocable with - // multiple different aliases. - // - **GCP:** The [URI of the resource] - // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, - // *not* the function app, having the form - // - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` - // . - // This means that a span attribute MUST be used, as an Azure function app - // can host multiple functions that would usually share - // a TracerProvider. - // - // - // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id - // [full resource name]: https://google.aip.dev/122#full-resource-names - // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html - // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names - // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id - CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. 
It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" -// semantic conventions. It represents the geographical region within a cloud -// provider. When associated with a resource, this attribute specifies the region -// where the resource operates. When calling services or APIs deployed on a -// cloud, this attribute identifies the region where the called destination is -// deployed. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] -// on GCP). 
-// -// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id -// [full resource name]: https://google.aip.dev/122#full-resource-names -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Enum values for cloud.platform -var ( - // Akamai Cloud Compute - // Stability: development - CloudPlatformAkamaiCloudCompute = CloudPlatformKey.String("akamai_cloud.compute") - // Alibaba Cloud Elastic Compute Service - // Stability: development - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - // Stability: development - CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - // Stability: development - CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - // Stability: development - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - // Stability: development - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - // Stability: development - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - // Stability: development - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - // Stability: development - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - // Stability: development - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - // Stability: development - CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - // Stability: development - CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") - // Azure Container 
Apps - // Stability: development - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") - // Azure Container Instances - // Stability: development - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") - // Azure Kubernetes Service - // Stability: development - CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") - // Azure Functions - // Stability: development - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") - // Azure App Service - // Stability: development - CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") - // Azure Red Hat OpenShift - // Stability: development - CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") - // Google Vertex AI Agent Engine - // Stability: development - CloudPlatformGCPAgentEngine = CloudPlatformKey.String("gcp.agent_engine") - // Google Bare Metal Solution (BMS) - // Stability: development - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - // Stability: development - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - // Stability: development - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - // Stability: development - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - // Stability: development - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - // Stability: development - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - // Stability: development - CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") - // Server on Hetzner Cloud - // Stability: development - 
CloudPlatformHetznerCloudServer = CloudPlatformKey.String("hetzner.cloud_server") - // Red Hat OpenShift on IBM Cloud - // Stability: development - CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") - // Compute on Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") - // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") - // Tencent Cloud Cloud Virtual Machine (CVM) - // Stability: development - CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - // Stability: development - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - // Stability: development - CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") - // Vultr Cloud Compute - // Stability: development - CloudPlatformVultrCloudCompute = CloudPlatformKey.String("vultr.cloud_compute") -) - -// Enum values for cloud.provider -var ( - // Akamai Cloud - // Stability: development - CloudProviderAkamaiCloud = CloudProviderKey.String("akamai_cloud") - // Alibaba Cloud - // Stability: development - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - // Stability: development - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - // Stability: development - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - // Stability: development - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - // Stability: development - CloudProviderHeroku = CloudProviderKey.String("heroku") - // Hetzner - // Stability: development - CloudProviderHetzner = CloudProviderKey.String("hetzner") - // IBM Cloud - 
// Stability: development - CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") - // Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") - // Tencent Cloud - // Stability: development - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") - // Vultr - // Stability: development - CloudProviderVultr = CloudProviderKey.String("vultr") -) - -// Namespace: cloudevents -const ( - // CloudEventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the [event_id] - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" - // - // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id - CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudEventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the [source] - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", - // "my-service" - // - // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 - CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudEventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents specification] which the event uses. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - // - // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion - CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudEventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the [subject] - // of the event in the context of the event producer (identified by source). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: mynewfile.jpg - // - // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject - CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudEventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the [event_type] - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" - // - // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type - CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudEventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the [event_id] -// uniquely identifies the event. -// -// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id -func CloudEventsEventID(val string) attribute.KeyValue { - return CloudEventsEventIDKey.String(val) -} - -// CloudEventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. 
It represents the [source] -// identifies the context in which an event happened. -// -// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 -func CloudEventsEventSource(val string) attribute.KeyValue { - return CloudEventsEventSourceKey.String(val) -} - -// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the -// "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents specification] which the event uses. -// -// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion -func CloudEventsEventSpecVersion(val string) attribute.KeyValue { - return CloudEventsEventSpecVersionKey.String(val) -} - -// CloudEventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the [subject] -// of the event in the context of the event producer (identified by source). -// -// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject -func CloudEventsEventSubject(val string) attribute.KeyValue { - return CloudEventsEventSubjectKey.String(val) -} - -// CloudEventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the [event_type] -// contains a value describing the type of event related to the originating -// occurrence. -// -// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type -func CloudEventsEventType(val string) attribute.KeyValue { - return CloudEventsEventTypeKey.String(val) -} - -// Namespace: cloudfoundry -const ( - // CloudFoundryAppIDKey is the attribute Key conforming to the - // "cloudfoundry.app.id" semantic conventions. It represents the guid of the - // application. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.application_id`. This is the same value as - // reported by `cf app --guid`. - CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") - - // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the - // "cloudfoundry.app.instance.id" semantic conventions. It represents the index - // of the application instance. 0 when just one instance is active. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0", "1" - // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] - // . - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the application instance index for applications - // deployed on the runtime. - // - // Application instrumentation should use the value from environment - // variable `CF_INSTANCE_INDEX`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") - - // CloudFoundryAppNameKey is the attribute Key conforming to the - // "cloudfoundry.app.name" semantic conventions. It represents the name of the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-app-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.application_name`. This is the same value - // as reported by `cf apps`. - CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") - - // CloudFoundryOrgIDKey is the attribute Key conforming to the - // "cloudfoundry.org.id" semantic conventions. 
It represents the guid of the - // CloudFoundry org the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.org_id`. This is the same value as - // reported by `cf org --guid`. - CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") - - // CloudFoundryOrgNameKey is the attribute Key conforming to the - // "cloudfoundry.org.name" semantic conventions. It represents the name of the - // CloudFoundry organization the app is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-org-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.org_name`. This is the same value as - // reported by `cf orgs`. - CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") - - // CloudFoundryProcessIDKey is the attribute Key conforming to the - // "cloudfoundry.process.id" semantic conventions. It represents the UID - // identifying the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to - // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. - // For system components, this could be the actual PID. - CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") - - // CloudFoundryProcessTypeKey is the attribute Key conforming to the - // "cloudfoundry.process.type" semantic conventions. It represents the type of - // process. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "web" - // Note: CloudFoundry applications can consist of multiple jobs. Usually the - // main process will be of type `web`. There can be additional background - // tasks or side-cars with different process types. - CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") - - // CloudFoundrySpaceIDKey is the attribute Key conforming to the - // "cloudfoundry.space.id" semantic conventions. It represents the guid of the - // CloudFoundry space the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.space_id`. This is the same value as - // reported by `cf space --guid`. - CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") - - // CloudFoundrySpaceNameKey is the attribute Key conforming to the - // "cloudfoundry.space.name" semantic conventions. It represents the name of the - // CloudFoundry space the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-space-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.space_name`. This is the same value as - // reported by `cf spaces`. - CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") - - // CloudFoundrySystemIDKey is the attribute Key conforming to the - // "cloudfoundry.system.id" semantic conventions. It represents a guid or - // another name describing the event source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cf/gorouter" - // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. 
- // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the component name, e.g. "gorouter", for - // CloudFoundry components. - // - // When system components are instrumented, values from the - // [Bosh spec] - // should be used. The `system.id` should be set to - // `spec.deployment/spec.name`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec - CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") - - // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the - // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid - // describing the concrete instance of the event source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] - // . - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the vm id for CloudFoundry components. - // - // When system components are instrumented, values from the - // [Bosh spec] - // should be used. The `system.instance.id` should be set to `spec.id`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec - CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") -) - -// CloudFoundryAppID returns an attribute KeyValue conforming to the -// "cloudfoundry.app.id" semantic conventions. It represents the guid of the -// application. -func CloudFoundryAppID(val string) attribute.KeyValue { - return CloudFoundryAppIDKey.String(val) -} - -// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the -// "cloudfoundry.app.instance.id" semantic conventions. 
It represents the index -// of the application instance. 0 when just one instance is active. -func CloudFoundryAppInstanceID(val string) attribute.KeyValue { - return CloudFoundryAppInstanceIDKey.String(val) -} - -// CloudFoundryAppName returns an attribute KeyValue conforming to the -// "cloudfoundry.app.name" semantic conventions. It represents the name of the -// application. -func CloudFoundryAppName(val string) attribute.KeyValue { - return CloudFoundryAppNameKey.String(val) -} - -// CloudFoundryOrgID returns an attribute KeyValue conforming to the -// "cloudfoundry.org.id" semantic conventions. It represents the guid of the -// CloudFoundry org the application is running in. -func CloudFoundryOrgID(val string) attribute.KeyValue { - return CloudFoundryOrgIDKey.String(val) -} - -// CloudFoundryOrgName returns an attribute KeyValue conforming to the -// "cloudfoundry.org.name" semantic conventions. It represents the name of the -// CloudFoundry organization the app is running in. -func CloudFoundryOrgName(val string) attribute.KeyValue { - return CloudFoundryOrgNameKey.String(val) -} - -// CloudFoundryProcessID returns an attribute KeyValue conforming to the -// "cloudfoundry.process.id" semantic conventions. It represents the UID -// identifying the process. -func CloudFoundryProcessID(val string) attribute.KeyValue { - return CloudFoundryProcessIDKey.String(val) -} - -// CloudFoundryProcessType returns an attribute KeyValue conforming to the -// "cloudfoundry.process.type" semantic conventions. It represents the type of -// process. -func CloudFoundryProcessType(val string) attribute.KeyValue { - return CloudFoundryProcessTypeKey.String(val) -} - -// CloudFoundrySpaceID returns an attribute KeyValue conforming to the -// "cloudfoundry.space.id" semantic conventions. It represents the guid of the -// CloudFoundry space the application is running in. 
-func CloudFoundrySpaceID(val string) attribute.KeyValue { - return CloudFoundrySpaceIDKey.String(val) -} - -// CloudFoundrySpaceName returns an attribute KeyValue conforming to the -// "cloudfoundry.space.name" semantic conventions. It represents the name of the -// CloudFoundry space the application is running in. -func CloudFoundrySpaceName(val string) attribute.KeyValue { - return CloudFoundrySpaceNameKey.String(val) -} - -// CloudFoundrySystemID returns an attribute KeyValue conforming to the -// "cloudfoundry.system.id" semantic conventions. It represents a guid or another -// name describing the event source. -func CloudFoundrySystemID(val string) attribute.KeyValue { - return CloudFoundrySystemIDKey.String(val) -} - -// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the -// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid -// describing the concrete instance of the event source. -func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { - return CloudFoundrySystemInstanceIDKey.String(val) -} - -// Namespace: code -const ( - // CodeColumnNumberKey is the attribute Key conforming to the - // "code.column.number" semantic conventions. It represents the column number in - // `code.file.path` best representing the operation. It SHOULD point within the - // code unit named in `code.function.name`. This attribute MUST NOT be used on - // the Profile signal since the data is already captured in 'message Line'. This - // constraint is imposed to prevent redundancy and maintain data integrity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - CodeColumnNumberKey = attribute.Key("code.column.number") - - // CodeFilePathKey is the attribute Key conforming to the "code.file.path" - // semantic conventions. It represents the source code file name that identifies - // the code unit as uniquely as possible (preferably an absolute file path). 
- // This attribute MUST NOT be used on the Profile signal since the data is - // already captured in 'message Function'. This constraint is imposed to prevent - // redundancy and maintain data integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: /usr/local/MyApplication/content_root/app/index.php - CodeFilePathKey = attribute.Key("code.file.path") - - // CodeFunctionNameKey is the attribute Key conforming to the - // "code.function.name" semantic conventions. It represents the method or - // function fully-qualified name without arguments. The value should fit the - // natural representation of the language runtime, which is also likely the same - // used within `code.stacktrace` attribute value. This attribute MUST NOT be - // used on the Profile signal since the data is already captured in 'message - // Function'. This constraint is imposed to prevent redundancy and maintain data - // integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "com.example.MyHttpService.serveRequest", - // "GuzzleHttp\Client::transfer", "fopen" - // Note: Values and format depends on each language runtime, thus it is - // impossible to provide an exhaustive list of examples. - // The values are usually the same (or prefixes of) the ones found in native - // stack trace representation stored in - // `code.stacktrace` without information on arguments. 
- // - // Examples: - // - // - Java method: `com.example.MyHttpService.serveRequest` - // - Java anonymous class method: `com.mycompany.Main$1.myMethod` - // - Java lambda method: - // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` - // - PHP function: `GuzzleHttp\Client::transfer` - // - Go function: `github.com/my/repo/pkg.foo.func5` - // - Elixir: `OpenTelemetry.Ctx.new` - // - Erlang: `opentelemetry_ctx:new` - // - Rust: `playground::my_module::my_cool_func` - // - C function: `fopen` - CodeFunctionNameKey = attribute.Key("code.function.name") - - // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" - // semantic conventions. It represents the line number in `code.file.path` best - // representing the operation. It SHOULD point within the code unit named in - // `code.function.name`. This attribute MUST NOT be used on the Profile signal - // since the data is already captured in 'message Line'. This constraint is - // imposed to prevent redundancy and maintain data integrity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - CodeLineNumberKey = attribute.Key("code.line.number") - - // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" - // semantic conventions. It represents a stacktrace as a string in the natural - // representation for the language runtime. The representation is identical to - // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile - // signal since the data is already captured in 'message Location'. This - // constraint is imposed to prevent redundancy and maintain data integrity. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at - // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at - // com.example.GenerateTrace.main(GenerateTrace.java:5) - // - // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumnNumber returns an attribute KeyValue conforming to the -// "code.column.number" semantic conventions. It represents the column number in -// `code.file.path` best representing the operation. It SHOULD point within the -// code unit named in `code.function.name`. This attribute MUST NOT be used on -// the Profile signal since the data is already captured in 'message Line'. This -// constraint is imposed to prevent redundancy and maintain data integrity. -func CodeColumnNumber(val int) attribute.KeyValue { - return CodeColumnNumberKey.Int(val) -} - -// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" -// semantic conventions. It represents the source code file name that identifies -// the code unit as uniquely as possible (preferably an absolute file path). This -// attribute MUST NOT be used on the Profile signal since the data is already -// captured in 'message Function'. This constraint is imposed to prevent -// redundancy and maintain data integrity. -func CodeFilePath(val string) attribute.KeyValue { - return CodeFilePathKey.String(val) -} - -// CodeFunctionName returns an attribute KeyValue conforming to the -// "code.function.name" semantic conventions. It represents the method or -// function fully-qualified name without arguments. The value should fit the -// natural representation of the language runtime, which is also likely the same -// used within `code.stacktrace` attribute value. 
This attribute MUST NOT be used -// on the Profile signal since the data is already captured in 'message -// Function'. This constraint is imposed to prevent redundancy and maintain data -// integrity. -func CodeFunctionName(val string) attribute.KeyValue { - return CodeFunctionNameKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the -// "code.line.number" semantic conventions. It represents the line number in -// `code.file.path` best representing the operation. It SHOULD point within the -// code unit named in `code.function.name`. This attribute MUST NOT be used on -// the Profile signal since the data is already captured in 'message Line'. This -// constraint is imposed to prevent redundancy and maintain data integrity. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a string -// in the natural representation for the language runtime. The representation is -// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the -// Profile signal since the data is already captured in 'message Location'. This -// constraint is imposed to prevent redundancy and maintain data integrity. -// -// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// Namespace: container -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used to - // run the container (i.e. the command name). 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol" - // Note: If using embedded credentials or sensitive data, it is recommended to - // remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol", "--config", "config.yaml" - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full command - // run by the container as a single string representing the full command. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol --config config.yaml" - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCSIPluginNameKey is the attribute Key conforming to the - // "container.csi.plugin.name" semantic conventions. It represents the name of - // the CSI ([Container Storage Interface]) plugin used by the volume. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pd.csi.storage.gke.io" - // Note: This can sometimes be referred to as a "driver" in CSI implementations. - // This should represent the `name` field of the GetPluginInfo RPC. 
- // - // [Container Storage Interface]: https://github.com/container-storage-interface/spec - ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") - - // ContainerCSIVolumeIDKey is the attribute Key conforming to the - // "container.csi.volume.id" semantic conventions. It represents the unique - // volume ID returned by the CSI ([Container Storage Interface]) plugin. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" - // Note: This can sometimes be referred to as a "volume handle" in CSI - // implementations. This should represent the `Volume.volume_id` field in CSI - // spec. - // - // [Container Storage Interface]: https://github.com/container-storage-interface/spec - ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") - - // ContainerIDKey is the attribute Key conforming to the "container.id" semantic - // conventions. It represents the container ID. Usually a UUID, as for example - // used to [identify Docker containers]. The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "a3bf90e006b2" - // - // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime specific - // image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect [API] - // endpoint. 
- // K8s defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` - // . - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - // - // [API]: https://docs.docker.com/reference/api/engine/version/v1.52/#tag/Container/operation/ContainerInspect - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of the - // image the container was built on. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "gcr.io/opentelemetry/operator" - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the repo - // digests of the container image as provided by the container runtime. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: - // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", - // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - // Note: [Docker] and [CRI] report those under the `RepoDigests` field. 
- // - // [Docker]: https://docs.docker.com/reference/api/engine/version/v1.52/#tag/Image/operation/ImageInspect - // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image Inspect]. Should be only - // the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "v1.27.1", "3.5.7-0" - // - // [Docker Image Inspect]: https://docs.docker.com/reference/api/engine/version/v1.52/#tag/Image/operation/ImageInspect - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-autoconf" - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeDescriptionKey is the attribute Key conforming to the - // "container.runtime.description" semantic conventions. It represents a - // description about the runtime which could include, for example details about - // the CRI/API version being used or other customisations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "docker://19.3.1 - CRI: 1.22.0" - ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description") - - // ContainerRuntimeNameKey is the attribute Key conforming to the - // "container.runtime.name" semantic conventions. 
It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "docker", "containerd", "rkt" - ContainerRuntimeNameKey = attribute.Key("container.runtime.name") - - // ContainerRuntimeVersionKey is the attribute Key conforming to the - // "container.runtime.version" semantic conventions. It represents the version - // of the runtime of this process, as returned by the runtime without - // modification. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0.0 - ContainerRuntimeVersionKey = attribute.Key("container.runtime.version") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full command -// run by the container as a single string representing the full command. -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerCSIPluginName returns an attribute KeyValue conforming to the -// "container.csi.plugin.name" semantic conventions. It represents the name of -// the CSI ([Container Storage Interface]) plugin used by the volume. 
-// -// [Container Storage Interface]: https://github.com/container-storage-interface/spec -func ContainerCSIPluginName(val string) attribute.KeyValue { - return ContainerCSIPluginNameKey.String(val) -} - -// ContainerCSIVolumeID returns an attribute KeyValue conforming to the -// "container.csi.volume.id" semantic conventions. It represents the unique -// volume ID returned by the CSI ([Container Storage Interface]) plugin. -// -// [Container Storage Interface]: https://github.com/container-storage-interface/spec -func ContainerCSIVolumeID(val string) attribute.KeyValue { - return ContainerCSIVolumeIDKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the "container.id" -// semantic conventions. It represents the container ID. Usually a UUID, as for -// example used to [identify Docker containers]. The UUID might be abbreviated. -// -// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime specific -// image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. 
-func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container image -// tags. An example can be found in [Docker Image Inspect]. Should be only the -// `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -// -// [Docker Image Inspect]: https://docs.docker.com/reference/api/engine/version/v1.52/#tag/Image/operation/ImageInspect -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerLabel returns an attribute KeyValue conforming to the -// "container.label" semantic conventions. It represents the container labels, -// `` being the label name, the value being the label value. -func ContainerLabel(key string, val string) attribute.KeyValue { - return attribute.String("container.label."+key, val) -} - -// ContainerName returns an attribute KeyValue conforming to the "container.name" -// semantic conventions. It represents the container name used by container -// runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntimeDescription returns an attribute KeyValue conforming to the -// "container.runtime.description" semantic conventions. It represents a -// description about the runtime which could include, for example details about -// the CRI/API version being used or other customisations. -func ContainerRuntimeDescription(val string) attribute.KeyValue { - return ContainerRuntimeDescriptionKey.String(val) -} - -// ContainerRuntimeName returns an attribute KeyValue conforming to the -// "container.runtime.name" semantic conventions. It represents the container -// runtime managing this container. 
-func ContainerRuntimeName(val string) attribute.KeyValue { - return ContainerRuntimeNameKey.String(val) -} - -// ContainerRuntimeVersion returns an attribute KeyValue conforming to the -// "container.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without modification. -func ContainerRuntimeVersion(val string) attribute.KeyValue { - return ContainerRuntimeVersionKey.String(val) -} - -// Namespace: cpu -const ( - // CPULogicalNumberKey is the attribute Key conforming to the - // "cpu.logical_number" semantic conventions. It represents the logical CPU - // number [0..n-1]. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - CPULogicalNumberKey = attribute.Key("cpu.logical_number") - - // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic - // conventions. It represents the mode of the CPU. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "user", "system" - CPUModeKey = attribute.Key("cpu.mode") -) - -// CPULogicalNumber returns an attribute KeyValue conforming to the -// "cpu.logical_number" semantic conventions. It represents the logical CPU -// number [0..n-1]. 
-func CPULogicalNumber(val int) attribute.KeyValue { - return CPULogicalNumberKey.Int(val) -} - -// Enum values for cpu.mode -var ( - // User - // Stability: development - CPUModeUser = CPUModeKey.String("user") - // System - // Stability: development - CPUModeSystem = CPUModeKey.String("system") - // Nice - // Stability: development - CPUModeNice = CPUModeKey.String("nice") - // Idle - // Stability: development - CPUModeIdle = CPUModeKey.String("idle") - // IO Wait - // Stability: development - CPUModeIOWait = CPUModeKey.String("iowait") - // Interrupt - // Stability: development - CPUModeInterrupt = CPUModeKey.String("interrupt") - // Steal - // Stability: development - CPUModeSteal = CPUModeKey.String("steal") - // Kernel - // Stability: development - CPUModeKernel = CPUModeKey.String("kernel") -) - -// Namespace: db -const ( - // DBClientConnectionPoolNameKey is the attribute Key conforming to the - // "db.client.connection.pool.name" semantic conventions. It represents the name - // of the connection pool; unique within the instrumented application. In case - // the connection pool implementation doesn't provide a name, instrumentation - // SHOULD use a combination of parameters that would make the name unique, for - // example, combining attributes `server.address`, `server.port`, and - // `db.namespace`, formatted as `server.address:server.port/db.namespace`. - // Instrumentations that generate connection pool name following different - // patterns SHOULD document it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myDataSource" - DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") - - // DBClientConnectionStateKey is the attribute Key conforming to the - // "db.client.connection.state" semantic conventions. It represents the state of - // a connection in the pool. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "idle" - DBClientConnectionStateKey = attribute.Key("db.client.connection.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "public.users", "customers" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // The collection name SHOULD NOT be extracted from `db.query.text`, - // when the database system supports query text with multiple collections - // in non-batch operations. - // - // For batch operations, if the individual operations are known to have the same - // collection name then that collection name SHOULD be used. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic - // conventions. It represents the name of the database, fully qualified within - // the server address and port. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "customers", "test.users" - // Note: If a database system has multiple namespace components, they SHOULD be - // concatenated from the most general to the most specific namespace component, - // using `|` as a separator between the components. Any missing components (and - // their associated separators) SHOULD be omitted. - // Semantic conventions for individual database systems SHOULD document what - // `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application without - // attempting to do any case normalization. 
- DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationBatchSizeKey is the attribute Key conforming to the - // "db.operation.batch.size" semantic conventions. It represents the number of - // queries included in a batch operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 2, 3, 4 - // Note: Operations are only considered batches when they contain two or more - // operations, and so `db.operation.batch.size` SHOULD never be `1`. - DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") - - // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" - // semantic conventions. It represents the name of the operation or command - // being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "findAndModify", "HMSET", "SELECT" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // The operation name SHOULD NOT be extracted from `db.query.text`, - // when the database system supports query text with multiple operations - // in non-batch operations. - // - // If spaces can occur in the operation name, multiple consecutive spaces - // SHOULD be normalized to a single space. - // - // For batch operations, if the individual operations are known to have the same - // operation name - // then that operation name SHOULD be used prepended by `BATCH `, - // otherwise `db.operation.name` SHOULD be `BATCH` or some other database - // system specific term if more applicable. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" - // semantic conventions. It represents the low cardinality summary of a database - // query. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get - // user by id" - // Note: The query summary describes a class of database queries and is useful - // as a grouping key, especially when analyzing telemetry for database - // calls involving complex queries. - // - // Summary may be available to the instrumentation through - // instrumentation hooks or other means. If it is not available, - // instrumentations - // that support query parsing SHOULD generate a summary following - // [Generating query summary] - // section. - // - // For batch operations, if the individual operations are known to have the same - // query summary - // then that query summary SHOULD be used prepended by `BATCH `, - // otherwise `db.query.summary` SHOULD be `BATCH` or some other database - // system specific term if more applicable. - // - // [Generating query summary]: /docs/db/database-spans.md#generating-a-summary-of-the-query - DBQuerySummaryKey = attribute.Key("db.query.summary") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" - // Note: For sanitization see [Sanitization of `db.query.text`]. - // For batch operations, if the individual operations are known to have the same - // query text then that query text SHOULD be used, otherwise all of the - // individual query texts SHOULD be concatenated with separator `; ` or some - // other database system specific separator if more applicable. - // Parameterized query text SHOULD NOT be sanitized. 
Even though parameterized - // query text can potentially have sensitive data, by using a parameterized - // query the user is giving a strong signal that any sensitive data will be - // passed as parameter values, and the benefit to observability of capturing the - // static part of the query text by default outweighs the risk. - // - // [Sanitization of `db.query.text`]: /docs/db/database-spans.md#sanitization-of-dbquerytext - DBQueryTextKey = attribute.Key("db.query.text") - - // DBResponseReturnedRowsKey is the attribute Key conforming to the - // "db.response.returned_rows" semantic conventions. It represents the number of - // rows returned by the operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10, 30, 1000 - DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") - - // DBResponseStatusCodeKey is the attribute Key conforming to the - // "db.response.status_code" semantic conventions. It represents the database - // response status code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "102", "ORA-17002", "08P01", "404" - // Note: The status code returned by the database. Usually it represents an - // error code, but may also represent partial success, warning, or differentiate - // between various types of successful outcomes. - // Semantic conventions for individual database systems SHOULD document what - // `db.response.status_code` means in the context of that system. - DBResponseStatusCodeKey = attribute.Key("db.response.status_code") - - // DBStoredProcedureNameKey is the attribute Key conforming to the - // "db.stored_procedure.name" semantic conventions. It represents the name of a - // stored procedure within the database. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GetCustomer" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // For batch operations, if the individual operations are known to have the same - // stored procedure name then that stored procedure name SHOULD be used. - DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") - - // DBSystemNameKey is the attribute Key conforming to the "db.system.name" - // semantic conventions. It represents the database management system (DBMS) - // product as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - // Note: The actual DBMS may differ from the one identified by the client. For - // example, when using PostgreSQL client libraries to connect to a CockroachDB, - // the `db.system.name` is set to `postgresql` based on the instrumentation's - // best knowledge. - DBSystemNameKey = attribute.Key("db.system.name") -) - -// DBClientConnectionPoolName returns an attribute KeyValue conforming to the -// "db.client.connection.pool.name" semantic conventions. It represents the name -// of the connection pool; unique within the instrumented application. In case -// the connection pool implementation doesn't provide a name, instrumentation -// SHOULD use a combination of parameters that would make the name unique, for -// example, combining attributes `server.address`, `server.port`, and -// `db.namespace`, formatted as `server.address:server.port/db.namespace`. -// Instrumentations that generate connection pool name following different -// patterns SHOULD document it. 
-func DBClientConnectionPoolName(val string) attribute.KeyValue { - return DBClientConnectionPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" -// semantic conventions. It represents the name of the database, fully qualified -// within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationBatchSize returns an attribute KeyValue conforming to the -// "db.operation.batch.size" semantic conventions. It represents the number of -// queries included in a batch operation. -func DBOperationBatchSize(val int) attribute.KeyValue { - return DBOperationBatchSizeKey.Int(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBOperationParameter returns an attribute KeyValue conforming to the -// "db.operation.parameter" semantic conventions. It represents a database -// operation parameter, with `` being the parameter name, and the attribute -// value being a string representation of the parameter value. -func DBOperationParameter(key string, val string) attribute.KeyValue { - return attribute.String("db.operation.parameter."+key, val) -} - -// DBQueryParameter returns an attribute KeyValue conforming to the -// "db.query.parameter" semantic conventions. 
It represents a database query -// parameter, with `` being the parameter name, and the attribute value -// being a string representation of the parameter value. -func DBQueryParameter(key string, val string) attribute.KeyValue { - return attribute.String("db.query.parameter."+key, val) -} - -// DBQuerySummary returns an attribute KeyValue conforming to the -// "db.query.summary" semantic conventions. It represents the low cardinality -// summary of a database query. -func DBQuerySummary(val string) attribute.KeyValue { - return DBQuerySummaryKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" -// semantic conventions. It represents the database query being executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// DBResponseReturnedRows returns an attribute KeyValue conforming to the -// "db.response.returned_rows" semantic conventions. It represents the number of -// rows returned by the operation. -func DBResponseReturnedRows(val int) attribute.KeyValue { - return DBResponseReturnedRowsKey.Int(val) -} - -// DBResponseStatusCode returns an attribute KeyValue conforming to the -// "db.response.status_code" semantic conventions. It represents the database -// response status code. -func DBResponseStatusCode(val string) attribute.KeyValue { - return DBResponseStatusCodeKey.String(val) -} - -// DBStoredProcedureName returns an attribute KeyValue conforming to the -// "db.stored_procedure.name" semantic conventions. It represents the name of a -// stored procedure within the database. 
-func DBStoredProcedureName(val string) attribute.KeyValue { - return DBStoredProcedureNameKey.String(val) -} - -// Enum values for db.client.connection.state -var ( - // idle - // Stability: development - DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") - // used - // Stability: development - DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") -) - -// Enum values for db.system.name -var ( - // Some other SQL database. Fallback only. - // Stability: development - DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") - // [Adabas (Adaptable Database System)] - // Stability: development - // - // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas - DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") - // [Actian Ingres] - // Stability: development - // - // [Actian Ingres]: https://www.actian.com/databases/ingres/ - DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") - // [Amazon DynamoDB] - // Stability: development - // - // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ - DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") - // [Amazon Redshift] - // Stability: development - // - // [Amazon Redshift]: https://aws.amazon.com/redshift/ - DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") - // [Azure Cosmos DB] - // Stability: development - // - // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db - DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") - // [InterSystems Caché] - // Stability: development - // - // [InterSystems Caché]: https://www.intersystems.com/products/cache/ - DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") - // [Apache Cassandra] - // Stability: development - // - // [Apache Cassandra]: https://cassandra.apache.org/ - DBSystemNameCassandra = DBSystemNameKey.String("cassandra") - // [ClickHouse] - // Stability: development - // - // 
[ClickHouse]: https://clickhouse.com/ - DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") - // [CockroachDB] - // Stability: development - // - // [CockroachDB]: https://www.cockroachlabs.com/ - DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") - // [Couchbase] - // Stability: development - // - // [Couchbase]: https://www.couchbase.com/ - DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") - // [Apache CouchDB] - // Stability: development - // - // [Apache CouchDB]: https://couchdb.apache.org/ - DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") - // [Apache Derby] - // Stability: development - // - // [Apache Derby]: https://db.apache.org/derby/ - DBSystemNameDerby = DBSystemNameKey.String("derby") - // [Elasticsearch] - // Stability: development - // - // [Elasticsearch]: https://www.elastic.co/elasticsearch - DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") - // [Firebird] - // Stability: development - // - // [Firebird]: https://www.firebirdsql.org/ - DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") - // [Google Cloud Spanner] - // Stability: development - // - // [Google Cloud Spanner]: https://cloud.google.com/spanner - DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") - // [Apache Geode] - // Stability: development - // - // [Apache Geode]: https://geode.apache.org/ - DBSystemNameGeode = DBSystemNameKey.String("geode") - // [H2 Database] - // Stability: development - // - // [H2 Database]: https://h2database.com/ - DBSystemNameH2database = DBSystemNameKey.String("h2database") - // [Apache HBase] - // Stability: development - // - // [Apache HBase]: https://hbase.apache.org/ - DBSystemNameHBase = DBSystemNameKey.String("hbase") - // [Apache Hive] - // Stability: development - // - // [Apache Hive]: https://hive.apache.org/ - DBSystemNameHive = DBSystemNameKey.String("hive") - // [HyperSQL Database] - // Stability: development - // - // [HyperSQL Database]: 
https://hsqldb.org/ - DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") - // [IBM Db2] - // Stability: development - // - // [IBM Db2]: https://www.ibm.com/db2 - DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") - // [IBM Informix] - // Stability: development - // - // [IBM Informix]: https://www.ibm.com/products/informix - DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") - // [IBM Netezza] - // Stability: development - // - // [IBM Netezza]: https://www.ibm.com/products/netezza - DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") - // [InfluxDB] - // Stability: development - // - // [InfluxDB]: https://www.influxdata.com/ - DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") - // [Instant] - // Stability: development - // - // [Instant]: https://www.instantdb.com/ - DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") - // [MariaDB] - // Stability: stable - // - // [MariaDB]: https://mariadb.org/ - DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") - // [Memcached] - // Stability: development - // - // [Memcached]: https://memcached.org/ - DBSystemNameMemcached = DBSystemNameKey.String("memcached") - // [MongoDB] - // Stability: development - // - // [MongoDB]: https://www.mongodb.com/ - DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") - // [Microsoft SQL Server] - // Stability: stable - // - // [Microsoft SQL Server]: https://www.microsoft.com/sql-server - DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") - // [MySQL] - // Stability: stable - // - // [MySQL]: https://www.mysql.com/ - DBSystemNameMySQL = DBSystemNameKey.String("mysql") - // [Neo4j] - // Stability: development - // - // [Neo4j]: https://neo4j.com/ - DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") - // [OpenSearch] - // Stability: development - // - // [OpenSearch]: https://opensearch.org/ - DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") - // [Oracle Database] - // Stability: development - 
// - // [Oracle Database]: https://www.oracle.com/database/ - DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") - // [PostgreSQL] - // Stability: stable - // - // [PostgreSQL]: https://www.postgresql.org/ - DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") - // [Redis] - // Stability: development - // - // [Redis]: https://redis.io/ - DBSystemNameRedis = DBSystemNameKey.String("redis") - // [SAP HANA] - // Stability: development - // - // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html - DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") - // [SAP MaxDB] - // Stability: development - // - // [SAP MaxDB]: https://maxdb.sap.com/ - DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") - // [SQLite] - // Stability: development - // - // [SQLite]: https://www.sqlite.org/ - DBSystemNameSQLite = DBSystemNameKey.String("sqlite") - // [Teradata] - // Stability: development - // - // [Teradata]: https://www.teradata.com/ - DBSystemNameTeradata = DBSystemNameKey.String("teradata") - // [Trino] - // Stability: development - // - // [Trino]: https://trino.io/ - DBSystemNameTrino = DBSystemNameKey.String("trino") -) - -// Namespace: deployment -const ( - // DeploymentEnvironmentNameKey is the attribute Key conforming to the - // "deployment.environment.name" semantic conventions. It represents the name of - // the [deployment environment] (aka deployment tier). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "staging", "production" - // Note: `deployment.environment.name` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` resource - // attributes. 
- // This implies that resources carrying the following attribute combinations - // MUST be - // considered to be identifying the same service: - // - // - `service.name=frontend`, `deployment.environment.name=production` - // - `service.name=frontend`, `deployment.environment.name=staging`. - // - // - // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment - DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") - - // DeploymentIDKey is the attribute Key conforming to the "deployment.id" - // semantic conventions. It represents the id of the deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1208" - DeploymentIDKey = attribute.Key("deployment.id") - - // DeploymentNameKey is the attribute Key conforming to the "deployment.name" - // semantic conventions. It represents the name of the deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "deploy my app", "deploy-frontend" - DeploymentNameKey = attribute.Key("deployment.name") - - // DeploymentStatusKey is the attribute Key conforming to the - // "deployment.status" semantic conventions. It represents the status of the - // deployment. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - DeploymentStatusKey = attribute.Key("deployment.status") -) - -// DeploymentEnvironmentName returns an attribute KeyValue conforming to the -// "deployment.environment.name" semantic conventions. It represents the name of -// the [deployment environment] (aka deployment tier). -// -// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment -func DeploymentEnvironmentName(val string) attribute.KeyValue { - return DeploymentEnvironmentNameKey.String(val) -} - -// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" -// semantic conventions. 
It represents the id of the deployment. -func DeploymentID(val string) attribute.KeyValue { - return DeploymentIDKey.String(val) -} - -// DeploymentName returns an attribute KeyValue conforming to the -// "deployment.name" semantic conventions. It represents the name of the -// deployment. -func DeploymentName(val string) attribute.KeyValue { - return DeploymentNameKey.String(val) -} - -// Enum values for deployment.status -var ( - // failed - // Stability: development - DeploymentStatusFailed = DeploymentStatusKey.String("failed") - // succeeded - // Stability: development - DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") -) - -// Namespace: destination -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the destination - // address - domain name if available without reverse DNS lookup; otherwise, IP - // address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the source side, and when communicating through an - // intermediary, `destination.address` SHOULD represent the destination address - // behind any intermediaries, for example proxies, if it's available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the "destination.port" - // semantic conventions. It represents the destination port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. 
It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number. -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Namespace: device -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123456789012345", "01:23:45:67:89:AB" - // Note: Its value SHOULD be identical for all apps on a device and it SHOULD - // NOT change if an app is uninstalled and re-installed. - // However, it might be resettable by the user for all apps on a device. - // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be - // used as values. - // - // More information about Android identifier best practices can be found in the - // [Android user data IDs guide]. - // - // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution - // > should be taken when storing personal data or anything which can identify a - // > user. GDPR and data protection laws may apply, - // > ensure you do your own due diligence.> Due to these reasons, this - // > identifier is not recommended for consumer applications and will likely - // > result in rejection from both Google Play and App Store. - // > However, it may be appropriate for specific enterprise scenarios, such as - // > kiosk devices or enterprise-managed devices, with appropriate compliance - // > clearance. 
- // > Any instrumentation providing this identifier MUST implement it as an - // > opt-in feature.> See [`app.installation.id`]> for a more - // > privacy-preserving alternative. - // - // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids - // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of the - // device manufacturer. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Apple", "Samsung" - // Note: The Android OS provides this field via [Build]. iOS apps SHOULD - // hardcode the value `Apple`. - // - // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iPhone3,4", "SM-G920F" - // Note: It's recommended this value represents a machine-readable version of - // the model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" - // semantic conventions. It represents the marketing name for the device model. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" - // Note: It's recommended this value represents a human-readable version of the - // device model rather than a machine-readable alternative. 
- DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic -// conventions. It represents a unique identifier representing the device. -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer. -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device. -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name for -// the device model. -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// Namespace: disk -const ( - // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" - // semantic conventions. It represents the disk IO operation direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "read" - DiskIODirectionKey = attribute.Key("disk.io.direction") -) - -// Enum values for disk.io.direction -var ( - // read - // Stability: development - DiskIODirectionRead = DiskIODirectionKey.String("read") - // write - // Stability: development - DiskIODirectionWrite = DiskIODirectionKey.String("write") -) - -// Namespace: dns -const ( - // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic - // conventions. It represents the list of IPv4 or IPv6 addresses resolved during - // DNS lookup. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" - DNSAnswersKey = attribute.Key("dns.answers") - - // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" - // semantic conventions. It represents the name being queried. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "www.example.com", "opentelemetry.io" - // Note: The name represents the queried domain name as it appears in the DNS - // query without any additional normalization. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers" -// semantic conventions. It represents the list of IPv4 or IPv6 addresses -// resolved during DNS lookup. -func DNSAnswers(val ...string) attribute.KeyValue { - return DNSAnswersKey.StringSlice(val) -} - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Namespace: elasticsearch -const ( - // ElasticsearchNodeNameKey is the attribute Key conforming to the - // "elasticsearch.node.name" semantic conventions. It represents the represents - // the human-readable identifier of the node/instance to which a request was - // routed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "instance-0000000001" - ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") -) - -// ElasticsearchNodeName returns an attribute KeyValue conforming to the -// "elasticsearch.node.name" semantic conventions. It represents the represents -// the human-readable identifier of the node/instance to which a request was -// routed. 
-func ElasticsearchNodeName(val string) attribute.KeyValue { - return ElasticsearchNodeNameKey.String(val) -} - -// Namespace: enduser -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic - // conventions. It represents the unique identifier of an end user in the - // system. It maybe a username, email address, or other identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "username" - // Note: Unique identifier of an end user in the system. - // - // > [!Warning] - // > This field contains sensitive (PII) information. - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id" - // semantic conventions. It represents the pseudonymous identifier of an end - // user. This identifier should be a random value that is not directly linked or - // associated with the end user's actual identity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "QdH5CAWJgqVT4rOr0qtumf" - // Note: Pseudonymous identifier of an end user. - // - // > [!Warning] - // > This field contains sensitive (linkable PII) information. - EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the unique identifier of an end user in -// the system. It maybe a username, email address, or other identifier. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserPseudoID returns an attribute KeyValue conforming to the -// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous -// identifier of an end user. This identifier should be a random value that is -// not directly linked or associated with the end user's actual identity. 
-func EnduserPseudoID(val string) attribute.KeyValue { - return EnduserPseudoIDKey.String(val) -} - -// Namespace: error -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic - // conventions. It represents the describes a class of error the operation ended - // with. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "timeout", "java.net.UnknownHostException", - // "server_certificate_invalid", "500" - // Note: The `error.type` SHOULD be predictable, and SHOULD have low - // cardinality. - // - // When `error.type` is set to a type (e.g., an exception type), its - // canonical class name identifying the type within the artifact SHOULD be used. - // - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library SHOULD be - // low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query time - // when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT set - // `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as HTTP - // or RPC status codes), - // it's RECOMMENDED to: - // - // - Use a domain-specific attribute - // - Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -// Enum values for error.type -var ( - // A fallback error value to be used when the instrumentation doesn't define a - // custom value. - // - // Stability: stable - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// Namespace: exception -const ( - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. 
It represents the exception - // message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" - // Note: > [!WARNING] - // - // > This attribute may contain sensitive information. - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: Exception in thread "main" java.lang.RuntimeException: Test - // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at - // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at - // com.example.GenerateTrace.main(GenerateTrace.java:5) - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the exception - // should be preferred over the static type in languages that support it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "java.net.ConnectException", "OSError" - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. 
It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the "exception.type" -// semantic conventions. It represents the type of the exception (its -// fully-qualified class name, if applicable). The dynamic type of the exception -// should be preferred over the static type in languages that support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// Namespace: faas -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the serverless - // function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSColdstartKey = attribute.Key("faas.coldstart") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron Expression]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0/5 * * * ? * - // - // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name of - // the source on which the triggering operation was performed. For example, in - // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the - // database name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myBucketName", "myDbName" - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or S3 is - // the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myFile.txt", "myTableName" - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the describes - // the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string containing - // the time when the data was accessed in the [ISO 8601] format expressed in - // [UTC]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 2020-01-23T13:47:06Z - // - // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html - // [UTC]: https://www.w3.org/TR/NOTE-datetime - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a string, - // that will be potentially reused for other invocations to the same - // function/function version. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" - // Note: - **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation ID of - // the current function invocation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") - - // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" - // semantic conventions. It represents the name of the invoked function. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: my-function - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud region of - // the invoked function. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: eu-central-1 - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. 
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" - // semantic conventions. It represents the amount of memory available to the - // serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information (which must be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this runtime - // instance executes. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-function", "myazurefunctionapp/some-function-name" - // Note: This is the name of the function as configured/deployed on the FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function.name`] - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud providers/products: - // - // - **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). 
- // - // - // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes - FaaSNameKey = attribute.Key("faas.name") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation time - // in the [ISO 8601] format expressed in [UTC]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 2020-01-23T13:47:06Z - // - // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html - // [UTC]: https://www.w3.org/TR/NOTE-datetime - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic - // conventions. It represents the type of the trigger which caused this function - // invocation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic - // conventions. It represents the immutable version of the function being - // executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "26", "pinkfroid-00002" - // Note: Depending on the cloud provider and platform, use: - // - // - **AWS Lambda:** The [function version] - // (an integer represented as a decimal string). - // - **Google Cloud Run (Services):** The [revision] - // (i.e., the function name plus the revision suffix). - // - **Google Cloud Functions:** The value of the - // [`K_REVISION` environment variable]. - // - **Azure Functions:** Not applicable. Do not set this attribute. 
- // - // - // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html - // [revision]: https://cloud.google.com/run/docs/managing/revisions - // [`K_REVISION` environment variable]: https://cloud.google.com/run/docs/container-contract#services-env-vars - FaaSVersionKey = attribute.Key("faas.version") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" -// semantic conventions. It represents a boolean that is true if the serverless -// function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic -// conventions. It represents a string containing the schedule period as -// [Cron Expression]. -// -// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of the -// source on which the triggering operation was performed. For example, in Cloud -// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database -// name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 is -// the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. 
It represents a string containing -// the time when the data was accessed in the [ISO 8601] format expressed in -// [UTC]. -// -// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html -// [UTC]: https://www.w3.org/TR/NOTE-datetime -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" -// semantic conventions. It represents the execution environment ID as a string, -// that will be potentially reused for other invocations to the same -// function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID of -// the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region of -// the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic -// conventions. 
It represents the name of the single function that this runtime -// instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic -// conventions. It represents a string containing the function invocation time in -// the [ISO 8601] format expressed in [UTC]. -// -// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html -// [UTC]: https://www.w3.org/TR/NOTE-datetime -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" -// semantic conventions. It represents the immutable version of the function -// being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// Enum values for faas.document.operation -var ( - // When a new object is created. - // Stability: development - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified. - // Stability: development - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted. 
- // Stability: development - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Enum values for faas.invoked_provider -var ( - // Alibaba Cloud - // Stability: development - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - // Stability: development - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - // Stability: development - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - // Stability: development - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - // Stability: development - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// Enum values for faas.trigger -var ( - // A response to some data source operation such as a database or filesystem - // read/write - // Stability: development - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - // Stability: development - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - // Stability: development - FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - // Stability: development - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - // Stability: development - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Namespace: feature_flag -const ( - // FeatureFlagContextIDKey is the attribute Key conforming to the - // "feature_flag.context.id" semantic conventions. It represents the unique - // identifier for the flag evaluation context. For example, the targeting key. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" - FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") - - // FeatureFlagErrorMessageKey is the attribute Key conforming to the - // "feature_flag.error.message" semantic conventions. It represents a message - // providing more detail about an error that occurred during feature flag - // evaluation in human-readable form. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "Unexpected input type: string", "The user has exceeded their - // storage quota" - FeatureFlagErrorMessageKey = attribute.Key("feature_flag.error.message") - - // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" - // semantic conventions. It represents the lookup key of the feature flag. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "logo-color" - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider.name" semantic conventions. It represents the - // identifies the feature flag provider. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "Flag Manager" - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") - - // FeatureFlagResultReasonKey is the attribute Key conforming to the - // "feature_flag.result.reason" semantic conventions. It represents the reason - // code which shows how a feature flag value was determined. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "static", "targeting_match", "error", "default" - FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") - - // FeatureFlagResultValueKey is the attribute Key conforming to the - // "feature_flag.result.value" semantic conventions. It represents the evaluated - // value of the feature flag. - // - // Type: any - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "#ff0000", true, 3 - // Note: With some feature flag providers, feature flag results can be quite - // large or contain private or sensitive details. - // Because of this, `feature_flag.result.variant` is often the preferred - // attribute if it is available. - // - // It may be desirable to redact or otherwise limit the size and scope of - // `feature_flag.result.value` if possible. - // Because the evaluated flag value is unstructured and may be any type, it is - // left to the instrumentation author to determine how best to achieve this. - FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value") - - // FeatureFlagResultVariantKey is the attribute Key conforming to the - // "feature_flag.result.variant" semantic conventions. It represents a semantic - // identifier for an evaluated flag value. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "red", "true", "on" - // Note: A semantic identifier, commonly referred to as a variant, provides a - // means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant") - - // FeatureFlagSetIDKey is the attribute Key conforming to the - // "feature_flag.set.id" semantic conventions. 
It represents the identifier of - // the [flag set] to which the feature flag belongs. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "proj-1", "ab98sgs", "service1/dev" - // - // [flag set]: https://openfeature.dev/specification/glossary/#flag-set - FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") - - // FeatureFlagVersionKey is the attribute Key conforming to the - // "feature_flag.version" semantic conventions. It represents the version of the - // ruleset used during the evaluation. This may be any stable value which - // uniquely identifies the ruleset. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "1", "01ABCDEF" - FeatureFlagVersionKey = attribute.Key("feature_flag.version") -) - -// FeatureFlagContextID returns an attribute KeyValue conforming to the -// "feature_flag.context.id" semantic conventions. It represents the unique -// identifier for the flag evaluation context. For example, the targeting key. -func FeatureFlagContextID(val string) attribute.KeyValue { - return FeatureFlagContextIDKey.String(val) -} - -// FeatureFlagErrorMessage returns an attribute KeyValue conforming to the -// "feature_flag.error.message" semantic conventions. It represents a message -// providing more detail about an error that occurred during feature flag -// evaluation in human-readable form. -func FeatureFlagErrorMessage(val string) attribute.KeyValue { - return FeatureFlagErrorMessageKey.String(val) -} - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the lookup key of the -// feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider.name" semantic conventions. 
It represents the -// identifies the feature flag provider. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagResultVariant returns an attribute KeyValue conforming to the -// "feature_flag.result.variant" semantic conventions. It represents a semantic -// identifier for an evaluated flag value. -func FeatureFlagResultVariant(val string) attribute.KeyValue { - return FeatureFlagResultVariantKey.String(val) -} - -// FeatureFlagSetID returns an attribute KeyValue conforming to the -// "feature_flag.set.id" semantic conventions. It represents the identifier of -// the [flag set] to which the feature flag belongs. -// -// [flag set]: https://openfeature.dev/specification/glossary/#flag-set -func FeatureFlagSetID(val string) attribute.KeyValue { - return FeatureFlagSetIDKey.String(val) -} - -// FeatureFlagVersion returns an attribute KeyValue conforming to the -// "feature_flag.version" semantic conventions. It represents the version of the -// ruleset used during the evaluation. This may be any stable value which -// uniquely identifies the ruleset. -func FeatureFlagVersion(val string) attribute.KeyValue { - return FeatureFlagVersionKey.String(val) -} - -// Enum values for feature_flag.result.reason -var ( - // The resolved value is static (no dynamic evaluation). - // Stability: release_candidate - FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") - // The resolved value fell back to a pre-configured value (no dynamic evaluation - // occurred or dynamic evaluation yielded no result). - // Stability: release_candidate - FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") - // The resolved value was the result of a dynamic evaluation, such as a rule or - // specific user-targeting. 
- // Stability: release_candidate - FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") - // The resolved value was the result of pseudorandom assignment. - // Stability: release_candidate - FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") - // The resolved value was retrieved from cache. - // Stability: release_candidate - FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") - // The resolved value was the result of the flag being disabled in the - // management system. - // Stability: release_candidate - FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") - // The reason for the resolved value could not be determined. - // Stability: release_candidate - FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") - // The resolved value is non-authoritative or possibly out of date - // Stability: release_candidate - FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") - // The resolved value was the result of an error. - // Stability: release_candidate - FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") -) - -// Namespace: file -const ( - // FileAccessedKey is the attribute Key conforming to the "file.accessed" - // semantic conventions. It represents the time when the file was last accessed, - // in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: This attribute might not be supported by some file systems — NFS, - // FAT32, in embedded OS, etc. - FileAccessedKey = attribute.Key("file.accessed") - - // FileAttributesKey is the attribute Key conforming to the "file.attributes" - // semantic conventions. It represents the array of file attributes. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "readonly", "hidden" - // Note: Attributes names depend on the OS or file system. Here’s a - // non-exhaustive list of values expected for this attribute: `archive`, - // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, - // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, - // `write`. - FileAttributesKey = attribute.Key("file.attributes") - - // FileChangedKey is the attribute Key conforming to the "file.changed" semantic - // conventions. It represents the time when the file attributes or metadata was - // last changed, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: `file.changed` captures the time when any of the file's properties or - // attributes (including the content) are changed, while `file.modified` - // captures the timestamp when the file content is modified. - FileChangedKey = attribute.Key("file.changed") - - // FileCreatedKey is the attribute Key conforming to the "file.created" semantic - // conventions. It represents the time when the file was created, in ISO 8601 - // format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: This attribute might not be supported by some file systems — NFS, - // FAT32, in embedded OS, etc. - FileCreatedKey = attribute.Key("file.created") - - // FileDirectoryKey is the attribute Key conforming to the "file.directory" - // semantic conventions. It represents the directory where the file is located. - // It should include the drive letter, when appropriate. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/home/user", "C:\Program Files\MyApp" - FileDirectoryKey = attribute.Key("file.directory") - - // FileExtensionKey is the attribute Key conforming to the "file.extension" - // semantic conventions. It represents the file extension, excluding the leading - // dot. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "png", "gz" - // Note: When the file name has multiple extensions (example.tar.gz), only the - // last one should be captured ("gz", not "tar.gz"). - FileExtensionKey = attribute.Key("file.extension") - - // FileForkNameKey is the attribute Key conforming to the "file.fork_name" - // semantic conventions. It represents the name of the fork. A fork is - // additional data associated with a filesystem object. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Zone.Identifier" - // Note: On Linux, a resource fork is used to store additional data with a - // filesystem object. A file always has at least one fork for the data portion, - // and additional forks may exist. - // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default - // data stream for a file is just called $DATA. Zone.Identifier is commonly used - // by Windows to track contents downloaded from the Internet. An ADS is - // typically of the form: C:\path\to\filename.extension:some_fork_name, and - // some_fork_name is the value that should populate `fork_name`. - // `filename.extension` should populate `file.name`, and `extension` should - // populate `file.extension`. The full path, `file.path`, will include the fork - // name. - FileForkNameKey = attribute.Key("file.fork_name") - - // FileGroupIDKey is the attribute Key conforming to the "file.group.id" - // semantic conventions. It represents the primary Group ID (GID) of the file. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1000" - FileGroupIDKey = attribute.Key("file.group.id") - - // FileGroupNameKey is the attribute Key conforming to the "file.group.name" - // semantic conventions. It represents the primary group name of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "users" - FileGroupNameKey = attribute.Key("file.group.name") - - // FileInodeKey is the attribute Key conforming to the "file.inode" semantic - // conventions. It represents the inode representing the file in the filesystem. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "256383" - FileInodeKey = attribute.Key("file.inode") - - // FileModeKey is the attribute Key conforming to the "file.mode" semantic - // conventions. It represents the mode of the file in octal representation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0640" - FileModeKey = attribute.Key("file.mode") - - // FileModifiedKey is the attribute Key conforming to the "file.modified" - // semantic conventions. It represents the time when the file content was last - // modified, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - FileModifiedKey = attribute.Key("file.modified") - - // FileNameKey is the attribute Key conforming to the "file.name" semantic - // conventions. It represents the name of the file including the extension, - // without the directory. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.png" - FileNameKey = attribute.Key("file.name") - - // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" - // semantic conventions. 
It represents the user ID (UID) or security identifier - // (SID) of the file owner. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1000" - FileOwnerIDKey = attribute.Key("file.owner.id") - - // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" - // semantic conventions. It represents the username of the file owner. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - FileOwnerNameKey = attribute.Key("file.owner.name") - - // FilePathKey is the attribute Key conforming to the "file.path" semantic - // conventions. It represents the full path to the file, including the file - // name. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" - FilePathKey = attribute.Key("file.path") - - // FileSizeKey is the attribute Key conforming to the "file.size" semantic - // conventions. It represents the file size in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FileSizeKey = attribute.Key("file.size") - - // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the - // "file.symbolic_link.target_path" semantic conventions. It represents the path - // to the target of a symbolic link. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/usr/bin/python3" - // Note: This attribute is only applicable to symbolic links. - FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") -) - -// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" -// semantic conventions. It represents the time when the file was last accessed, -// in ISO 8601 format. 
-func FileAccessed(val string) attribute.KeyValue { - return FileAccessedKey.String(val) -} - -// FileAttributes returns an attribute KeyValue conforming to the -// "file.attributes" semantic conventions. It represents the array of file -// attributes. -func FileAttributes(val ...string) attribute.KeyValue { - return FileAttributesKey.StringSlice(val) -} - -// FileChanged returns an attribute KeyValue conforming to the "file.changed" -// semantic conventions. It represents the time when the file attributes or -// metadata was last changed, in ISO 8601 format. -func FileChanged(val string) attribute.KeyValue { - return FileChangedKey.String(val) -} - -// FileCreated returns an attribute KeyValue conforming to the "file.created" -// semantic conventions. It represents the time when the file was created, in ISO -// 8601 format. -func FileCreated(val string) attribute.KeyValue { - return FileCreatedKey.String(val) -} - -// FileDirectory returns an attribute KeyValue conforming to the "file.directory" -// semantic conventions. It represents the directory where the file is located. -// It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the "file.extension" -// semantic conventions. It represents the file extension, excluding the leading -// dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" -// semantic conventions. It represents the name of the fork. A fork is additional -// data associated with a filesystem object. -func FileForkName(val string) attribute.KeyValue { - return FileForkNameKey.String(val) -} - -// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" -// semantic conventions. It represents the primary Group ID (GID) of the file. 
-func FileGroupID(val string) attribute.KeyValue { - return FileGroupIDKey.String(val) -} - -// FileGroupName returns an attribute KeyValue conforming to the -// "file.group.name" semantic conventions. It represents the primary group name -// of the file. -func FileGroupName(val string) attribute.KeyValue { - return FileGroupNameKey.String(val) -} - -// FileInode returns an attribute KeyValue conforming to the "file.inode" -// semantic conventions. It represents the inode representing the file in the -// filesystem. -func FileInode(val string) attribute.KeyValue { - return FileInodeKey.String(val) -} - -// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic -// conventions. It represents the mode of the file in octal representation. -func FileMode(val string) attribute.KeyValue { - return FileModeKey.String(val) -} - -// FileModified returns an attribute KeyValue conforming to the "file.modified" -// semantic conventions. It represents the time when the file content was last -// modified, in ISO 8601 format. -func FileModified(val string) attribute.KeyValue { - return FileModifiedKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" semantic -// conventions. It represents the name of the file including the extension, -// without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" -// semantic conventions. It represents the user ID (UID) or security identifier -// (SID) of the file owner. -func FileOwnerID(val string) attribute.KeyValue { - return FileOwnerIDKey.String(val) -} - -// FileOwnerName returns an attribute KeyValue conforming to the -// "file.owner.name" semantic conventions. It represents the username of the file -// owner. 
-func FileOwnerName(val string) attribute.KeyValue { - return FileOwnerNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" semantic -// conventions. It represents the full path to the file, including the file name. -// It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" semantic -// conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the -// "file.symbolic_link.target_path" semantic conventions. It represents the path -// to the target of a symbolic link. -func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { - return FileSymbolicLinkTargetPathKey.String(val) -} - -// Namespace: gcp -const ( - // GCPAppHubApplicationContainerKey is the attribute Key conforming to the - // "gcp.apphub.application.container" semantic conventions. It represents the - // container within GCP where the AppHub application is defined. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "projects/my-container-project" - GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container") - - // GCPAppHubApplicationIDKey is the attribute Key conforming to the - // "gcp.apphub.application.id" semantic conventions. It represents the name of - // the application as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-application" - GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id") - - // GCPAppHubApplicationLocationKey is the attribute Key conforming to the - // "gcp.apphub.application.location" semantic conventions. 
It represents the GCP - // zone or region where the application is defined. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1" - GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location") - - // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the - // "gcp.apphub.service.criticality_type" semantic conventions. It represents the - // criticality of a service indicates its importance to the business. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub type enum] - // - // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type - GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type") - - // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the - // "gcp.apphub.service.environment_type" semantic conventions. It represents the - // environment of a service is the stage of a software lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub environment type] - // - // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 - GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type") - - // GCPAppHubServiceIDKey is the attribute Key conforming to the - // "gcp.apphub.service.id" semantic conventions. It represents the name of the - // service as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-service" - GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id") - - // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the - // "gcp.apphub.workload.criticality_type" semantic conventions. 
It represents - // the criticality of a workload indicates its importance to the business. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub type enum] - // - // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type - GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type") - - // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the - // "gcp.apphub.workload.environment_type" semantic conventions. It represents - // the environment of a workload is the stage of a software lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub environment type] - // - // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 - GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type") - - // GCPAppHubWorkloadIDKey is the attribute Key conforming to the - // "gcp.apphub.workload.id" semantic conventions. It represents the name of the - // workload as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-workload" - GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id") - - // GCPAppHubDestinationApplicationContainerKey is the attribute Key conforming - // to the "gcp.apphub_destination.application.container" semantic conventions. - // It represents the container within GCP where the AppHub destination - // application is defined. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "projects/my-container-project" - GCPAppHubDestinationApplicationContainerKey = attribute.Key("gcp.apphub_destination.application.container") - - // GCPAppHubDestinationApplicationIDKey is the attribute Key conforming to the - // "gcp.apphub_destination.application.id" semantic conventions. It represents - // the name of the destination application as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-application" - GCPAppHubDestinationApplicationIDKey = attribute.Key("gcp.apphub_destination.application.id") - - // GCPAppHubDestinationApplicationLocationKey is the attribute Key conforming to - // the "gcp.apphub_destination.application.location" semantic conventions. It - // represents the GCP zone or region where the destination application is - // defined. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1" - GCPAppHubDestinationApplicationLocationKey = attribute.Key("gcp.apphub_destination.application.location") - - // GCPAppHubDestinationServiceCriticalityTypeKey is the attribute Key conforming - // to the "gcp.apphub_destination.service.criticality_type" semantic - // conventions. It represents the criticality of a destination workload - // indicates its importance to the business as specified in [AppHub type enum]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type - GCPAppHubDestinationServiceCriticalityTypeKey = attribute.Key("gcp.apphub_destination.service.criticality_type") - - // GCPAppHubDestinationServiceEnvironmentTypeKey is the attribute Key conforming - // to the "gcp.apphub_destination.service.environment_type" semantic - // conventions. 
It represents the software lifecycle stage of a destination - // service as defined [AppHub environment type]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 - GCPAppHubDestinationServiceEnvironmentTypeKey = attribute.Key("gcp.apphub_destination.service.environment_type") - - // GCPAppHubDestinationServiceIDKey is the attribute Key conforming to the - // "gcp.apphub_destination.service.id" semantic conventions. It represents the - // name of the destination service as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-service" - GCPAppHubDestinationServiceIDKey = attribute.Key("gcp.apphub_destination.service.id") - - // GCPAppHubDestinationWorkloadCriticalityTypeKey is the attribute Key - // conforming to the "gcp.apphub_destination.workload.criticality_type" semantic - // conventions. It represents the criticality of a destination workload - // indicates its importance to the business as specified in [AppHub type enum]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type - GCPAppHubDestinationWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub_destination.workload.criticality_type") - - // GCPAppHubDestinationWorkloadEnvironmentTypeKey is the attribute Key - // conforming to the "gcp.apphub_destination.workload.environment_type" semantic - // conventions. It represents the environment of a destination workload is the - // stage of a software lifecycle as provided in the [AppHub environment type]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 - GCPAppHubDestinationWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub_destination.workload.environment_type") - - // GCPAppHubDestinationWorkloadIDKey is the attribute Key conforming to the - // "gcp.apphub_destination.workload.id" semantic conventions. It represents the - // name of the destination workload as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-workload" - GCPAppHubDestinationWorkloadIDKey = attribute.Key("gcp.apphub_destination.workload.id") - - // GCPClientServiceKey is the attribute Key conforming to the - // "gcp.client.service" semantic conventions. It represents the identifies the - // Google Cloud service for which the official client library is intended. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "appengine", "run", "firestore", "alloydb", "spanner" - // Note: Intended to be a stable identifier for Google Cloud client libraries - // that is uniform across implementation languages. The value should be derived - // from the canonical service domain for the service; for example, - // 'foo.googleapis.com' should result in a value of 'foo'. - GCPClientServiceKey = attribute.Key("gcp.client.service") - - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of - // the Cloud Run [execution] being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`] environment variable. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "job-name-xxxx", "sample-job-mdw84" - // - // [execution]: https://cloud.google.com/run/docs/managing/job-executions - // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index - // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] - // environment variable. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 1 - // - // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") - - // GCPGCEInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname - // of a GCE instance. This is the full value of the default or [custom hostname] - // . - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-host1234.example.com", - // "sample-vm.us-west1-b.c.my-project.internal" - // - // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm - GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGCEInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance name - // of a GCE instance. This is the value provided by `host.name`, the visible - // name of the instance in the Cloud Console UI, and the prefix for the default - // hostname of the instance as defined by the [default internal DNS name]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "instance-1", "my-vm-name" - // - // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names - GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") - - // GCPGCEInstanceGroupManagerNameKey is the attribute Key conforming to the - // "gcp.gce.instance_group_manager.name" semantic conventions. It represents the - // name of the Instance Group Manager (IGM) that manages this VM, if any. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "web-igm", "my-managed-group" - GCPGCEInstanceGroupManagerNameKey = attribute.Key("gcp.gce.instance_group_manager.name") - - // GCPGCEInstanceGroupManagerRegionKey is the attribute Key conforming to the - // "gcp.gce.instance_group_manager.region" semantic conventions. It represents - // the region of a **regional** Instance Group Manager (e.g., `us-central1`). - // Set this **only** when the IGM is regional. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1", "europe-west1" - GCPGCEInstanceGroupManagerRegionKey = attribute.Key("gcp.gce.instance_group_manager.region") - - // GCPGCEInstanceGroupManagerZoneKey is the attribute Key conforming to the - // "gcp.gce.instance_group_manager.zone" semantic conventions. It represents the - // zone of a **zonal** Instance Group Manager (e.g., `us-central1-a`). Set this - // **only** when the IGM is zonal. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1-a", "europe-west1-b" - GCPGCEInstanceGroupManagerZoneKey = attribute.Key("gcp.gce.instance_group_manager.zone") -) - -// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the -// "gcp.apphub.application.container" semantic conventions. 
It represents the -// container within GCP where the AppHub application is defined. -func GCPAppHubApplicationContainer(val string) attribute.KeyValue { - return GCPAppHubApplicationContainerKey.String(val) -} - -// GCPAppHubApplicationID returns an attribute KeyValue conforming to the -// "gcp.apphub.application.id" semantic conventions. It represents the name of -// the application as configured in AppHub. -func GCPAppHubApplicationID(val string) attribute.KeyValue { - return GCPAppHubApplicationIDKey.String(val) -} - -// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the -// "gcp.apphub.application.location" semantic conventions. It represents the GCP -// zone or region where the application is defined. -func GCPAppHubApplicationLocation(val string) attribute.KeyValue { - return GCPAppHubApplicationLocationKey.String(val) -} - -// GCPAppHubServiceID returns an attribute KeyValue conforming to the -// "gcp.apphub.service.id" semantic conventions. It represents the name of the -// service as configured in AppHub. -func GCPAppHubServiceID(val string) attribute.KeyValue { - return GCPAppHubServiceIDKey.String(val) -} - -// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the -// "gcp.apphub.workload.id" semantic conventions. It represents the name of the -// workload as configured in AppHub. -func GCPAppHubWorkloadID(val string) attribute.KeyValue { - return GCPAppHubWorkloadIDKey.String(val) -} - -// GCPAppHubDestinationApplicationContainer returns an attribute KeyValue -// conforming to the "gcp.apphub_destination.application.container" semantic -// conventions. It represents the container within GCP where the AppHub -// destination application is defined. 
-func GCPAppHubDestinationApplicationContainer(val string) attribute.KeyValue { - return GCPAppHubDestinationApplicationContainerKey.String(val) -} - -// GCPAppHubDestinationApplicationID returns an attribute KeyValue conforming to -// the "gcp.apphub_destination.application.id" semantic conventions. It -// represents the name of the destination application as configured in AppHub. -func GCPAppHubDestinationApplicationID(val string) attribute.KeyValue { - return GCPAppHubDestinationApplicationIDKey.String(val) -} - -// GCPAppHubDestinationApplicationLocation returns an attribute KeyValue -// conforming to the "gcp.apphub_destination.application.location" semantic -// conventions. It represents the GCP zone or region where the destination -// application is defined. -func GCPAppHubDestinationApplicationLocation(val string) attribute.KeyValue { - return GCPAppHubDestinationApplicationLocationKey.String(val) -} - -// GCPAppHubDestinationServiceID returns an attribute KeyValue conforming to the -// "gcp.apphub_destination.service.id" semantic conventions. It represents the -// name of the destination service as configured in AppHub. -func GCPAppHubDestinationServiceID(val string) attribute.KeyValue { - return GCPAppHubDestinationServiceIDKey.String(val) -} - -// GCPAppHubDestinationWorkloadID returns an attribute KeyValue conforming to the -// "gcp.apphub_destination.workload.id" semantic conventions. It represents the -// name of the destination workload as configured in AppHub. -func GCPAppHubDestinationWorkloadID(val string) attribute.KeyValue { - return GCPAppHubDestinationWorkloadIDKey.String(val) -} - -// GCPClientService returns an attribute KeyValue conforming to the -// "gcp.client.service" semantic conventions. It represents the identifies the -// Google Cloud service for which the official client library is intended. 
-func GCPClientService(val string) attribute.KeyValue { - return GCPClientServiceKey.String(val) -} - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of -// the Cloud Run [execution] being run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`] environment variable. -// -// [execution]: https://cloud.google.com/run/docs/managing/job-executions -// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index -// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] -// environment variable. -// -// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom hostname] -// . -// -// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm -func GCPGCEInstanceHostname(val string) attribute.KeyValue { - return GCPGCEInstanceHostnameKey.String(val) -} - -// GCPGCEInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance name -// of a GCE instance. This is the value provided by `host.name`, the visible name -// of the instance in the Cloud Console UI, and the prefix for the default -// hostname of the instance as defined by the [default internal DNS name]. 
-// -// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names -func GCPGCEInstanceName(val string) attribute.KeyValue { - return GCPGCEInstanceNameKey.String(val) -} - -// GCPGCEInstanceGroupManagerName returns an attribute KeyValue conforming to the -// "gcp.gce.instance_group_manager.name" semantic conventions. It represents the -// name of the Instance Group Manager (IGM) that manages this VM, if any. -func GCPGCEInstanceGroupManagerName(val string) attribute.KeyValue { - return GCPGCEInstanceGroupManagerNameKey.String(val) -} - -// GCPGCEInstanceGroupManagerRegion returns an attribute KeyValue conforming to -// the "gcp.gce.instance_group_manager.region" semantic conventions. It -// represents the region of a **regional** Instance Group Manager (e.g., -// `us-central1`). Set this **only** when the IGM is regional. -func GCPGCEInstanceGroupManagerRegion(val string) attribute.KeyValue { - return GCPGCEInstanceGroupManagerRegionKey.String(val) -} - -// GCPGCEInstanceGroupManagerZone returns an attribute KeyValue conforming to the -// "gcp.gce.instance_group_manager.zone" semantic conventions. It represents the -// zone of a **zonal** Instance Group Manager (e.g., `us-central1-a`). Set this -// **only** when the IGM is zonal. -func GCPGCEInstanceGroupManagerZone(val string) attribute.KeyValue { - return GCPGCEInstanceGroupManagerZoneKey.String(val) -} - -// Enum values for gcp.apphub.service.criticality_type -var ( - // Mission critical service. - // Stability: development - GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL") - // High impact. - // Stability: development - GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH") - // Medium impact. - // Stability: development - GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") - // Low impact. 
- // Stability: development - GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") -) - -// Enum values for gcp.apphub.service.environment_type -var ( - // Production environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") - // Staging environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") - // Test environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") - // Development environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") -) - -// Enum values for gcp.apphub.workload.criticality_type -var ( - // Mission critical service. - // Stability: development - GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") - // High impact. - // Stability: development - GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") - // Medium impact. - // Stability: development - GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") - // Low impact. - // Stability: development - GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") -) - -// Enum values for gcp.apphub.workload.environment_type -var ( - // Production environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") - // Staging environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") - // Test environment. 
- // Stability: development - GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") - // Development environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") -) - -// Enum values for gcp.apphub_destination.service.criticality_type -var ( - // Mission critical service. - // Stability: development - GCPAppHubDestinationServiceCriticalityTypeMissionCritical = GCPAppHubDestinationServiceCriticalityTypeKey.String("MISSION_CRITICAL") - // High impact. - // Stability: development - GCPAppHubDestinationServiceCriticalityTypeHigh = GCPAppHubDestinationServiceCriticalityTypeKey.String("HIGH") - // Medium impact. - // Stability: development - GCPAppHubDestinationServiceCriticalityTypeMedium = GCPAppHubDestinationServiceCriticalityTypeKey.String("MEDIUM") - // Low impact. - // Stability: development - GCPAppHubDestinationServiceCriticalityTypeLow = GCPAppHubDestinationServiceCriticalityTypeKey.String("LOW") -) - -// Enum values for gcp.apphub_destination.service.environment_type -var ( - // Production environment. - // Stability: development - GCPAppHubDestinationServiceEnvironmentTypeProduction = GCPAppHubDestinationServiceEnvironmentTypeKey.String("PRODUCTION") - // Staging environment. - // Stability: development - GCPAppHubDestinationServiceEnvironmentTypeStaging = GCPAppHubDestinationServiceEnvironmentTypeKey.String("STAGING") - // Test environment. - // Stability: development - GCPAppHubDestinationServiceEnvironmentTypeTest = GCPAppHubDestinationServiceEnvironmentTypeKey.String("TEST") - // Development environment. - // Stability: development - GCPAppHubDestinationServiceEnvironmentTypeDevelopment = GCPAppHubDestinationServiceEnvironmentTypeKey.String("DEVELOPMENT") -) - -// Enum values for gcp.apphub_destination.workload.criticality_type -var ( - // Mission critical service. 
- // Stability: development - GCPAppHubDestinationWorkloadCriticalityTypeMissionCritical = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") - // High impact. - // Stability: development - GCPAppHubDestinationWorkloadCriticalityTypeHigh = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("HIGH") - // Medium impact. - // Stability: development - GCPAppHubDestinationWorkloadCriticalityTypeMedium = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("MEDIUM") - // Low impact. - // Stability: development - GCPAppHubDestinationWorkloadCriticalityTypeLow = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("LOW") -) - -// Enum values for gcp.apphub_destination.workload.environment_type -var ( - // Production environment. - // Stability: development - GCPAppHubDestinationWorkloadEnvironmentTypeProduction = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("PRODUCTION") - // Staging environment. - // Stability: development - GCPAppHubDestinationWorkloadEnvironmentTypeStaging = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("STAGING") - // Test environment. - // Stability: development - GCPAppHubDestinationWorkloadEnvironmentTypeTest = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("TEST") - // Development environment. - // Stability: development - GCPAppHubDestinationWorkloadEnvironmentTypeDevelopment = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("DEVELOPMENT") -) - -// Namespace: gen_ai -const ( - // GenAIAgentDescriptionKey is the attribute Key conforming to the - // "gen_ai.agent.description" semantic conventions. It represents the free-form - // description of the GenAI agent provided by the application. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Helps with math problems", "Generates fiction stories" - GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") - - // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" - // semantic conventions. It represents the unique identifier of the GenAI agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" - GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") - - // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" - // semantic conventions. It represents the human-readable name of the GenAI - // agent provided by the application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Math Tutor", "Fiction Writer" - GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") - - // GenAIAgentVersionKey is the attribute Key conforming to the - // "gen_ai.agent.version" semantic conventions. It represents the version of the - // GenAI agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.0.0", "2025-05-01" - GenAIAgentVersionKey = attribute.Key("gen_ai.agent.version") - - // GenAIConversationIDKey is the attribute Key conforming to the - // "gen_ai.conversation.id" semantic conventions. It represents the unique - // identifier for a conversation (session, thread), used to store and correlate - // messages within this conversation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" - GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") - - // GenAIDataSourceIDKey is the attribute Key conforming to the - // "gen_ai.data_source.id" semantic conventions. It represents the data source - // identifier. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "H7STPQYOND" - // Note: Data sources are used by AI agents and RAG applications to store - // grounding data. A data source may be an external database, object store, - // document collection, website, or any other storage system used by the GenAI - // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier - // used by the GenAI system rather than a name specific to the external storage, - // such as a database or object store. Semantic conventions referencing - // `gen_ai.data_source.id` MAY also leverage additional attributes, such as - // `db.*`, to further identify and describe the data source. - GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") - - // GenAIEmbeddingsDimensionCountKey is the attribute Key conforming to the - // "gen_ai.embeddings.dimension.count" semantic conventions. It represents the - // number of dimensions the resulting output embeddings should have. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 512, 1024 - GenAIEmbeddingsDimensionCountKey = attribute.Key("gen_ai.embeddings.dimension.count") - - // GenAIEvaluationExplanationKey is the attribute Key conforming to the - // "gen_ai.evaluation.explanation" semantic conventions. It represents a - // free-form explanation for the assigned score provided by the evaluator. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "The response is factually accurate but lacks sufficient detail to - // fully address the question." - GenAIEvaluationExplanationKey = attribute.Key("gen_ai.evaluation.explanation") - - // GenAIEvaluationNameKey is the attribute Key conforming to the - // "gen_ai.evaluation.name" semantic conventions. It represents the name of the - // evaluation metric used for the GenAI response. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Relevance", "IntentResolution" - GenAIEvaluationNameKey = attribute.Key("gen_ai.evaluation.name") - - // GenAIEvaluationScoreLabelKey is the attribute Key conforming to the - // "gen_ai.evaluation.score.label" semantic conventions. It represents the human - // readable label for evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "relevant", "not_relevant", "correct", "incorrect", "pass", "fail" - // Note: This attribute provides a human-readable interpretation of the - // evaluation score produced by an evaluator. For example, a score value of 1 - // could mean "relevant" in one evaluation system and "not relevant" in another, - // depending on the scoring range and evaluator. The label SHOULD have low - // cardinality. Possible values depend on the evaluation metric and evaluator - // used; implementations SHOULD document the possible values. - GenAIEvaluationScoreLabelKey = attribute.Key("gen_ai.evaluation.score.label") - - // GenAIEvaluationScoreValueKey is the attribute Key conforming to the - // "gen_ai.evaluation.score.value" semantic conventions. It represents the - // evaluation score returned by the evaluator. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 4.0 - GenAIEvaluationScoreValueKey = attribute.Key("gen_ai.evaluation.score.value") - - // GenAIInputMessagesKey is the attribute Key conforming to the - // "gen_ai.input.messages" semantic conventions. It represents the chat history - // provided to the model as an input. 
- // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n - // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n - // "parts": [\n {\n "type": "tool_call",\n "id": - // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n - // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n - // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n - // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" - // Note: Instrumentations MUST follow [Input messages JSON schema]. - // When the attribute is recorded on events, it MUST be recorded in structured - // form. When recorded on spans, it MAY be recorded as a JSON string if - // structured - // format is not supported and SHOULD be recorded in structured form otherwise. - // - // Messages MUST be provided in the order they were sent to the model. - // Instrumentations MAY provide a way for users to filter or truncate - // input messages. - // - // > [!Warning] - // > This attribute is likely to contain sensitive information including - // > user/PII data. - // - // See [Recording content on attributes] - // section for more details. - // - // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json - // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes - GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") - - // GenAIOperationNameKey is the attribute Key conforming to the - // "gen_ai.operation.name" semantic conventions. It represents the name of the - // operation being performed. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: If one of the predefined values applies, but specific system uses a - // different name it's RECOMMENDED to document it in the semantic conventions - // for specific GenAI system and use system-specific name in the - // instrumentation. If a different name is not documented, instrumentation - // libraries SHOULD use applicable predefined value. - GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") - - // GenAIOutputMessagesKey is the attribute Key conforming to the - // "gen_ai.output.messages" semantic conventions. It represents the messages - // returned by the model where each message represents a specific model response - // (choice, candidate). - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n - // "content": "The weather in Paris is currently rainy with a temperature of - // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n" - // Note: Instrumentations MUST follow [Output messages JSON schema] - // - // Each message represents a single output choice/candidate generated by - // the model. Each message corresponds to exactly one generation - // (choice/candidate) and vice versa - one choice cannot be split across - // multiple messages or one message cannot contain parts from multiple choices. - // - // When the attribute is recorded on events, it MUST be recorded in structured - // form. When recorded on spans, it MAY be recorded as a JSON string if - // structured - // format is not supported and SHOULD be recorded in structured form otherwise. - // - // Instrumentations MAY provide a way for users to filter or truncate - // output messages. - // - // > [!Warning] - // > This attribute is likely to contain sensitive information including - // > user/PII data. 
- // - // See [Recording content on attributes] - // section for more details. - // - // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json - // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes - GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages") - - // GenAIOutputTypeKey is the attribute Key conforming to the - // "gen_ai.output.type" semantic conventions. It represents the represents the - // content type requested by the client. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This attribute SHOULD be used when the client requests output of a - // specific type. The model may return zero or more outputs of this type. - // This attribute specifies the output modality and not the actual output - // format. For example, if an image is requested, the actual output could be a - // URL pointing to an image file. - // Additional output format details may be recorded in the future in the - // `gen_ai.output.{type}.*` attributes. - GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") - - // GenAIPromptNameKey is the attribute Key conforming to the - // "gen_ai.prompt.name" semantic conventions. It represents the name of the - // prompt that uniquely identifies it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "analyze-code" - GenAIPromptNameKey = attribute.Key("gen_ai.prompt.name") - - // GenAIProviderNameKey is the attribute Key conforming to the - // "gen_ai.provider.name" semantic conventions. It represents the Generative AI - // provider as identified by the client or server instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The attribute SHOULD be set based on the instrumentation's best - // knowledge and may differ from the actual model provider. 
- // - // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms - // are accessible using the OpenAI REST API and corresponding client libraries, - // but may proxy or host models from different providers. - // - // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` - // attributes may help identify the actual system in use. - // - // The `gen_ai.provider.name` attribute acts as a discriminator that - // identifies the GenAI telemetry format flavor specific to that provider - // within GenAI semantic conventions. - // It SHOULD be set consistently with provider-specific attributes and signals. - // For example, GenAI spans, metrics, and events related to AWS Bedrock - // should have the `gen_ai.provider.name` set to `aws.bedrock` and include - // applicable `aws.bedrock.*` attributes and are not expected to include - // `openai.*` attributes. - GenAIProviderNameKey = attribute.Key("gen_ai.provider.name") - - // GenAIRequestChoiceCountKey is the attribute Key conforming to the - // "gen_ai.request.choice.count" semantic conventions. It represents the target - // number of candidate completions to return. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3 - GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count") - - // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the - // "gen_ai.request.encoding_formats" semantic conventions. It represents the - // encoding formats requested in an embeddings operation, if specified. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "base64"], ["float", "binary" - // Note: In some GenAI systems the encoding formats are called embedding types. - // Also, some GenAI systems only accept a single format per request. 
- GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats") - - // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the - // "gen_ai.request.frequency_penalty" semantic conventions. It represents the - // frequency penalty setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.1 - GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty") - - // GenAIRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum - // number of tokens the model generates for a request. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAIRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of the - // GenAI model a request is being made to. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: gpt-4 - GenAIRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the - // "gen_ai.request.presence_penalty" semantic conventions. It represents the - // presence penalty setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.1 - GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty") - - // GenAIRequestSeedKey is the attribute Key conforming to the - // "gen_ai.request.seed" semantic conventions. It represents the requests with - // same seed value more likely to return same result. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed") - - // GenAIRequestStopSequencesKey is the attribute Key conforming to the - // "gen_ai.request.stop_sequences" semantic conventions. It represents the list - // of sequences that the model will use to stop generating further tokens. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "forest", "lived" - GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences") - - // GenAIRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.0 - GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAIRequestTopKKey is the attribute Key conforming to the - // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling - // setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k") - - // GenAIRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling - // setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAIResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to each - // generation received. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "stop"], ["stop", "length" - GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAIResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "chatcmpl-123" - GenAIResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAIResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of the - // model that generated the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "gpt-4-0613" - GenAIResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAIRetrievalDocumentsKey is the attribute Key conforming to the - // "gen_ai.retrieval.documents" semantic conventions. It represents the - // documents retrieved. - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "[\n {\n "id": "doc_123",\n "score": 0.95\n },\n {\n "id": - // "doc_456",\n "score": 0.87\n },\n {\n "id": "doc_789",\n "score": 0.82\n - // }\n]\n" - // Note: Instrumentations MUST follow [Retrieval documents JSON schema]. - // When the attribute is recorded on events, it MUST be recorded in structured - // form. When recorded on spans, it MAY be recorded as a JSON string if - // structured - // format is not supported and SHOULD be recorded in structured form otherwise. 
- // - // Each document object SHOULD contain at least the following properties: - // `id` (string): A unique identifier for the document, `score` (double): The - // relevance score of the document - // - // [Retrieval documents JSON schema]: /docs/gen-ai/gen-ai-retrieval-documents.json - GenAIRetrievalDocumentsKey = attribute.Key("gen_ai.retrieval.documents") - - // GenAIRetrievalQueryTextKey is the attribute Key conforming to the - // "gen_ai.retrieval.query.text" semantic conventions. It represents the query - // text used for retrieval. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "What is the capital of France?", "weather in Paris" - // Note: > [!Warning] - // - // > This attribute may contain sensitive information. - GenAIRetrievalQueryTextKey = attribute.Key("gen_ai.retrieval.query.text") - - // GenAISystemInstructionsKey is the attribute Key conforming to the - // "gen_ai.system_instructions" semantic conventions. It represents the system - // message or instructions provided to the GenAI model separately from the chat - // history. - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet - // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type": - // "text",\n "content": "You are a language translator."\n },\n {\n "type": - // "text",\n "content": "Your mission is to translate text in English to - // French."\n }\n]\n" - // Note: This attribute SHOULD be used when the corresponding provider or API - // allows to provide system instructions or messages separately from the - // chat history. - // - // Instructions that are part of the chat history SHOULD be recorded in - // `gen_ai.input.messages` attribute instead. - // - // Instrumentations MUST follow [System instructions JSON schema]. 
- // - // When recorded on spans, it MAY be recorded as a JSON string if structured - // format is not supported and SHOULD be recorded in structured form otherwise. - // - // Instrumentations MAY provide a way for users to filter or truncate - // system instructions. - // - // > [!Warning] - // > This attribute may contain sensitive information. - // - // See [Recording content on attributes] - // section for more details. - // - // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json - // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes - GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions") - - // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" - // semantic conventions. It represents the type of token being counted. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "input", "output" - GenAITokenTypeKey = attribute.Key("gen_ai.token.type") - - // GenAIToolCallArgumentsKey is the attribute Key conforming to the - // "gen_ai.tool.call.arguments" semantic conventions. It represents the - // parameters passed to the tool call. - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{\n "location": "San Francisco?",\n "date": "2025-10-01"\n}\n" - // Note: > [!WARNING] - // - // > This attribute may contain sensitive information. - // - // It's expected to be an object - in case a serialized string is available - // to the instrumentation, the instrumentation SHOULD do the best effort to - // deserialize it to an object. When recorded on spans, it MAY be recorded as a - // JSON string if structured format is not supported and SHOULD be recorded in - // structured form otherwise. 
- GenAIToolCallArgumentsKey = attribute.Key("gen_ai.tool.call.arguments") - - // GenAIToolCallIDKey is the attribute Key conforming to the - // "gen_ai.tool.call.id" semantic conventions. It represents the tool call - // identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4" - GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id") - - // GenAIToolCallResultKey is the attribute Key conforming to the - // "gen_ai.tool.call.result" semantic conventions. It represents the result - // returned by the tool call (if any and if execution was successful). - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{\n "temperature_range": {\n "high": 75,\n "low": 60\n },\n - // "conditions": "sunny"\n}\n" - // Note: > [!WARNING] - // - // > This attribute may contain sensitive information. - // - // It's expected to be an object - in case a serialized string is available - // to the instrumentation, the instrumentation SHOULD do the best effort to - // deserialize it to an object. When recorded on spans, it MAY be recorded as a - // JSON string if structured format is not supported and SHOULD be recorded in - // structured form otherwise. - GenAIToolCallResultKey = attribute.Key("gen_ai.tool.call.result") - - // GenAIToolDefinitionsKey is the attribute Key conforming to the - // "gen_ai.tool.definitions" semantic conventions. It represents the list of - // source system tool definitions available to the GenAI agent or model. - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "[\n {\n "type": "function",\n "name": "get_current_weather",\n - // "description": "Get the current weather in a given location",\n "parameters": - // {\n "type": "object",\n "properties": {\n "location": {\n "type": "string",\n - // "description": "The city and state, e.g. 
San Francisco, CA"\n },\n "unit": - // {\n "type": "string",\n "enum": [\n "celsius",\n "fahrenheit"\n ]\n }\n },\n - // "required": [\n "location",\n "unit"\n ]\n }\n }\n]\n" - // Note: The value of this attribute matches source system tool definition - // format. - // - // It's expected to be an array of objects where each object represents a tool - // definition. In case a serialized string is available - // to the instrumentation, the instrumentation SHOULD do the best effort to - // deserialize it to an array. When recorded on spans, it MAY be recorded as a - // JSON string if structured format is not supported and SHOULD be recorded in - // structured form otherwise. - // - // Since this attribute could be large, it's NOT RECOMMENDED to populate - // it by default. Instrumentations MAY provide a way to enable - // populating this attribute. - GenAIToolDefinitionsKey = attribute.Key("gen_ai.tool.definitions") - - // GenAIToolDescriptionKey is the attribute Key conforming to the - // "gen_ai.tool.description" semantic conventions. It represents the tool - // description. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Multiply two numbers" - GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description") - - // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name" - // semantic conventions. It represents the name of the tool utilized by the - // agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Flights" - GenAIToolNameKey = attribute.Key("gen_ai.tool.name") - - // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type" - // semantic conventions. It represents the type of the tool utilized by the - // agent. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "function", "extension", "datastore" - // Note: Extension: A tool executed on the agent-side to directly call external - // APIs, bridging the gap between the agent and real-world systems. - // Agent-side operations involve actions that are performed by the agent on the - // server or within the agent's controlled environment. - // Function: A tool executed on the client-side, where the agent generates - // parameters for a predefined function, and the client executes the logic. - // Client-side operations are actions taken on the user's end or within the - // client application. - // Datastore: A tool used by the agent to access and query structured or - // unstructured external data for retrieval-augmented tasks or knowledge - // updates. - GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") - - // GenAIUsageCacheCreationInputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.cache_creation.input_tokens" semantic conventions. It - // represents the number of input tokens written to a provider-managed cache. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 25 - // Note: The value SHOULD be included in `gen_ai.usage.input_tokens`. - GenAIUsageCacheCreationInputTokensKey = attribute.Key("gen_ai.usage.cache_creation.input_tokens") - - // GenAIUsageCacheReadInputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.cache_read.input_tokens" semantic conventions. It represents - // the number of input tokens served from a provider-managed cache. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 50 - // Note: The value SHOULD be included in `gen_ai.usage.input_tokens`. 
- GenAIUsageCacheReadInputTokensKey = attribute.Key("gen_ai.usage.cache_read.input_tokens") - - // GenAIUsageInputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of - // tokens used in the GenAI input (prompt). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - // Note: This value SHOULD include all types of input tokens, including cached - // tokens. - // Instrumentations SHOULD make a best effort to populate this value, using a - // total - // provided by the provider when available or, depending on the provider API, - // by summing different token types parsed from the provider output. - GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") - - // GenAIUsageOutputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.output_tokens" semantic conventions. It represents the number - // of tokens used in the GenAI response (completion). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 180 - GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") -) - -// GenAIAgentDescription returns an attribute KeyValue conforming to the -// "gen_ai.agent.description" semantic conventions. It represents the free-form -// description of the GenAI agent provided by the application. -func GenAIAgentDescription(val string) attribute.KeyValue { - return GenAIAgentDescriptionKey.String(val) -} - -// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" -// semantic conventions. It represents the unique identifier of the GenAI agent. -func GenAIAgentID(val string) attribute.KeyValue { - return GenAIAgentIDKey.String(val) -} - -// GenAIAgentName returns an attribute KeyValue conforming to the -// "gen_ai.agent.name" semantic conventions. It represents the human-readable -// name of the GenAI agent provided by the application. 
-func GenAIAgentName(val string) attribute.KeyValue { - return GenAIAgentNameKey.String(val) -} - -// GenAIAgentVersion returns an attribute KeyValue conforming to the -// "gen_ai.agent.version" semantic conventions. It represents the version of the -// GenAI agent. -func GenAIAgentVersion(val string) attribute.KeyValue { - return GenAIAgentVersionKey.String(val) -} - -// GenAIConversationID returns an attribute KeyValue conforming to the -// "gen_ai.conversation.id" semantic conventions. It represents the unique -// identifier for a conversation (session, thread), used to store and correlate -// messages within this conversation. -func GenAIConversationID(val string) attribute.KeyValue { - return GenAIConversationIDKey.String(val) -} - -// GenAIDataSourceID returns an attribute KeyValue conforming to the -// "gen_ai.data_source.id" semantic conventions. It represents the data source -// identifier. -func GenAIDataSourceID(val string) attribute.KeyValue { - return GenAIDataSourceIDKey.String(val) -} - -// GenAIEmbeddingsDimensionCount returns an attribute KeyValue conforming to the -// "gen_ai.embeddings.dimension.count" semantic conventions. It represents the -// number of dimensions the resulting output embeddings should have. -func GenAIEmbeddingsDimensionCount(val int) attribute.KeyValue { - return GenAIEmbeddingsDimensionCountKey.Int(val) -} - -// GenAIEvaluationExplanation returns an attribute KeyValue conforming to the -// "gen_ai.evaluation.explanation" semantic conventions. It represents a -// free-form explanation for the assigned score provided by the evaluator. -func GenAIEvaluationExplanation(val string) attribute.KeyValue { - return GenAIEvaluationExplanationKey.String(val) -} - -// GenAIEvaluationName returns an attribute KeyValue conforming to the -// "gen_ai.evaluation.name" semantic conventions. It represents the name of the -// evaluation metric used for the GenAI response. 
-func GenAIEvaluationName(val string) attribute.KeyValue { - return GenAIEvaluationNameKey.String(val) -} - -// GenAIEvaluationScoreLabel returns an attribute KeyValue conforming to the -// "gen_ai.evaluation.score.label" semantic conventions. It represents the human -// readable label for evaluation. -func GenAIEvaluationScoreLabel(val string) attribute.KeyValue { - return GenAIEvaluationScoreLabelKey.String(val) -} - -// GenAIEvaluationScoreValue returns an attribute KeyValue conforming to the -// "gen_ai.evaluation.score.value" semantic conventions. It represents the -// evaluation score returned by the evaluator. -func GenAIEvaluationScoreValue(val float64) attribute.KeyValue { - return GenAIEvaluationScoreValueKey.Float64(val) -} - -// GenAIPromptName returns an attribute KeyValue conforming to the -// "gen_ai.prompt.name" semantic conventions. It represents the name of the -// prompt that uniquely identifies it. -func GenAIPromptName(val string) attribute.KeyValue { - return GenAIPromptNameKey.String(val) -} - -// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the -// "gen_ai.request.choice.count" semantic conventions. It represents the target -// number of candidate completions to return. -func GenAIRequestChoiceCount(val int) attribute.KeyValue { - return GenAIRequestChoiceCountKey.Int(val) -} - -// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the -// "gen_ai.request.encoding_formats" semantic conventions. It represents the -// encoding formats requested in an embeddings operation, if specified. -func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { - return GenAIRequestEncodingFormatsKey.StringSlice(val) -} - -// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the -// "gen_ai.request.frequency_penalty" semantic conventions. It represents the -// frequency penalty setting for the GenAI request. 
-func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { - return GenAIRequestFrequencyPenaltyKey.Float64(val) -} - -// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the model generates for a request. -func GenAIRequestMaxTokens(val int) attribute.KeyValue { - return GenAIRequestMaxTokensKey.Int(val) -} - -// GenAIRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// GenAI model a request is being made to. -func GenAIRequestModel(val string) attribute.KeyValue { - return GenAIRequestModelKey.String(val) -} - -// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the -// "gen_ai.request.presence_penalty" semantic conventions. It represents the -// presence penalty setting for the GenAI request. -func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { - return GenAIRequestPresencePenaltyKey.Float64(val) -} - -// GenAIRequestSeed returns an attribute KeyValue conforming to the -// "gen_ai.request.seed" semantic conventions. It represents the requests with -// same seed value more likely to return same result. -func GenAIRequestSeed(val int) attribute.KeyValue { - return GenAIRequestSeedKey.Int(val) -} - -// GenAIRequestStopSequences returns an attribute KeyValue conforming to the -// "gen_ai.request.stop_sequences" semantic conventions. It represents the list -// of sequences that the model will use to stop generating further tokens. -func GenAIRequestStopSequences(val ...string) attribute.KeyValue { - return GenAIRequestStopSequencesKey.StringSlice(val) -} - -// GenAIRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the GenAI request. 
-func GenAIRequestTemperature(val float64) attribute.KeyValue { - return GenAIRequestTemperatureKey.Float64(val) -} - -// GenAIRequestTopK returns an attribute KeyValue conforming to the -// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling -// setting for the GenAI request. -func GenAIRequestTopK(val float64) attribute.KeyValue { - return GenAIRequestTopKKey.Float64(val) -} - -// GenAIRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling -// setting for the GenAI request. -func GenAIRequestTopP(val float64) attribute.KeyValue { - return GenAIRequestTopPKey.Float64(val) -} - -// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the -// "gen_ai.response.finish_reasons" semantic conventions. It represents the array -// of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAIResponseFinishReasonsKey.StringSlice(val) -} - -// GenAIResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique identifier -// for the completion. -func GenAIResponseID(val string) attribute.KeyValue { - return GenAIResponseIDKey.String(val) -} - -// GenAIResponseModel returns an attribute KeyValue conforming to the -// "gen_ai.response.model" semantic conventions. It represents the name of the -// model that generated the response. -func GenAIResponseModel(val string) attribute.KeyValue { - return GenAIResponseModelKey.String(val) -} - -// GenAIRetrievalQueryText returns an attribute KeyValue conforming to the -// "gen_ai.retrieval.query.text" semantic conventions. It represents the query -// text used for retrieval. 
-func GenAIRetrievalQueryText(val string) attribute.KeyValue { - return GenAIRetrievalQueryTextKey.String(val) -} - -// GenAIToolCallID returns an attribute KeyValue conforming to the -// "gen_ai.tool.call.id" semantic conventions. It represents the tool call -// identifier. -func GenAIToolCallID(val string) attribute.KeyValue { - return GenAIToolCallIDKey.String(val) -} - -// GenAIToolDescription returns an attribute KeyValue conforming to the -// "gen_ai.tool.description" semantic conventions. It represents the tool -// description. -func GenAIToolDescription(val string) attribute.KeyValue { - return GenAIToolDescriptionKey.String(val) -} - -// GenAIToolName returns an attribute KeyValue conforming to the -// "gen_ai.tool.name" semantic conventions. It represents the name of the tool -// utilized by the agent. -func GenAIToolName(val string) attribute.KeyValue { - return GenAIToolNameKey.String(val) -} - -// GenAIToolType returns an attribute KeyValue conforming to the -// "gen_ai.tool.type" semantic conventions. It represents the type of the tool -// utilized by the agent. -func GenAIToolType(val string) attribute.KeyValue { - return GenAIToolTypeKey.String(val) -} - -// GenAIUsageCacheCreationInputTokens returns an attribute KeyValue conforming to -// the "gen_ai.usage.cache_creation.input_tokens" semantic conventions. It -// represents the number of input tokens written to a provider-managed cache. -func GenAIUsageCacheCreationInputTokens(val int) attribute.KeyValue { - return GenAIUsageCacheCreationInputTokensKey.Int(val) -} - -// GenAIUsageCacheReadInputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.cache_read.input_tokens" semantic conventions. It represents the -// number of input tokens served from a provider-managed cache. 
-func GenAIUsageCacheReadInputTokens(val int) attribute.KeyValue { - return GenAIUsageCacheReadInputTokensKey.Int(val) -} - -// GenAIUsageInputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of -// tokens used in the GenAI input (prompt). -func GenAIUsageInputTokens(val int) attribute.KeyValue { - return GenAIUsageInputTokensKey.Int(val) -} - -// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of -// tokens used in the GenAI response (completion). -func GenAIUsageOutputTokens(val int) attribute.KeyValue { - return GenAIUsageOutputTokensKey.Int(val) -} - -// Enum values for gen_ai.operation.name -var ( - // Chat completion operation such as [OpenAI Chat API] - // Stability: development - // - // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat - GenAIOperationNameChat = GenAIOperationNameKey.String("chat") - // Multimodal content generation operation such as [Gemini Generate Content] - // Stability: development - // - // [Gemini Generate Content]: https://ai.google.dev/api/generate-content - GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") - // Text completions operation such as [OpenAI Completions API (Legacy)] - // Stability: development - // - // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions - GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") - // Embeddings operation such as [OpenAI Create embeddings API] - // Stability: development - // - // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create - GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") - // Retrieval operation such as [OpenAI Search Vector Store API] - // Stability: development - // - // [OpenAI Search Vector 
Store API]: https://platform.openai.com/docs/api-reference/vector-stores/search - GenAIOperationNameRetrieval = GenAIOperationNameKey.String("retrieval") - // Create GenAI agent - // Stability: development - GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") - // Invoke GenAI agent - // Stability: development - GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") - // Execute a tool - // Stability: development - GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") -) - -// Enum values for gen_ai.output.type -var ( - // Plain text - // Stability: development - GenAIOutputTypeText = GenAIOutputTypeKey.String("text") - // JSON object with known or unknown schema - // Stability: development - GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") - // Image - // Stability: development - GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") - // Speech - // Stability: development - GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") -) - -// Enum values for gen_ai.provider.name -var ( - // [OpenAI] - // Stability: development - // - // [OpenAI]: https://openai.com/ - GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") - // Any Google generative AI endpoint - // Stability: development - GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") - // [Vertex AI] - // Stability: development - // - // [Vertex AI]: https://cloud.google.com/vertex-ai - GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") - // [Gemini] - // Stability: development - // - // [Gemini]: https://cloud.google.com/products/gemini - GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") - // [Anthropic] - // Stability: development - // - // [Anthropic]: https://www.anthropic.com/ - GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") - // [Cohere] - // Stability: development - // - // [Cohere]: https://cohere.com/ - 
GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") - // Azure AI Inference - // Stability: development - GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") - // [Azure OpenAI] - // Stability: development - // - // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ - GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") - // [IBM Watsonx AI] - // Stability: development - // - // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai - GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai") - // [AWS Bedrock] - // Stability: development - // - // [AWS Bedrock]: https://aws.amazon.com/bedrock - GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock") - // [Perplexity] - // Stability: development - // - // [Perplexity]: https://www.perplexity.ai/ - GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity") - // [xAI] - // Stability: development - // - // [xAI]: https://x.ai/ - GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai") - // [DeepSeek] - // Stability: development - // - // [DeepSeek]: https://www.deepseek.com/ - GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek") - // [Groq] - // Stability: development - // - // [Groq]: https://groq.com/ - GenAIProviderNameGroq = GenAIProviderNameKey.String("groq") - // [Mistral AI] - // Stability: development - // - // [Mistral AI]: https://mistral.ai/ - GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai") -) - -// Enum values for gen_ai.token.type -var ( - // Input tokens (prompt, input, etc.) - // Stability: development - GenAITokenTypeInput = GenAITokenTypeKey.String("input") - // Output tokens (completion, response, etc.) 
- // Stability: development - GenAITokenTypeOutput = GenAITokenTypeKey.String("output") -) - -// Namespace: geo -const ( - // GeoContinentCodeKey is the attribute Key conforming to the - // "geo.continent.code" semantic conventions. It represents the two-letter code - // representing continent’s name. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - GeoContinentCodeKey = attribute.Key("geo.continent.code") - - // GeoCountryISOCodeKey is the attribute Key conforming to the - // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO - // Country Code ([ISO 3166-1 alpha2]). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CA" - // - // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes - GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") - - // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" - // semantic conventions. It represents the locality name. Represents the name of - // a city, town, village, or similar populated place. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Montreal", "Berlin" - GeoLocalityNameKey = attribute.Key("geo.locality.name") - - // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" - // semantic conventions. It represents the latitude of the geo location in - // [WGS84]. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 45.505918 - // - // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 - GeoLocationLatKey = attribute.Key("geo.location.lat") - - // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" - // semantic conventions. It represents the longitude of the geo location in - // [WGS84]. 
- // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: -73.61483 - // - // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 - GeoLocationLonKey = attribute.Key("geo.location.lon") - - // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" - // semantic conventions. It represents the postal code associated with the - // location. Values appropriate for this field may also be known as a postcode - // or ZIP code and will vary widely from country to country. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "94040" - GeoPostalCodeKey = attribute.Key("geo.postal_code") - - // GeoRegionISOCodeKey is the attribute Key conforming to the - // "geo.region.iso_code" semantic conventions. It represents the region ISO code - // ([ISO 3166-2]). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CA-QC" - // - // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 - GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") -) - -// GeoCountryISOCode returns an attribute KeyValue conforming to the -// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO -// Country Code ([ISO 3166-1 alpha2]). -// -// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes -func GeoCountryISOCode(val string) attribute.KeyValue { - return GeoCountryISOCodeKey.String(val) -} - -// GeoLocalityName returns an attribute KeyValue conforming to the -// "geo.locality.name" semantic conventions. It represents the locality name. -// Represents the name of a city, town, village, or similar populated place. -func GeoLocalityName(val string) attribute.KeyValue { - return GeoLocalityNameKey.String(val) -} - -// GeoLocationLat returns an attribute KeyValue conforming to the -// "geo.location.lat" semantic conventions. It represents the latitude of the geo -// location in [WGS84]. 
-// -// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 -func GeoLocationLat(val float64) attribute.KeyValue { - return GeoLocationLatKey.Float64(val) -} - -// GeoLocationLon returns an attribute KeyValue conforming to the -// "geo.location.lon" semantic conventions. It represents the longitude of the -// geo location in [WGS84]. -// -// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 -func GeoLocationLon(val float64) attribute.KeyValue { - return GeoLocationLonKey.Float64(val) -} - -// GeoPostalCode returns an attribute KeyValue conforming to the -// "geo.postal_code" semantic conventions. It represents the postal code -// associated with the location. Values appropriate for this field may also be -// known as a postcode or ZIP code and will vary widely from country to country. -func GeoPostalCode(val string) attribute.KeyValue { - return GeoPostalCodeKey.String(val) -} - -// GeoRegionISOCode returns an attribute KeyValue conforming to the -// "geo.region.iso_code" semantic conventions. It represents the region ISO code -// ([ISO 3166-2]). 
-// -// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 -func GeoRegionISOCode(val string) attribute.KeyValue { - return GeoRegionISOCodeKey.String(val) -} - -// Enum values for geo.continent.code -var ( - // Africa - // Stability: development - GeoContinentCodeAf = GeoContinentCodeKey.String("AF") - // Antarctica - // Stability: development - GeoContinentCodeAn = GeoContinentCodeKey.String("AN") - // Asia - // Stability: development - GeoContinentCodeAs = GeoContinentCodeKey.String("AS") - // Europe - // Stability: development - GeoContinentCodeEu = GeoContinentCodeKey.String("EU") - // North America - // Stability: development - GeoContinentCodeNa = GeoContinentCodeKey.String("NA") - // Oceania - // Stability: development - GeoContinentCodeOc = GeoContinentCodeKey.String("OC") - // South America - // Stability: development - GeoContinentCodeSa = GeoContinentCodeKey.String("SA") -) - -// Namespace: go -const ( - // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" - // semantic conventions. It represents the type of memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "other", "stack" - GoMemoryTypeKey = attribute.Key("go.memory.type") -) - -// Enum values for go.memory.type -var ( - // Memory allocated from the heap that is reserved for stack space, whether or - // not it is currently in-use. - // Stability: development - GoMemoryTypeStack = GoMemoryTypeKey.String("stack") - // Memory used by the Go runtime, excluding other categories of memory usage - // described in this enumeration. - // Stability: development - GoMemoryTypeOther = GoMemoryTypeKey.String("other") -) - -// Namespace: graphql -const ( - // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" - // semantic conventions. It represents the GraphQL document being executed. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: query findBookById { bookById(id: ?) { name } } - // Note: The value may be sanitized to exclude sensitive information. - GraphQLDocumentKey = attribute.Key("graphql.document") - - // GraphQLOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of the - // operation being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: findBookById - GraphQLOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphQLOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of the - // operation being executed. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "query", "mutation", "subscription" - GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") -) - -// GraphQLDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphQLDocument(val string) attribute.KeyValue { - return GraphQLDocumentKey.String(val) -} - -// GraphQLOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. 
-func GraphQLOperationName(val string) attribute.KeyValue { - return GraphQLOperationNameKey.String(val) -} - -// Enum values for graphql.operation.type -var ( - // GraphQL query - // Stability: development - GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") - // GraphQL mutation - // Stability: development - GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") - // GraphQL subscription - // Stability: development - GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") -) - -// Namespace: heroku -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit hash - // for the current release. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents the - // time and date the release was created. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2022-10-23T18:00:42Z" - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" -// semantic conventions. It represents the unique identifier for the application. 
-func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release. -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the -// "heroku.release.creation_timestamp" semantic conventions. It represents the -// time and date the release was created. -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// Namespace: host -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is running - // on. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of - // level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" - // semantic conventions. It represents the family or generation of the CPU. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6", "PA-RISC 1.1e" - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" - // semantic conventions. It represents the model identifier. 
It provides more - // granular information about the CPU, distinguishing it from other CPUs within - // the same family. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6", "9000/778/B180L" - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" - // semantic conventions. It represents the stepping or core revisions. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1", "r1p1" - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "GenuineIntel" - // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX - // registers. Writing these to memory in this order results in a 12-character - // string. - // - // [CPUID]: https://wiki.osdev.org/CPUID - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be the - // instance_id assigned by the cloud provider. For non-containerized systems, - // this should be the `machine-id`. 
See the table below for the sources to use - // to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "fdbf79e8af94cb7f9e8df36789187052" - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the VM image ID or host OS image ID. For - // Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ami-07b06b442921831e5" - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the "host.image.name" - // semantic conventions. It represents the name of the VM image or OS install - // the host was instantiated from. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version string - // of the VM image or host OS as defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0.1" - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, excluding - // loopback interfaces. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. 
IPv6 - // addresses MUST be specified in the [RFC 5952] format. - // - // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, excluding - // loopback interfaces. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as - // hyphen-separated octets in uppercase hexadecimal form from most to least - // significant. - // - // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified hostname, - // or another name specified by the user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-test" - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "n1-standard-1" - HostTypeKey = attribute.Key("host.type") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). 
-func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or generation -// of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model identifier. -// It provides more granular information about the CPU, distinguishing it from -// other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val string) attribute.KeyValue { - return HostCPUSteppingKey.String(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. -func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use to -// determine the `machine-id` based on operating system. 
-func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the "host.image.id" -// semantic conventions. It represents the VM image ID or host OS image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM image -// or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string of -// the VM image or host OS as defined in [Version Attributes]. -// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic -// conventions. It represents the available MAC addresses of the host, excluding -// loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" semantic -// conventions. It represents the name of the host. On Unix systems, it may -// contain what the hostname command returns, or the fully qualified hostname, or -// another name specified by the user. 
-func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" semantic -// conventions. It represents the type of host. For Cloud, this must be the -// machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Enum values for host.arch -var ( - // AMD64 - // Stability: development - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - // Stability: development - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - // Stability: development - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - // Stability: development - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - // Stability: development - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - // Stability: development - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - // Stability: development - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - // Stability: development - HostArchX86 = HostArchKey.String("x86") -) - -// Namespace: http -const ( - // HTTPConnectionStateKey is the attribute Key conforming to the - // "http.connection.state" semantic conventions. It represents the state of the - // HTTP connection in the HTTP connection pool. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "active", "idle" - HTTPConnectionStateKey = attribute.Key("http.connection.state") - - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of the - // request payload body in bytes. This is the number of bytes transferred - // excluding headers and is often, but not always, present as the - // [Content-Length] header. For requests using transport encoding, this should - // be the compressed size. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the HTTP request - // method. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GET", "POST", "HEAD" - // Note: HTTP request method value SHOULD be "known" to the instrumentation. - // By default, this convention defines "known" methods as the ones listed in - // [RFC9110], - // the PATCH method defined in [RFC5789] - // and the QUERY method defined in [httpbis-safe-method-w-body]. - // - // If the HTTP request method is not known to instrumentation, it MUST set the - // `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of - // case-sensitive known HTTP methods. - // - // - // If this override is done via declarative configuration, then the list MUST be - // configurable via the `known_methods` property - // (an array of case-sensitive strings with minimum items 0) under - // `.instrumentation/development.general.http.client` and/or - // `.instrumentation/development.general.http.server`. - // - // In either case, this list MUST be a full override of the default known - // methods, - // it is not a list of known methods in addition to the defaults. - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. 
- // Instrumentations for specific web frameworks that consider HTTP methods to be - // case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - // - // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods - // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html - // [httpbis-safe-method-w-body]: https://datatracker.ietf.org/doc/draft-ietf-httpbis-safe-method-w-body/?include_text=1 - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GeT", "ACL", "foo" - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including redirects). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending (e.g. - // redirection, authorization failure, 503 Server Unavailable, network issues, - // or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" - // semantic conventions. It represents the total size of the request in bytes. - // This should be the total number of bytes sent over the wire, including the - // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request - // body if any. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - HTTPRequestSizeKey = attribute.Key("http.request.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size of the - // response payload body in bytes. This is the number of bytes transferred - // excluding headers and is often, but not always, present as the - // [Content-Length] header. For requests using transport encoding, this should - // be the compressed size. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseSizeKey is the attribute Key conforming to the - // "http.response.size" semantic conventions. It represents the total size of - // the response in bytes. This should be the total number of bytes sent over the - // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), - // headers, and response body and trailers if any. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - HTTPResponseSizeKey = attribute.Key("http.response.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status code]. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 200 - // - // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic - // conventions. It represents the matched route template for the request. 
This - // MUST be low-cardinality and include all static path segments, with dynamic - // path segments represented with placeholders. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "/users/:userID?", "my-controller/my-action/{id?}" - // Note: MUST NOT be populated when this is not supported by the HTTP server - // framework as the route attribute should have low-cardinality and the URI path - // can NOT substitute it. - // SHOULD include the [application root] if there is one. - // - // A static path segment is a part of the route template with a fixed, - // low-cardinality value. This includes literal strings like `/users/` and - // placeholders that - // are constrained to a finite, predefined set of values, e.g. `{controller}` or - // `{action}`. - // - // A dynamic path segment is a placeholder for a value that can have high - // cardinality and is not constrained to a predefined list like static path - // segments. - // - // Instrumentations SHOULD use routing information provided by the corresponding - // web framework. They SHOULD pick the most precise source of routing - // information and MAY - // support custom route formatting. Instrumentations SHOULD document the format - // and the API used to obtain the route string. - // - // [application root]: /docs/http/http-spans.md#http-server-definitions - HTTPRouteKey = attribute.Key("http.route") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length] header. For requests using transport encoding, this should be -// the compressed size. 
-// -// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestHeader returns an attribute KeyValue conforming to the -// "http.request.header" semantic conventions. It represents the HTTP request -// headers, `` being the normalized HTTP Header name (lowercase), the value -// being the header values. -func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("http.request.header."+key, val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of the -// request in bytes. This should be the total number of bytes sent over the wire, -// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, -// and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of the -// response payload body in bytes. 
This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length] header. For requests using transport encoding, this should be -// the compressed size. -// -// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseHeader returns an attribute KeyValue conforming to the -// "http.response.header" semantic conventions. It represents the HTTP response -// headers, `` being the normalized HTTP Header name (lowercase), the value -// being the header values. -func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("http.response.header."+key, val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. It represents the total size of the -// response in bytes. This should be the total number of bytes sent over the -// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the -// [HTTP response status code]. -// -// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route template for the -// request. This MUST be low-cardinality and include all static path segments, -// with dynamic path segments represented with placeholders. 
-func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Enum values for http.connection.state -var ( - // active state. - // Stability: development - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state. - // Stability: development - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -// Enum values for http.request.method -var ( - // CONNECT method. - // Stability: stable - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method. - // Stability: stable - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method. - // Stability: stable - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method. - // Stability: stable - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method. - // Stability: stable - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method. - // Stability: stable - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method. - // Stability: stable - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method. - // Stability: stable - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method. - // Stability: stable - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // QUERY method. - // Stability: development - HTTPRequestMethodQuery = HTTPRequestMethodKey.String("QUERY") - // Any HTTP method that the instrumentation has no prior knowledge of. - // Stability: stable - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// Namespace: hw -const ( - // HwBatteryCapacityKey is the attribute Key conforming to the - // "hw.battery.capacity" semantic conventions. It represents the design capacity - // in Watts-hours or Amper-hours. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9.3Ah", "50Wh" - HwBatteryCapacityKey = attribute.Key("hw.battery.capacity") - - // HwBatteryChemistryKey is the attribute Key conforming to the - // "hw.battery.chemistry" semantic conventions. It represents the battery - // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Li-ion", "NiMH" - // - // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html - HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") - - // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" - // semantic conventions. It represents the current state of the battery. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwBatteryStateKey = attribute.Key("hw.battery.state") - - // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" - // semantic conventions. It represents the BIOS version of the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2.3" - HwBiosVersionKey = attribute.Key("hw.bios_version") - - // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" - // semantic conventions. It represents the driver version for the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10.2.1-3" - HwDriverVersionKey = attribute.Key("hw.driver_version") - - // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" - // semantic conventions. It represents the type of the enclosure (useful for - // modular systems). 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Computer", "Storage", "Switch" - HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") - - // HwFirmwareVersionKey is the attribute Key conforming to the - // "hw.firmware_version" semantic conventions. It represents the firmware - // version of the hardware component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2.0.1" - HwFirmwareVersionKey = attribute.Key("hw.firmware_version") - - // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic - // conventions. It represents the type of task the GPU is performing. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwGpuTaskKey = attribute.Key("hw.gpu.task") - - // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. - // It represents an identifier for the hardware component, unique within the - // monitored host. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "win32battery_battery_testsysa33_1" - HwIDKey = attribute.Key("hw.id") - - // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" - // semantic conventions. It represents the type of limit for hardware - // components. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwLimitTypeKey = attribute.Key("hw.limit_type") - - // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the - // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID - // Level of the logical disk. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "RAID0+1", "RAID5", "RAID10" - HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") - - // HwLogicalDiskStateKey is the attribute Key conforming to the - // "hw.logical_disk.state" semantic conventions. It represents the state of the - // logical disk space usage. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") - - // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" - // semantic conventions. It represents the type of the memory module. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "DDR4", "DDR5", "LPDDR5" - HwMemoryTypeKey = attribute.Key("hw.memory.type") - - // HwModelKey is the attribute Key conforming to the "hw.model" semantic - // conventions. It represents the descriptive model name of the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" - HwModelKey = attribute.Key("hw.model") - - // HwNameKey is the attribute Key conforming to the "hw.name" semantic - // conventions. It represents an easily-recognizable name for the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "eth0" - HwNameKey = attribute.Key("hw.name") - - // HwNetworkLogicalAddressesKey is the attribute Key conforming to the - // "hw.network.logical_addresses" semantic conventions. It represents the - // logical addresses of the adapter (e.g. IP address, or WWPN). 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "172.16.8.21", "57.11.193.42" - HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") - - // HwNetworkPhysicalAddressKey is the attribute Key conforming to the - // "hw.network.physical_address" semantic conventions. It represents the - // physical address of the adapter (e.g. MAC address, or WWNN). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "00-90-F5-E9-7B-36" - HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") - - // HwParentKey is the attribute Key conforming to the "hw.parent" semantic - // conventions. It represents the unique identifier of the parent component - // (typically the `hw.id` attribute of the enclosure, or disk controller). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "dellStorage_perc_0" - HwParentKey = attribute.Key("hw.parent") - - // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the - // "hw.physical_disk.smart_attribute" semantic conventions. It represents the - // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute - // of the physical disk. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" - // - // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. - HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") - - // HwPhysicalDiskStateKey is the attribute Key conforming to the - // "hw.physical_disk.state" semantic conventions. It represents the state of the - // physical disk endurance utilization. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") - - // HwPhysicalDiskTypeKey is the attribute Key conforming to the - // "hw.physical_disk.type" semantic conventions. It represents the type of the - // physical disk. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "HDD", "SSD", "10K" - HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") - - // HwSensorLocationKey is the attribute Key conforming to the - // "hw.sensor_location" semantic conventions. It represents the location of the - // sensor. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 - // V3_3", "MAIN_12V", "CPU_VCORE" - HwSensorLocationKey = attribute.Key("hw.sensor_location") - - // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" - // semantic conventions. It represents the serial number of the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CNFCP0123456789" - HwSerialNumberKey = attribute.Key("hw.serial_number") - - // HwStateKey is the attribute Key conforming to the "hw.state" semantic - // conventions. It represents the current state of the component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwStateKey = attribute.Key("hw.state") - - // HwTapeDriveOperationTypeKey is the attribute Key conforming to the - // "hw.tape_drive.operation_type" semantic conventions. It represents the type - // of tape drive operation. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") - - // HwTypeKey is the attribute Key conforming to the "hw.type" semantic - // conventions. It represents the type of the component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: Describes the category of the hardware component for which `hw.state` - // is being reported. For example, `hw.type=temperature` along with - // `hw.state=degraded` would indicate that the temperature of the hardware - // component has been reported as `degraded`. - HwTypeKey = attribute.Key("hw.type") - - // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic - // conventions. It represents the vendor name of the hardware component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" - HwVendorKey = attribute.Key("hw.vendor") -) - -// HwBatteryCapacity returns an attribute KeyValue conforming to the -// "hw.battery.capacity" semantic conventions. It represents the design capacity -// in Watts-hours or Amper-hours. -func HwBatteryCapacity(val string) attribute.KeyValue { - return HwBatteryCapacityKey.String(val) -} - -// HwBatteryChemistry returns an attribute KeyValue conforming to the -// "hw.battery.chemistry" semantic conventions. It represents the battery -// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. -// -// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html -func HwBatteryChemistry(val string) attribute.KeyValue { - return HwBatteryChemistryKey.String(val) -} - -// HwBiosVersion returns an attribute KeyValue conforming to the -// "hw.bios_version" semantic conventions. It represents the BIOS version of the -// hardware component. 
-func HwBiosVersion(val string) attribute.KeyValue { - return HwBiosVersionKey.String(val) -} - -// HwDriverVersion returns an attribute KeyValue conforming to the -// "hw.driver_version" semantic conventions. It represents the driver version for -// the hardware component. -func HwDriverVersion(val string) attribute.KeyValue { - return HwDriverVersionKey.String(val) -} - -// HwEnclosureType returns an attribute KeyValue conforming to the -// "hw.enclosure.type" semantic conventions. It represents the type of the -// enclosure (useful for modular systems). -func HwEnclosureType(val string) attribute.KeyValue { - return HwEnclosureTypeKey.String(val) -} - -// HwFirmwareVersion returns an attribute KeyValue conforming to the -// "hw.firmware_version" semantic conventions. It represents the firmware version -// of the hardware component. -func HwFirmwareVersion(val string) attribute.KeyValue { - return HwFirmwareVersionKey.String(val) -} - -// HwID returns an attribute KeyValue conforming to the "hw.id" semantic -// conventions. It represents an identifier for the hardware component, unique -// within the monitored host. -func HwID(val string) attribute.KeyValue { - return HwIDKey.String(val) -} - -// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the -// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID -// Level of the logical disk. -func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { - return HwLogicalDiskRaidLevelKey.String(val) -} - -// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" -// semantic conventions. It represents the type of the memory module. -func HwMemoryType(val string) attribute.KeyValue { - return HwMemoryTypeKey.String(val) -} - -// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic -// conventions. It represents the descriptive model name of the hardware -// component. 
-func HwModel(val string) attribute.KeyValue { - return HwModelKey.String(val) -} - -// HwName returns an attribute KeyValue conforming to the "hw.name" semantic -// conventions. It represents an easily-recognizable name for the hardware -// component. -func HwName(val string) attribute.KeyValue { - return HwNameKey.String(val) -} - -// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the -// "hw.network.logical_addresses" semantic conventions. It represents the logical -// addresses of the adapter (e.g. IP address, or WWPN). -func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { - return HwNetworkLogicalAddressesKey.StringSlice(val) -} - -// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the -// "hw.network.physical_address" semantic conventions. It represents the physical -// address of the adapter (e.g. MAC address, or WWNN). -func HwNetworkPhysicalAddress(val string) attribute.KeyValue { - return HwNetworkPhysicalAddressKey.String(val) -} - -// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic -// conventions. It represents the unique identifier of the parent component -// (typically the `hw.id` attribute of the enclosure, or disk controller). -func HwParent(val string) attribute.KeyValue { - return HwParentKey.String(val) -} - -// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the -// "hw.physical_disk.smart_attribute" semantic conventions. It represents the -// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute -// of the physical disk. -// -// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. -func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { - return HwPhysicalDiskSmartAttributeKey.String(val) -} - -// HwPhysicalDiskType returns an attribute KeyValue conforming to the -// "hw.physical_disk.type" semantic conventions. It represents the type of the -// physical disk. 
-func HwPhysicalDiskType(val string) attribute.KeyValue { - return HwPhysicalDiskTypeKey.String(val) -} - -// HwSensorLocation returns an attribute KeyValue conforming to the -// "hw.sensor_location" semantic conventions. It represents the location of the -// sensor. -func HwSensorLocation(val string) attribute.KeyValue { - return HwSensorLocationKey.String(val) -} - -// HwSerialNumber returns an attribute KeyValue conforming to the -// "hw.serial_number" semantic conventions. It represents the serial number of -// the hardware component. -func HwSerialNumber(val string) attribute.KeyValue { - return HwSerialNumberKey.String(val) -} - -// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic -// conventions. It represents the vendor name of the hardware component. -func HwVendor(val string) attribute.KeyValue { - return HwVendorKey.String(val) -} - -// Enum values for hw.battery.state -var ( - // Charging - // Stability: development - HwBatteryStateCharging = HwBatteryStateKey.String("charging") - // Discharging - // Stability: development - HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") -) - -// Enum values for hw.gpu.task -var ( - // Decoder - // Stability: development - HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") - // Encoder - // Stability: development - HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") - // General - // Stability: development - HwGpuTaskGeneral = HwGpuTaskKey.String("general") -) - -// Enum values for hw.limit_type -var ( - // Critical - // Stability: development - HwLimitTypeCritical = HwLimitTypeKey.String("critical") - // Degraded - // Stability: development - HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") - // High Critical - // Stability: development - HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") - // High Degraded - // Stability: development - HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") - // Low Critical - // Stability: development - 
HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") - // Low Degraded - // Stability: development - HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") - // Maximum - // Stability: development - HwLimitTypeMax = HwLimitTypeKey.String("max") - // Throttled - // Stability: development - HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") - // Turbo - // Stability: development - HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") -) - -// Enum values for hw.logical_disk.state -var ( - // Used - // Stability: development - HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") - // Free - // Stability: development - HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") -) - -// Enum values for hw.physical_disk.state -var ( - // Remaining - // Stability: development - HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining") -) - -// Enum values for hw.state -var ( - // Degraded - // Stability: development - HwStateDegraded = HwStateKey.String("degraded") - // Failed - // Stability: development - HwStateFailed = HwStateKey.String("failed") - // Needs Cleaning - // Stability: development - HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") - // OK - // Stability: development - HwStateOk = HwStateKey.String("ok") - // Predicted Failure - // Stability: development - HwStatePredictedFailure = HwStateKey.String("predicted_failure") -) - -// Enum values for hw.tape_drive.operation_type -var ( - // Mount - // Stability: development - HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") - // Unmount - // Stability: development - HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") - // Clean - // Stability: development - HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") -) - -// Enum values for hw.type -var ( - // Battery - // Stability: development - HwTypeBattery = HwTypeKey.String("battery") - // CPU - // Stability: development - 
HwTypeCPU = HwTypeKey.String("cpu") - // Disk controller - // Stability: development - HwTypeDiskController = HwTypeKey.String("disk_controller") - // Enclosure - // Stability: development - HwTypeEnclosure = HwTypeKey.String("enclosure") - // Fan - // Stability: development - HwTypeFan = HwTypeKey.String("fan") - // GPU - // Stability: development - HwTypeGpu = HwTypeKey.String("gpu") - // Logical disk - // Stability: development - HwTypeLogicalDisk = HwTypeKey.String("logical_disk") - // Memory - // Stability: development - HwTypeMemory = HwTypeKey.String("memory") - // Network - // Stability: development - HwTypeNetwork = HwTypeKey.String("network") - // Physical disk - // Stability: development - HwTypePhysicalDisk = HwTypeKey.String("physical_disk") - // Power supply - // Stability: development - HwTypePowerSupply = HwTypeKey.String("power_supply") - // Tape drive - // Stability: development - HwTypeTapeDrive = HwTypeKey.String("tape_drive") - // Temperature - // Stability: development - HwTypeTemperature = HwTypeKey.String("temperature") - // Voltage - // Stability: development - HwTypeVoltage = HwTypeKey.String("voltage") -) - -// Namespace: ios -const ( - // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" - // semantic conventions. It represents the this attribute represents the state - // of the application. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The iOS lifecycle states are defined in the - // [UIApplicationDelegate documentation], and from which the `OS terminology` - // column values are derived. - // - // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate - IOSAppStateKey = attribute.Key("ios.app.state") -) - -// Enum values for ios.app.state -var ( - // The app has become `active`. Associated with UIKit notification - // `applicationDidBecomeActive`. 
- // - // Stability: development - IOSAppStateActive = IOSAppStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification - // `applicationWillResignActive`. - // - // Stability: development - IOSAppStateInactive = IOSAppStateKey.String("inactive") - // The app is now in the background. This value is associated with UIKit - // notification `applicationDidEnterBackground`. - // - // Stability: development - IOSAppStateBackground = IOSAppStateKey.String("background") - // The app is now in the foreground. This value is associated with UIKit - // notification `applicationWillEnterForeground`. - // - // Stability: development - IOSAppStateForeground = IOSAppStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification - // `applicationWillTerminate`. - // - // Stability: development - IOSAppStateTerminate = IOSAppStateKey.String("terminate") -) - -// Namespace: jsonrpc -const ( - // JSONRPCProtocolVersionKey is the attribute Key conforming to the - // "jsonrpc.protocol.version" semantic conventions. It represents the protocol - // version, as specified in the `jsonrpc` property of the request and its - // corresponding response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2.0", "1.0" - JSONRPCProtocolVersionKey = attribute.Key("jsonrpc.protocol.version") - - // JSONRPCRequestIDKey is the attribute Key conforming to the - // "jsonrpc.request.id" semantic conventions. It represents a string - // representation of the `id` property of the request and its corresponding - // response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10", "request-7" - // Note: Under the [JSON-RPC specification], the `id` property may be a string, - // number, null, or omitted entirely. When omitted, the request is treated as a - // notification. 
Using `null` is not equivalent to omitting the `id`, but it is - // discouraged. - // Instrumentations SHOULD NOT capture this attribute when the `id` is `null` or - // omitted. - // - // [JSON-RPC specification]: https://www.jsonrpc.org/specification - JSONRPCRequestIDKey = attribute.Key("jsonrpc.request.id") -) - -// JSONRPCProtocolVersion returns an attribute KeyValue conforming to the -// "jsonrpc.protocol.version" semantic conventions. It represents the protocol -// version, as specified in the `jsonrpc` property of the request and its -// corresponding response. -func JSONRPCProtocolVersion(val string) attribute.KeyValue { - return JSONRPCProtocolVersionKey.String(val) -} - -// JSONRPCRequestID returns an attribute KeyValue conforming to the -// "jsonrpc.request.id" semantic conventions. It represents a string -// representation of the `id` property of the request and its corresponding -// response. -func JSONRPCRequestID(val string) attribute.KeyValue { - return JSONRPCRequestIDKey.String(val) -} - -// Namespace: k8s -const ( - // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" - // semantic conventions. It represents the name of the cluster. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "opentelemetry-cluster" - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" - // semantic conventions. It represents a pseudo-ID for the cluster, set to the - // UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. 
Read on for the - // rationale. - // - // Every object created in a K8s cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8s ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T X.667]. - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // > different from all other UUIDs generated before 3603 A.D., or is - // > extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - // - // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "redis" - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the number - // of times the container was restarted. This attribute can be used to identify - // a particular container (running or stopped) within a container spec. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to - // the "k8s.container.status.last_terminated_reason" semantic conventions. It - // represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Evicted", "Error" - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SContainerStatusReasonKey is the attribute Key conforming to the - // "k8s.container.status.reason" semantic conventions. It represents the reason - // for the container state. Corresponds to the `reason` field of the: - // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ContainerCreating", "CrashLoopBackOff", - // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", - // "OOMKilled", "Completed", "Error", "ContainerCannotRun" - // - // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core - // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core - K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") - - // K8SContainerStatusStateKey is the attribute Key conforming to the - // "k8s.container.status.state" semantic conventions. It represents the state of - // the container. [K8s ContainerState]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "terminated", "running", "waiting" - // - // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core - K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") - - // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" - // semantic conventions. It represents the name of the CronJob. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "opentelemetry" - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" - // semantic conventions. It represents the UID of the CronJob. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "opentelemetry" - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" - // semantic conventions. It represents the UID of the DaemonSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of the - // Deployment. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "opentelemetry" - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SHPAMetricTypeKey is the attribute Key conforming to the - // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric - // source for the horizontal pod autoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Resource", "ContainerResource" - // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. - K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") - - // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic - // conventions. It represents the name of the horizontal pod autoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SHPANameKey = attribute.Key("k8s.hpa.name") - - // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the - // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the - // API version of the target resource to scale for the HorizontalPodAutoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "apps/v1", "autoscaling/v2" - // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA - // spec. 
- K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") - - // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the - // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of - // the target resource to scale for the HorizontalPodAutoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Deployment", "StatefulSet" - // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. - K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") - - // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the - // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of - // the target resource to scale for the HorizontalPodAutoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-deployment", "my-statefulset" - // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. - K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") - - // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic - // conventions. It represents the UID of the horizontal pod autoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") - - // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" - // semantic conventions. It represents the size (identifier) of the K8s huge - // page. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2Mi" - K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic - // conventions. It represents the name of the Job. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "opentelemetry" - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic - // conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "default" - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNamespacePhaseKey is the attribute Key conforming to the - // "k8s.namespace.phase" semantic conventions. It represents the phase of the - // K8s namespace. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "active", "terminating" - // Note: This attribute aligns with the `phase` field of the - // [K8s NamespaceStatus] - // - // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core - K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") - - // K8SNodeConditionStatusKey is the attribute Key conforming to the - // "k8s.node.condition.status" semantic conventions. It represents the status of - // the condition, one of True, False, Unknown. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "true", "false", "unknown" - // Note: This attribute aligns with the `status` field of the - // [NodeCondition] - // - // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core - K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") - - // K8SNodeConditionTypeKey is the attribute Key conforming to the - // "k8s.node.condition.type" semantic conventions. It represents the condition - // type of a K8s Node. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Ready", "DiskPressure" - // Note: K8s Node conditions as described - // by [K8s documentation]. - // - // This attribute aligns with the `type` field of the - // [NodeCondition] - // - // The set of possible values is not limited to those listed here. Managed - // Kubernetes environments, - // or custom controllers MAY introduce additional node condition types. - // When this occurs, the exact value as reported by the Kubernetes API SHOULD be - // used. - // - // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition - // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core - K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "node-1" - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic - // conventions. It represents the UID of the Node. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodHostnameKey is the attribute Key conforming to the "k8s.pod.hostname" - // semantic conventions. It represents the specifies the hostname of the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "collector-gateway" - // Note: The K8s Pod spec has an optional hostname field, which can be used to - // specify a hostname. - // Refer to [K8s docs] - // for more information about this field. - // - // This attribute aligns with the `hostname` field of the - // [K8s PodSpec]. - // - // [K8s docs]: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-hostname-and-subdomain-field - // [K8s PodSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#podspec-v1-core - K8SPodHostnameKey = attribute.Key("k8s.pod.hostname") - - // K8SPodIPKey is the attribute Key conforming to the "k8s.pod.ip" semantic - // conventions. It represents the IP address allocated to the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "172.18.0.2" - // Note: This attribute aligns with the `podIP` field of the - // [K8s PodStatus]. - // - // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#podstatus-v1-core - K8SPodIPKey = attribute.Key("k8s.pod.ip") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic - // conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "opentelemetry-pod-autoconf" - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodStartTimeKey is the attribute Key conforming to the - // "k8s.pod.start_time" semantic conventions. It represents the start timestamp - // of the Pod. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "2025-12-04T08:41:03Z" - // Note: Date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // - // This attribute aligns with the `startTime` field of the - // [K8s PodStatus], - // in ISO 8601 (RFC 3339 compatible) format. - // - // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#podstatus-v1-core - K8SPodStartTimeKey = attribute.Key("k8s.pod.start_time") - - // K8SPodStatusPhaseKey is the attribute Key conforming to the - // "k8s.pod.status.phase" semantic conventions. It represents the phase for the - // pod. Corresponds to the `phase` field of the: [K8s PodStatus]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Pending", "Running" - // - // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core - K8SPodStatusPhaseKey = attribute.Key("k8s.pod.status.phase") - - // K8SPodStatusReasonKey is the attribute Key conforming to the - // "k8s.pod.status.reason" semantic conventions. It represents the reason for - // the pod state. Corresponds to the `reason` field of the: [K8s PodStatus]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Evicted", "NodeAffinity" - // - // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core - K8SPodStatusReasonKey = attribute.Key("k8s.pod.status.reason") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic - // conventions. It represents the UID of the Pod. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "opentelemetry" - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicationControllerNameKey is the attribute Key conforming to the - // "k8s.replicationcontroller.name" semantic conventions. It represents the name - // of the replication controller. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") - - // K8SReplicationControllerUIDKey is the attribute Key conforming to the - // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID - // of the replication controller. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") - - // K8SResourceQuotaNameKey is the attribute Key conforming to the - // "k8s.resourcequota.name" semantic conventions. It represents the name of the - // resource quota. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") - - // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the - // "k8s.resourcequota.resource_name" semantic conventions. It represents the - // name of the K8s resource a resource quota defines. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "count/replicationcontrollers" - // Note: The value for this attribute can be either the full - // `count/[.]` string (e.g., count/deployments.apps, - // count/pods), or, for certain core Kubernetes resources, just the resource - // name (e.g., pods, services, configmaps). Both forms are supported by - // Kubernetes for object count quotas. See - // [Kubernetes Resource Quotas documentation] for more details. - // - // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#quota-on-object-count - K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name") - - // K8SResourceQuotaUIDKey is the attribute Key conforming to the - // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the - // resource quota. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") - - // K8SServiceEndpointAddressTypeKey is the attribute Key conforming to the - // "k8s.service.endpoint.address_type" semantic conventions. It represents the - // address type of the service endpoint. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "IPv4", "IPv6" - // Note: The network address family or type of the endpoint. - // This attribute aligns with the `addressType` field of the - // [K8s EndpointSlice]. 
- // It is used to differentiate metrics when a Service is backed by multiple - // address types - // (e.g., in dual-stack clusters). - // - // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ - K8SServiceEndpointAddressTypeKey = attribute.Key("k8s.service.endpoint.address_type") - - // K8SServiceEndpointConditionKey is the attribute Key conforming to the - // "k8s.service.endpoint.condition" semantic conventions. It represents the - // condition of the service endpoint. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ready", "serving", "terminating" - // Note: The current operational condition of the service endpoint. - // An endpoint can have multiple conditions set at once (e.g., both `serving` - // and `terminating` during rollout). - // This attribute aligns with the condition fields in the [K8s EndpointSlice]. - // - // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ - K8SServiceEndpointConditionKey = attribute.Key("k8s.service.endpoint.condition") - - // K8SServiceEndpointZoneKey is the attribute Key conforming to the - // "k8s.service.endpoint.zone" semantic conventions. It represents the zone of - // the service endpoint. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-east-1a", "us-west-2b", "zone-a", "" - // Note: The zone where the endpoint is located, typically corresponding to a - // failure domain. - // This attribute aligns with the `zone` field of endpoints in the - // [K8s EndpointSlice]. - // It enables zone-aware monitoring of service endpoint distribution and - // supports - // features like [Topology Aware Routing]. - // - // If the zone is not populated (e.g., nodes without the - // `topology.kubernetes.io/zone` label), - // the attribute value will be an empty string. 
- // - // [K8s EndpointSlice]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1/ - // [Topology Aware Routing]: https://kubernetes.io/docs/concepts/services-networking/topology-aware-routing/ - K8SServiceEndpointZoneKey = attribute.Key("k8s.service.endpoint.zone") - - // K8SServiceNameKey is the attribute Key conforming to the "k8s.service.name" - // semantic conventions. It represents the name of the Service. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-service" - K8SServiceNameKey = attribute.Key("k8s.service.name") - - // K8SServicePublishNotReadyAddressesKey is the attribute Key conforming to the - // "k8s.service.publish_not_ready_addresses" semantic conventions. It represents - // the whether the Service publishes not-ready endpoints. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true, false - // Note: Whether the Service is configured to publish endpoints before the pods - // are ready. - // This attribute is typically used to indicate that a Service (such as a - // headless - // Service for a StatefulSet) allows peer discovery before pods pass their - // readiness probes. - // It aligns with the `publishNotReadyAddresses` field of the - // [K8s ServiceSpec]. - // - // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec - K8SServicePublishNotReadyAddressesKey = attribute.Key("k8s.service.publish_not_ready_addresses") - - // K8SServiceTrafficDistributionKey is the attribute Key conforming to the - // "k8s.service.traffic_distribution" semantic conventions. It represents the - // traffic distribution policy for the Service. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "PreferSameZone", "PreferSameNode" - // Note: Specifies how traffic is distributed to endpoints for this Service. - // This attribute aligns with the `trafficDistribution` field of the - // [K8s ServiceSpec]. - // Known values include `PreferSameZone` (prefer endpoints in the same zone as - // the client) and - // `PreferSameNode` (prefer endpoints on the same node, fallback to same zone, - // then cluster-wide). - // If this field is not set on the Service, the attribute SHOULD NOT be emitted. - // When not set, Kubernetes distributes traffic evenly across all endpoints - // cluster-wide. - // - // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/networking/virtual-ips/#traffic-distribution - K8SServiceTrafficDistributionKey = attribute.Key("k8s.service.traffic_distribution") - - // K8SServiceTypeKey is the attribute Key conforming to the "k8s.service.type" - // semantic conventions. It represents the type of the Kubernetes Service. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ClusterIP", "NodePort", "LoadBalancer" - // Note: This attribute aligns with the `type` field of the - // [K8s ServiceSpec]. - // - // [K8s ServiceSpec]: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec - K8SServiceTypeKey = attribute.Key("k8s.service.type") - - // K8SServiceUIDKey is the attribute Key conforming to the "k8s.service.uid" - // semantic conventions. It represents the UID of the Service. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SServiceUIDKey = attribute.Key("k8s.service.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of the - // StatefulSet. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "opentelemetry" - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Beta - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SStorageclassNameKey is the attribute Key conforming to the - // "k8s.storageclass.name" semantic conventions. It represents the name of K8s - // [StorageClass] object. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "gold.storageclass.storage.k8s.io" - // - // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io - K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name") - - // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" - // semantic conventions. It represents the name of the K8s volume. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "volume0" - K8SVolumeNameKey = attribute.Key("k8s.volume.name") - - // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" - // semantic conventions. It represents the type of the K8s volume. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "emptyDir", "persistentVolumeClaim" - K8SVolumeTypeKey = attribute.Key("k8s.volume.type") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. 
-func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify a -// particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobAnnotation returns an attribute KeyValue conforming to the -// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob -// annotation placed on the CronJob, the `` being the annotation name, the -// value being the annotation value. 
-func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.cronjob.annotation."+key, val) -} - -// K8SCronJobLabel returns an attribute KeyValue conforming to the -// "k8s.cronjob.label" semantic conventions. It represents the label placed on -// the CronJob, the `` being the label name, the value being the label -// value. -func K8SCronJobLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.cronjob.label."+key, val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.daemonset.annotation" semantic conventions. It represents the annotation -// placed on the DaemonSet, the `` being the annotation name, the value -// being the annotation value, even if the value is empty. -func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.daemonset.annotation."+key, val) -} - -// K8SDaemonSetLabel returns an attribute KeyValue conforming to the -// "k8s.daemonset.label" semantic conventions. It represents the label placed on -// the DaemonSet, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.daemonset.label."+key, val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. 
-func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the -// "k8s.deployment.annotation" semantic conventions. It represents the annotation -// placed on the Deployment, the `` being the annotation name, the value -// being the annotation value, even if the value is empty. -func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.deployment.annotation."+key, val) -} - -// K8SDeploymentLabel returns an attribute KeyValue conforming to the -// "k8s.deployment.label" semantic conventions. It represents the label placed on -// the Deployment, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SDeploymentLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.deployment.label."+key, val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SHPAMetricType returns an attribute KeyValue conforming to the -// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric -// source for the horizontal pod autoscaler. 
-func K8SHPAMetricType(val string) attribute.KeyValue { - return K8SHPAMetricTypeKey.String(val) -} - -// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" -// semantic conventions. It represents the name of the horizontal pod autoscaler. -func K8SHPAName(val string) attribute.KeyValue { - return K8SHPANameKey.String(val) -} - -// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the -// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the -// API version of the target resource to scale for the HorizontalPodAutoscaler. -func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue { - return K8SHPAScaletargetrefAPIVersionKey.String(val) -} - -// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the -// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of -// the target resource to scale for the HorizontalPodAutoscaler. -func K8SHPAScaletargetrefKind(val string) attribute.KeyValue { - return K8SHPAScaletargetrefKindKey.String(val) -} - -// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the -// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of -// the target resource to scale for the HorizontalPodAutoscaler. -func K8SHPAScaletargetrefName(val string) attribute.KeyValue { - return K8SHPAScaletargetrefNameKey.String(val) -} - -// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" -// semantic conventions. It represents the UID of the horizontal pod autoscaler. -func K8SHPAUID(val string) attribute.KeyValue { - return K8SHPAUIDKey.String(val) -} - -// K8SHugepageSize returns an attribute KeyValue conforming to the -// "k8s.hugepage.size" semantic conventions. It represents the size (identifier) -// of the K8s huge page. 
-func K8SHugepageSize(val string) attribute.KeyValue { - return K8SHugepageSizeKey.String(val) -} - -// K8SJobAnnotation returns an attribute KeyValue conforming to the -// "k8s.job.annotation" semantic conventions. It represents the annotation placed -// on the Job, the `` being the annotation name, the value being the -// annotation value, even if the value is empty. -func K8SJobAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.job.annotation."+key, val) -} - -// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" -// semantic conventions. It represents the label placed on the Job, the `` -// being the label name, the value being the label value, even if the value is -// empty. -func K8SJobLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.job.label."+key, val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the -// "k8s.namespace.annotation" semantic conventions. It represents the annotation -// placed on the Namespace, the `` being the annotation name, the value -// being the annotation value, even if the value is empty. -func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.namespace.annotation."+key, val) -} - -// K8SNamespaceLabel returns an attribute KeyValue conforming to the -// "k8s.namespace.label" semantic conventions. 
It represents the label placed on -// the Namespace, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SNamespaceLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.namespace.label."+key, val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeAnnotation returns an attribute KeyValue conforming to the -// "k8s.node.annotation" semantic conventions. It represents the annotation -// placed on the Node, the `` being the annotation name, the value being the -// annotation value, even if the value is empty. -func K8SNodeAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.node.annotation."+key, val) -} - -// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" -// semantic conventions. It represents the label placed on the Node, the `` -// being the label name, the value being the label value, even if the value is -// empty. -func K8SNodeLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.node.label."+key, val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" -// semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodAnnotation returns an attribute KeyValue conforming to the -// "k8s.pod.annotation" semantic conventions. 
It represents the annotation placed -// on the Pod, the `` being the annotation name, the value being the -// annotation value. -func K8SPodAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.pod.annotation."+key, val) -} - -// K8SPodHostname returns an attribute KeyValue conforming to the -// "k8s.pod.hostname" semantic conventions. It represents the specifies the -// hostname of the Pod. -func K8SPodHostname(val string) attribute.KeyValue { - return K8SPodHostnameKey.String(val) -} - -// K8SPodIP returns an attribute KeyValue conforming to the "k8s.pod.ip" semantic -// conventions. It represents the IP address allocated to the Pod. -func K8SPodIP(val string) attribute.KeyValue { - return K8SPodIPKey.String(val) -} - -// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" -// semantic conventions. It represents the label placed on the Pod, the `` -// being the label name, the value being the label value. -func K8SPodLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.pod.label."+key, val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodStartTime returns an attribute KeyValue conforming to the -// "k8s.pod.start_time" semantic conventions. It represents the start timestamp -// of the Pod. -func K8SPodStartTime(val string) attribute.KeyValue { - return K8SPodStartTimeKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.replicaset.annotation" semantic conventions. 
It represents the annotation -// placed on the ReplicaSet, the `` being the annotation name, the value -// being the annotation value, even if the value is empty. -func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.replicaset.annotation."+key, val) -} - -// K8SReplicaSetLabel returns an attribute KeyValue conforming to the -// "k8s.replicaset.label" semantic conventions. It represents the label placed on -// the ReplicaSet, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.replicaset.label."+key, val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicationControllerName returns an attribute KeyValue conforming to the -// "k8s.replicationcontroller.name" semantic conventions. It represents the name -// of the replication controller. -func K8SReplicationControllerName(val string) attribute.KeyValue { - return K8SReplicationControllerNameKey.String(val) -} - -// K8SReplicationControllerUID returns an attribute KeyValue conforming to the -// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of -// the replication controller. 
-func K8SReplicationControllerUID(val string) attribute.KeyValue { - return K8SReplicationControllerUIDKey.String(val) -} - -// K8SResourceQuotaName returns an attribute KeyValue conforming to the -// "k8s.resourcequota.name" semantic conventions. It represents the name of the -// resource quota. -func K8SResourceQuotaName(val string) attribute.KeyValue { - return K8SResourceQuotaNameKey.String(val) -} - -// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the -// "k8s.resourcequota.resource_name" semantic conventions. It represents the name -// of the K8s resource a resource quota defines. -func K8SResourceQuotaResourceName(val string) attribute.KeyValue { - return K8SResourceQuotaResourceNameKey.String(val) -} - -// K8SResourceQuotaUID returns an attribute KeyValue conforming to the -// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the -// resource quota. -func K8SResourceQuotaUID(val string) attribute.KeyValue { - return K8SResourceQuotaUIDKey.String(val) -} - -// K8SServiceAnnotation returns an attribute KeyValue conforming to the -// "k8s.service.annotation" semantic conventions. It represents the annotation -// placed on the Service, the `` being the annotation name, the value being -// the annotation value, even if the value is empty. -func K8SServiceAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.service.annotation."+key, val) -} - -// K8SServiceEndpointZone returns an attribute KeyValue conforming to the -// "k8s.service.endpoint.zone" semantic conventions. It represents the zone of -// the service endpoint. -func K8SServiceEndpointZone(val string) attribute.KeyValue { - return K8SServiceEndpointZoneKey.String(val) -} - -// K8SServiceLabel returns an attribute KeyValue conforming to the -// "k8s.service.label" semantic conventions. 
It represents the label placed on -// the Service, the `` being the label name, the value being the label -// value, even if the value is empty. -func K8SServiceLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.service.label."+key, val) -} - -// K8SServiceName returns an attribute KeyValue conforming to the -// "k8s.service.name" semantic conventions. It represents the name of the -// Service. -func K8SServiceName(val string) attribute.KeyValue { - return K8SServiceNameKey.String(val) -} - -// K8SServicePublishNotReadyAddresses returns an attribute KeyValue conforming to -// the "k8s.service.publish_not_ready_addresses" semantic conventions. It -// represents the whether the Service publishes not-ready endpoints. -func K8SServicePublishNotReadyAddresses(val bool) attribute.KeyValue { - return K8SServicePublishNotReadyAddressesKey.Bool(val) -} - -// K8SServiceSelector returns an attribute KeyValue conforming to the -// "k8s.service.selector" semantic conventions. It represents the selector -// key-value pair placed on the Service, the `` being the selector key, the -// value being the selector value. -func K8SServiceSelector(key string, val string) attribute.KeyValue { - return attribute.String("k8s.service.selector."+key, val) -} - -// K8SServiceTrafficDistribution returns an attribute KeyValue conforming to the -// "k8s.service.traffic_distribution" semantic conventions. It represents the -// traffic distribution policy for the Service. -func K8SServiceTrafficDistribution(val string) attribute.KeyValue { - return K8SServiceTrafficDistributionKey.String(val) -} - -// K8SServiceUID returns an attribute KeyValue conforming to the -// "k8s.service.uid" semantic conventions. It represents the UID of the Service. -func K8SServiceUID(val string) attribute.KeyValue { - return K8SServiceUIDKey.String(val) -} - -// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.statefulset.annotation" semantic conventions. 
It represents the -// annotation placed on the StatefulSet, the `` being the annotation name, -// the value being the annotation value, even if the value is empty. -func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.statefulset.annotation."+key, val) -} - -// K8SStatefulSetLabel returns an attribute KeyValue conforming to the -// "k8s.statefulset.label" semantic conventions. It represents the label placed -// on the StatefulSet, the `` being the label name, the value being the -// label value, even if the value is empty. -func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.statefulset.label."+key, val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SStorageclassName returns an attribute KeyValue conforming to the -// "k8s.storageclass.name" semantic conventions. It represents the name of K8s -// [StorageClass] object. -// -// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io -func K8SStorageclassName(val string) attribute.KeyValue { - return K8SStorageclassNameKey.String(val) -} - -// K8SVolumeName returns an attribute KeyValue conforming to the -// "k8s.volume.name" semantic conventions. It represents the name of the K8s -// volume. 
-func K8SVolumeName(val string) attribute.KeyValue { - return K8SVolumeNameKey.String(val) -} - -// Enum values for k8s.container.status.reason -var ( - // The container is being created. - // Stability: development - K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating") - // The container is in a crash loop back off state. - // Stability: development - K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff") - // There was an error creating the container configuration. - // Stability: development - K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError") - // There was an error pulling the container image. - // Stability: development - K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull") - // The container image pull is in back off state. - // Stability: development - K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff") - // The container was killed due to out of memory. - // Stability: development - K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled") - // The container has completed execution. - // Stability: development - K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed") - // There was an error with the container. - // Stability: development - K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error") - // The container cannot run. - // Stability: development - K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun") -) - -// Enum values for k8s.container.status.state -var ( - // The container has terminated. - // Stability: development - K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated") - // The container is running. 
- // Stability: development - K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running") - // The container is waiting. - // Stability: development - K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") -) - -// Enum values for k8s.namespace.phase -var ( - // Active namespace phase as described by [K8s API] - // Stability: development - // - // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase - K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") - // Terminating namespace phase as described by [K8s API] - // Stability: development - // - // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase - K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") -) - -// Enum values for k8s.node.condition.status -var ( - // condition_true - // Stability: development - K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") - // condition_false - // Stability: development - K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") - // condition_unknown - // Stability: development - K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") -) - -// Enum values for k8s.node.condition.type -var ( - // The node is healthy and ready to accept pods - // Stability: development - K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") - // Pressure exists on the disk size—that is, if the disk capacity is low - // Stability: development - K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") - // Pressure exists on the node memory—that is, if the node memory is low - // Stability: development - K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") - // Pressure exists on the processes—that is, if there are too many processes - // on the node - // Stability: development - K8SNodeConditionTypePIDPressure = 
K8SNodeConditionTypeKey.String("PIDPressure") - // The network for the node is not correctly configured - // Stability: development - K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") -) - -// Enum values for k8s.pod.status.phase -var ( - // The pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as - // well as time spent pulling images onto the host. - // - // Stability: development - K8SPodStatusPhasePending = K8SPodStatusPhaseKey.String("Pending") - // The pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being - // restarted. - // - // Stability: development - K8SPodStatusPhaseRunning = K8SPodStatusPhaseKey.String("Running") - // All containers in the pod have voluntarily terminated with a container exit - // code of 0, and the system is not going to restart any of these containers. - // - // Stability: development - K8SPodStatusPhaseSucceeded = K8SPodStatusPhaseKey.String("Succeeded") - // All containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by - // the system). - // - // Stability: development - K8SPodStatusPhaseFailed = K8SPodStatusPhaseKey.String("Failed") - // For some reason the state of the pod could not be obtained, typically due to - // an error in communicating with the host of the pod. - // - // Stability: development - K8SPodStatusPhaseUnknown = K8SPodStatusPhaseKey.String("Unknown") -) - -// Enum values for k8s.pod.status.reason -var ( - // The pod is evicted. 
- // Stability: development - K8SPodStatusReasonEvicted = K8SPodStatusReasonKey.String("Evicted") - // The pod is in a status because of its node affinity - // Stability: development - K8SPodStatusReasonNodeAffinity = K8SPodStatusReasonKey.String("NodeAffinity") - // The reason on a pod when its state cannot be confirmed as kubelet is - // unresponsive on the node it is (was) running. - // - // Stability: development - K8SPodStatusReasonNodeLost = K8SPodStatusReasonKey.String("NodeLost") - // The node is shutdown - // Stability: development - K8SPodStatusReasonShutdown = K8SPodStatusReasonKey.String("Shutdown") - // The pod was rejected admission to the node because of an error during - // admission that could not be categorized. - // - // Stability: development - K8SPodStatusReasonUnexpectedAdmissionError = K8SPodStatusReasonKey.String("UnexpectedAdmissionError") -) - -// Enum values for k8s.service.endpoint.address_type -var ( - // IPv4 address type - // Stability: development - K8SServiceEndpointAddressTypeIPv4 = K8SServiceEndpointAddressTypeKey.String("IPv4") - // IPv6 address type - // Stability: development - K8SServiceEndpointAddressTypeIPv6 = K8SServiceEndpointAddressTypeKey.String("IPv6") - // FQDN address type - // Stability: development - K8SServiceEndpointAddressTypeFqdn = K8SServiceEndpointAddressTypeKey.String("FQDN") -) - -// Enum values for k8s.service.endpoint.condition -var ( - // The endpoint is ready to receive new connections. - // Stability: development - K8SServiceEndpointConditionReady = K8SServiceEndpointConditionKey.String("ready") - // The endpoint is currently handling traffic. - // Stability: development - K8SServiceEndpointConditionServing = K8SServiceEndpointConditionKey.String("serving") - // The endpoint is in the process of shutting down. 
- // Stability: development - K8SServiceEndpointConditionTerminating = K8SServiceEndpointConditionKey.String("terminating") -) - -// Enum values for k8s.service.type -var ( - // ClusterIP service type - // Stability: development - K8SServiceTypeClusterIP = K8SServiceTypeKey.String("ClusterIP") - // NodePort service type - // Stability: development - K8SServiceTypeNodePort = K8SServiceTypeKey.String("NodePort") - // LoadBalancer service type - // Stability: development - K8SServiceTypeLoadBalancer = K8SServiceTypeKey.String("LoadBalancer") - // ExternalName service type - // Stability: development - K8SServiceTypeExternalName = K8SServiceTypeKey.String("ExternalName") -) - -// Enum values for k8s.volume.type -var ( - // A [persistentVolumeClaim] volume - // Stability: development - // - // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim - K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") - // A [configMap] volume - // Stability: development - // - // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap - K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") - // A [downwardAPI] volume - // Stability: development - // - // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi - K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") - // An [emptyDir] volume - // Stability: development - // - // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir - K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") - // A [secret] volume - // Stability: development - // - // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret - K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") - // A [local] volume - // Stability: development - // - // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local - 
K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") -) - -// Namespace: log -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "audit.log" - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the basename of - // the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "uuid.log" - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/var/log/mysql/audit.log" - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full path to - // the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/var/lib/docker/uuid.log" - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") - - // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic - // conventions. It represents the stream associated with the log. See below for - // a list of well-known values. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - LogIostreamKey = attribute.Key("log.iostream") - - // LogRecordOriginalKey is the attribute Key conforming to the - // "log.record.original" semantic conventions. 
It represents the complete - // original Log Record. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - - // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" - // Note: This value MAY be added when processing a Log Record which was - // originally transmitted as a string or equivalent data type AND the Body field - // of the Log Record does not contain the same value. (e.g. a syslog or a log - // record read from a file.) - LogRecordOriginalKey = attribute.Key("log.record.original") - - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log Record. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an - // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other - // identifiers (e.g. UUID) may be used as needed. - // - // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogFileName returns an attribute KeyValue conforming to the "log.file.name" -// semantic conventions. It represents the basename of the file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. 
-func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" -// semantic conventions. It represents the full path to the file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path to -// the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// LogRecordOriginal returns an attribute KeyValue conforming to the -// "log.record.original" semantic conventions. It represents the complete -// original Log Record. -func LogRecordOriginal(val string) attribute.KeyValue { - return LogRecordOriginalKey.String(val) -} - -// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" -// semantic conventions. It represents a unique identifier for the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Enum values for log.iostream -var ( - // Logs from stdout stream - // Stability: development - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - // Stability: development - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Namespace: mainframe -const ( - // MainframeLparNameKey is the attribute Key conforming to the - // "mainframe.lpar.name" semantic conventions. It represents the name of the - // logical partition that hosts a systems with a mainframe operating system. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "LPAR01" - MainframeLparNameKey = attribute.Key("mainframe.lpar.name") -) - -// MainframeLparName returns an attribute KeyValue conforming to the -// "mainframe.lpar.name" semantic conventions. It represents the name of the -// logical partition that hosts a systems with a mainframe operating system. -func MainframeLparName(val string) attribute.KeyValue { - return MainframeLparNameKey.String(val) -} - -// Namespace: mcp -const ( - // McpMethodNameKey is the attribute Key conforming to the "mcp.method.name" - // semantic conventions. It represents the name of the request or notification - // method. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - McpMethodNameKey = attribute.Key("mcp.method.name") - - // McpProtocolVersionKey is the attribute Key conforming to the - // "mcp.protocol.version" semantic conventions. It represents the [version] of - // the Model Context Protocol used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2025-06-18" - // - // [version]: https://modelcontextprotocol.io/specification/versioning - McpProtocolVersionKey = attribute.Key("mcp.protocol.version") - - // McpResourceURIKey is the attribute Key conforming to the "mcp.resource.uri" - // semantic conventions. It represents the value of the resource uri. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "postgres://database/customers/schema", - // "file:///home/user/documents/report.pdf" - // Note: This is a URI of the resource provided in the following requests or - // notifications: `resources/read`, `resources/subscribe`, - // `resources/unsubscribe`, or `notifications/resources/updated`. 
- McpResourceURIKey = attribute.Key("mcp.resource.uri") - - // McpSessionIDKey is the attribute Key conforming to the "mcp.session.id" - // semantic conventions. It represents the identifies [MCP session]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "191c4850af6c49e08843a3f6c80e5046" - // - // [MCP session]: https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#session-management - McpSessionIDKey = attribute.Key("mcp.session.id") -) - -// McpProtocolVersion returns an attribute KeyValue conforming to the -// "mcp.protocol.version" semantic conventions. It represents the [version] of -// the Model Context Protocol used. -// -// [version]: https://modelcontextprotocol.io/specification/versioning -func McpProtocolVersion(val string) attribute.KeyValue { - return McpProtocolVersionKey.String(val) -} - -// McpResourceURI returns an attribute KeyValue conforming to the -// "mcp.resource.uri" semantic conventions. It represents the value of the -// resource uri. -func McpResourceURI(val string) attribute.KeyValue { - return McpResourceURIKey.String(val) -} - -// McpSessionID returns an attribute KeyValue conforming to the "mcp.session.id" -// semantic conventions. It represents the identifies [MCP session]. -// -// [MCP session]: https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#session-management -func McpSessionID(val string) attribute.KeyValue { - return McpSessionIDKey.String(val) -} - -// Enum values for mcp.method.name -var ( - // Notification cancelling a previously-issued request. - // - // Stability: development - McpMethodNameNotificationsCancelled = McpMethodNameKey.String("notifications/cancelled") - // Request to initialize the MCP client. - // - // Stability: development - McpMethodNameInitialize = McpMethodNameKey.String("initialize") - // Notification indicating that the MCP client has been initialized. 
- // - // Stability: development - McpMethodNameNotificationsInitialized = McpMethodNameKey.String("notifications/initialized") - // Notification indicating the progress for a long-running operation. - // - // Stability: development - McpMethodNameNotificationsProgress = McpMethodNameKey.String("notifications/progress") - // Request to check that the other party is still alive. - // - // Stability: development - McpMethodNamePing = McpMethodNameKey.String("ping") - // Request to list resources available on server. - // - // Stability: development - McpMethodNameResourcesList = McpMethodNameKey.String("resources/list") - // Request to list resource templates available on server. - // - // Stability: development - McpMethodNameResourcesTemplatesList = McpMethodNameKey.String("resources/templates/list") - // Request to read a resource. - // - // Stability: development - McpMethodNameResourcesRead = McpMethodNameKey.String("resources/read") - // Notification indicating that the list of resources has changed. - // - // Stability: development - McpMethodNameNotificationsResourcesListChanged = McpMethodNameKey.String("notifications/resources/list_changed") - // Request to subscribe to a resource. - // - // Stability: development - McpMethodNameResourcesSubscribe = McpMethodNameKey.String("resources/subscribe") - // Request to unsubscribe from resource updates. - // - // Stability: development - McpMethodNameResourcesUnsubscribe = McpMethodNameKey.String("resources/unsubscribe") - // Notification indicating that a resource has been updated. - // - // Stability: development - McpMethodNameNotificationsResourcesUpdated = McpMethodNameKey.String("notifications/resources/updated") - // Request to list prompts available on server. - // - // Stability: development - McpMethodNamePromptsList = McpMethodNameKey.String("prompts/list") - // Request to get a prompt. 
- // - // Stability: development - McpMethodNamePromptsGet = McpMethodNameKey.String("prompts/get") - // Notification indicating that the list of prompts has changed. - // - // Stability: development - McpMethodNameNotificationsPromptsListChanged = McpMethodNameKey.String("notifications/prompts/list_changed") - // Request to list tools available on server. - // - // Stability: development - McpMethodNameToolsList = McpMethodNameKey.String("tools/list") - // Request to call a tool. - // - // Stability: development - McpMethodNameToolsCall = McpMethodNameKey.String("tools/call") - // Notification indicating that the list of tools has changed. - // - // Stability: development - McpMethodNameNotificationsToolsListChanged = McpMethodNameKey.String("notifications/tools/list_changed") - // Request to set the logging level. - // - // Stability: development - McpMethodNameLoggingSetLevel = McpMethodNameKey.String("logging/setLevel") - // Notification indicating that a message has been received. - // - // Stability: development - McpMethodNameNotificationsMessage = McpMethodNameKey.String("notifications/message") - // Request to create a sampling message. - // - // Stability: development - McpMethodNameSamplingCreateMessage = McpMethodNameKey.String("sampling/createMessage") - // Request to complete a prompt. - // - // Stability: development - McpMethodNameCompletionComplete = McpMethodNameKey.String("completion/complete") - // Request to list roots available on server. - // - // Stability: development - McpMethodNameRootsList = McpMethodNameKey.String("roots/list") - // Notification indicating that the list of roots has changed. 
- // - // Stability: development - McpMethodNameNotificationsRootsListChanged = McpMethodNameKey.String("notifications/roots/list_changed") - // Request from the server to elicit additional information from the user via - // the client - // - // Stability: development - McpMethodNameElicitationCreate = McpMethodNameKey.String("elicitation/create") -) - -// Namespace: messaging -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the batching - // operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client library - // supports both batch and single-message API for the same operation, - // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs - // and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique identifier - // for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "client-5", "myhost@8742@s8083jm" - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingConsumerGroupNameKey is the attribute Key conforming to the - // "messaging.consumer.group.name" semantic conventions. It represents the name - // of the consumer group with which a consumer is associated. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-group", "indexer" - // Note: Semantic conventions for individual messaging systems SHOULD document - // whether `messaging.consumer.group.name` is applicable and what it means in - // the context of that system. - MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the message - // destination name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MyQueue", "MyTopic" - // Note: Destination name SHOULD uniquely identify a specific queue, topic or - // other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD uniquely - // identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to the - // "messaging.destination.partition.id" semantic conventions. It represents the - // identifier of the partition messages are sent to or received from, unique - // within the `messaging.destination.name`. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to - // the "messaging.destination.subscription.name" semantic conventions. It - // represents the name of the destination subscription from which a message is - // consumed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "subscription-a" - // Note: Semantic conventions for individual messaging systems SHOULD document - // whether `messaging.destination.subscription.name` is applicable and what it - // means in the context of that system. - MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the low - // cardinality representation of the messaging destination name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/customers/{customerId}" - // Note: Destination names could be constructed from templates. An example would - // be a destination name involving a user name or product id. Although the - // destination name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might not - // exist anymore after messages are processed. 
- // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to - // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It - // represents the UTC epoch seconds at which the message has been accepted and - // stored in the entity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") - - // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to - // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It - // represents the ack deadline in seconds set for the modify ack deadline - // request. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the - // ack id for a given message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: ack_id - MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. - // It represents the delivery attempt for a given message. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to - // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It - // represents the ordering key for a given message. If the attribute is not - // present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: ordering_key - MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the message - // keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message.id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myKey - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents a - // boolean that is true if the message is a tombstone. 
- // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - - // MessagingKafkaOffsetKey is the attribute Key conforming to the - // "messaging.kafka.offset" semantic conventions. It represents the offset of a - // record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the size of - // the message body in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: This can refer to both the compressed or uncompressed body size. If - // both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents the - // conversation ID identifying the conversation to which the message belongs, - // represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: MyConversationId - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents the - // size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: This can refer to both the compressed or uncompressed size. 
If both - // sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used by - // the messaging system as an identifier for the message, represented as a - // string. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 452a7c7c7c7048c2f887f61572b18fc2 - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ack", "nack", "send" - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to - // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It - // represents the rabbitMQ message routing key. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myKey - MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the - // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents - // the rabbitMQ message delivery tag. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") - - // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the - // "messaging.rocketmq.consumption_model" semantic conventions. It represents - // the model of message consumption. This only applies to consumer spans. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to - // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It - // represents the delay time level for delay message, which determines the - // message delay time. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming - // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. - // It represents the timestamp in milliseconds that the delay message is - // expected to be delivered to consumer. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents the it - // is essential for FIFO message. Messages that belong to the same message group - // are always processed one by one within the same consumer group. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myMessageGroup - MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents the - // key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "keyA", "keyB" - MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketMQMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: tagA - MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents the - // type of message. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketMQNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myNamespace - MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to - // the "messaging.servicebus.disposition_status" semantic conventions. It - // represents the describes the [settlement type]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock - MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to - // the "messaging.servicebus.message.delivery_count" semantic conventions. It - // represents the number of deliveries that have been attempted for this - // message. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to - // the "messaging.servicebus.message.enqueued_time" semantic conventions. It - // represents the UTC epoch seconds at which the message has been accepted and - // stored in the entity. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") - - // MessagingSystemKey is the attribute Key conforming to the "messaging.system" - // semantic conventions. It represents the messaging system as identified by the - // client instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate with - // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the - // instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to the -// "messaging.batch.message_count" semantic conventions. It represents the number -// of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique identifier -// for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingConsumerGroupName returns an attribute KeyValue conforming to the -// "messaging.consumer.group.name" semantic conventions. It represents the name -// of the consumer group with which a consumer is associated. -func MessagingConsumerGroupName(val string) attribute.KeyValue { - return MessagingConsumerGroupNameKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the -// "messaging.destination.anonymous" semantic conventions. 
It represents a -// boolean that is true if the message destination is anonymous (could be unnamed -// or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name. -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming to -// the "messaging.destination.partition.id" semantic conventions. It represents -// the identifier of the partition messages are sent to or received from, unique -// within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming -// to the "messaging.destination.subscription.name" semantic conventions. It -// represents the name of the destination subscription from which a message is -// consumed. -func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingDestinationSubscriptionNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to the -// "messaging.destination.template" semantic conventions. It represents the low -// cardinality representation of the messaging destination name. -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to the -// "messaging.destination.temporary" semantic conventions. 
It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming -// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It -// represents the UTC epoch seconds at which the message has been accepted and -// stored in the entity. -func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) -} - -// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It -// represents the ack deadline in seconds set for the modify ack deadline -// request. -func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the -// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the -// ack id for a given message. -func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubSubMessageAckIDKey.String(val) -} - -// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It -// represents the ordering key for a given message. 
If the attribute is not -// present, the message does not have an ordering key. -func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubSubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the message -// keys in Kafka are used for grouping alike messages to ensure they're processed -// on the same partition. They differ from `messaging.message.id` in that they're -// not unique. If the key is `null`, the attribute MUST NOT be set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the -// "messaging.kafka.message.tombstone" semantic conventions. It represents a -// boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingKafkaOffset returns an attribute KeyValue conforming to the -// "messaging.kafka.offset" semantic conventions. It represents the offset of a -// record in the corresponding Kafka partition. -func MessagingKafkaOffset(val int) attribute.KeyValue { - return MessagingKafkaOffsetKey.Int(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size of -// the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming to the -// "messaging.message.conversation_id" semantic conventions. It represents the -// conversation ID identifying the conversation to which the message belongs, -// represented as a string. Sometimes called "Correlation ID". 
-func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the -// "messaging.message.envelope.size" semantic conventions. It represents the size -// of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by the -// messaging system as an identifier for the message, represented as a string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitMQDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming -// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It -// represents the rabbitMQ message delivery tag. -func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitMQMessageDeliveryTagKey.Int(val) -} - -// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. 
It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketMQMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketMQMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.group" semantic conventions. It represents the it -// is essential for FIFO message. Messages that belong to the same message group -// are always processed one by one within the same consumer group. -func MessagingRocketMQMessageGroup(val string) attribute.KeyValue { - return MessagingRocketMQMessageGroupKey.String(val) -} - -// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.keys" semantic conventions. It represents the -// key(s) of message, another way to mark message besides message id. -func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketMQMessageKeysKey.StringSlice(val) -} - -// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketMQMessageTag(val string) attribute.KeyValue { - return MessagingRocketMQMessageTagKey.String(val) -} - -// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the -// "messaging.rocketmq.namespace" semantic conventions. 
It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketMQNamespace(val string) attribute.KeyValue { - return MessagingRocketMQNamespaceKey.String(val) -} - -// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message. -func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServiceBusMessageDeliveryCountKey.Int(val) -} - -// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has been -// accepted and stored in the entity. -func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) -} - -// Enum values for messaging.operation.type -var ( - // A message is created. "Create" spans always refer to a single message and are - // used to provide a unique creation context for messages in batch sending - // scenarios. - // - // Stability: development - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are provided for sending to an intermediary. If a single - // message is sent, the context of the "Send" span can be used as the creation - // context and no "Create" span needs to be created. - // - // Stability: development - MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") - // One or more messages are requested by a consumer. This operation refers to - // pull-based scenarios, where consumers explicitly call methods of messaging - // SDKs to receive messages. 
- // - // Stability: development - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are processed by a consumer. - // - // Stability: development - MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") - // One or more messages are settled. - // - // Stability: development - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -// Enum values for messaging.rocketmq.consumption_model -var ( - // Clustering consumption model - // Stability: development - MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") - // Broadcasting consumption model - // Stability: development - MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") -) - -// Enum values for messaging.rocketmq.message.type -var ( - // Normal message - // Stability: development - MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") - // FIFO message - // Stability: development - MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") - // Delay message - // Stability: development - MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") - // Transaction message - // Stability: development - MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") -) - -// Enum values for messaging.servicebus.disposition_status -var ( - // Message is completed - // Stability: development - MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") - // Message is abandoned - // Stability: development - MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - // Stability: development - MessagingServiceBusDispositionStatusDeadLetter = 
MessagingServiceBusDispositionStatusKey.String("dead_letter") - // Message is deferred - // Stability: development - MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") -) - -// Enum values for messaging.system -var ( - // Apache ActiveMQ - // Stability: development - MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") - // Amazon Simple Notification Service (SNS) - // Stability: development - MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") - // Amazon Simple Queue Service (SQS) - // Stability: development - MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - // Stability: development - MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - // Stability: development - MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - // Stability: development - MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - // Stability: development - MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - // Stability: development - MessagingSystemJMS = MessagingSystemKey.String("jms") - // Apache Kafka - // Stability: development - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - // Stability: development - MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - // Stability: development - MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") - // Apache Pulsar - // Stability: development - MessagingSystemPulsar = MessagingSystemKey.String("pulsar") -) - -// Namespace: network -const ( - // NetworkCarrierICCKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier network. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: DE - NetworkCarrierICCKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMCCKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile carrier - // country code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 310 - NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMNCKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile carrier - // network code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 001 - NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of the - // mobile carrier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: sprint - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionStateKey is the attribute Key conforming to the - // "network.connection.state" semantic conventions. It represents the state of - // network connection. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "close_wait" - // Note: Connection states are defined as part of the [rfc9293] - // - // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2 - NetworkConnectionStateKey = attribute.Key("network.connection.state") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the this - // describes more details regarding the connection.type. 
It may be the type of - // cell technology connection, but it could be used for describing details about - // a wifi connection. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: LTE - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the internet - // connection type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: wifi - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkInterfaceNameKey is the attribute Key conforming to the - // "network.interface.name" semantic conventions. It represents the network - // interface name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "lo", "eth0" - NetworkInterfaceNameKey = attribute.Key("network.interface.name") - - // NetworkIODirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "transmit" - NetworkIODirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local address - // of the network connection - IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "10.1.2.80", "/tmp/my.sock" - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer address - // of the network connection - IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "10.1.2.80", "/tmp/my.sock" - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" - // semantic conventions. It represents the peer port number of the network - // connection. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the - // [OSI application layer] or non-OSI equivalent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "amqp", "http", "mqtt" - // Note: The value SHOULD be normalized to lowercase. - // - // [OSI application layer]: https://wikipedia.org/wiki/Application_layer - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the actual - // version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.1", "2" - // Note: If protocol version is subject to negotiation (for example using [ALPN] - // ), this attribute SHOULD be set to the negotiated version. If the actual - // protocol version is not known, this attribute SHOULD NOT be set. 
- // - // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the - // [OSI transport layer] or [inter-process communication method]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "tcp", "udp" - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port 12345. - // - // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer - // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic - // conventions. It represents the [OSI network layer] or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "ipv4", "ipv6" - // Note: The value SHOULD be normalized to lowercase. - // - // [OSI network layer]: https://wikipedia.org/wiki/Network_layer - NetworkTypeKey = attribute.Key("network.type") -) - -// NetworkCarrierICC returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierICC(val string) attribute.KeyValue { - return NetworkCarrierICCKey.String(val) -} - -// NetworkCarrierMCC returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. 
-func NetworkCarrierMCC(val string) attribute.KeyValue { - return NetworkCarrierMCCKey.String(val) -} - -// NetworkCarrierMNC returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMNC(val string) attribute.KeyValue { - return NetworkCarrierMNCKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkInterfaceName returns an attribute KeyValue conforming to the -// "network.interface.name" semantic conventions. It represents the network -// interface name. -func NetworkInterfaceName(val string) attribute.KeyValue { - return NetworkInterfaceNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local address -// of the network connection - IP address or Unix domain socket name. -func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port number -// of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address of -// the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. 
It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the -// [OSI application layer] or non-OSI equivalent. -// -// [OSI application layer]: https://wikipedia.org/wiki/Application_layer -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Enum values for network.connection.state -var ( - // closed - // Stability: development - NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") - // close_wait - // Stability: development - NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") - // closing - // Stability: development - NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") - // established - // Stability: development - NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") - // fin_wait_1 - // Stability: development - NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") - // fin_wait_2 - // Stability: development - NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") - // last_ack - // Stability: development - NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") - // listen - // Stability: development - NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") - // syn_received - // Stability: development - NetworkConnectionStateSynReceived = 
NetworkConnectionStateKey.String("syn_received") - // syn_sent - // Stability: development - NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") - // time_wait - // Stability: development - NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") -) - -// Enum values for network.connection.subtype -var ( - // GPRS - // Stability: development - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - // Stability: development - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - // Stability: development - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - // Stability: development - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - // Stability: development - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - // Stability: development - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - // Stability: development - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - // Stability: development - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - // Stability: development - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - // Stability: development - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - // Stability: development - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - // Stability: development - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - // Stability: development - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - // Stability: development - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - // Stability: development - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - // Stability: development - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - // Stability: development - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - // Stability: development - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - // Stability: development - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - // Stability: development - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - // Stability: development - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -// Enum values for network.connection.type -var ( - // wifi - // Stability: development - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - // Stability: development - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - // Stability: development - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - // Stability: development - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - // Stability: development - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -// Enum values for network.io.direction -var ( - // transmit - // Stability: development - NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") - // 
receive - // Stability: development - NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") -) - -// Enum values for network.transport -var ( - // TCP - // Stability: stable - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - // Stability: stable - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe. - // Stability: stable - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - // Stability: stable - NetworkTransportUnix = NetworkTransportKey.String("unix") - // QUIC - // Stability: stable - NetworkTransportQUIC = NetworkTransportKey.String("quic") -) - -// Enum values for network.type -var ( - // IPv4 - // Stability: stable - NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") - // IPv6 - // Stability: stable - NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") -) - -// Namespace: nfs -const ( - // NfsOperationNameKey is the attribute Key conforming to the - // "nfs.operation.name" semantic conventions. It represents the NFSv4+ operation - // name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "OPEN", "READ", "GETATTR" - NfsOperationNameKey = attribute.Key("nfs.operation.name") - - // NfsServerRepcacheStatusKey is the attribute Key conforming to the - // "nfs.server.repcache.status" semantic conventions. It represents the linux: - // one of "hit" (NFSD_STATS_RC_HITS), "miss" (NFSD_STATS_RC_MISSES), or - // "nocache" (NFSD_STATS_RC_NOCACHE -- uncacheable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: hit - NfsServerRepcacheStatusKey = attribute.Key("nfs.server.repcache.status") -) - -// NfsOperationName returns an attribute KeyValue conforming to the -// "nfs.operation.name" semantic conventions. It represents the NFSv4+ operation -// name. 
-func NfsOperationName(val string) attribute.KeyValue { - return NfsOperationNameKey.String(val) -} - -// NfsServerRepcacheStatus returns an attribute KeyValue conforming to the -// "nfs.server.repcache.status" semantic conventions. It represents the linux: -// one of "hit" (NFSD_STATS_RC_HITS), "miss" (NFSD_STATS_RC_MISSES), or "nocache" -// (NFSD_STATS_RC_NOCACHE -- uncacheable). -func NfsServerRepcacheStatus(val string) attribute.KeyValue { - return NfsServerRepcacheStatusKey.String(val) -} - -// Namespace: oci -const ( - // OCIManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of the - // OCI image manifest. For container images specifically is the digest by which - // the container image is known. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" - // Note: Follows [OCI Image Manifest Specification], and specifically the - // [Digest property]. - // An example can be found in [Example Image Manifest]. - // - // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md - // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests - // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest - OCIManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OCIManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. 
-func OCIManifestDigest(val string) attribute.KeyValue { - return OCIManifestDigestKey.String(val) -} - -// Namespace: onc_rpc -const ( - // OncRPCProcedureNameKey is the attribute Key conforming to the - // "onc_rpc.procedure.name" semantic conventions. It represents the ONC/Sun RPC - // procedure name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "OPEN", "READ", "GETATTR" - OncRPCProcedureNameKey = attribute.Key("onc_rpc.procedure.name") - - // OncRPCProcedureNumberKey is the attribute Key conforming to the - // "onc_rpc.procedure.number" semantic conventions. It represents the ONC/Sun - // RPC procedure number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OncRPCProcedureNumberKey = attribute.Key("onc_rpc.procedure.number") - - // OncRPCProgramNameKey is the attribute Key conforming to the - // "onc_rpc.program.name" semantic conventions. It represents the ONC/Sun RPC - // program name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "portmapper", "nfs" - OncRPCProgramNameKey = attribute.Key("onc_rpc.program.name") - - // OncRPCVersionKey is the attribute Key conforming to the "onc_rpc.version" - // semantic conventions. It represents the ONC/Sun RPC program version. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OncRPCVersionKey = attribute.Key("onc_rpc.version") -) - -// OncRPCProcedureName returns an attribute KeyValue conforming to the -// "onc_rpc.procedure.name" semantic conventions. It represents the ONC/Sun RPC -// procedure name. -func OncRPCProcedureName(val string) attribute.KeyValue { - return OncRPCProcedureNameKey.String(val) -} - -// OncRPCProcedureNumber returns an attribute KeyValue conforming to the -// "onc_rpc.procedure.number" semantic conventions. It represents the ONC/Sun RPC -// procedure number. 
-func OncRPCProcedureNumber(val int) attribute.KeyValue { - return OncRPCProcedureNumberKey.Int(val) -} - -// OncRPCProgramName returns an attribute KeyValue conforming to the -// "onc_rpc.program.name" semantic conventions. It represents the ONC/Sun RPC -// program name. -func OncRPCProgramName(val string) attribute.KeyValue { - return OncRPCProgramNameKey.String(val) -} - -// OncRPCVersion returns an attribute KeyValue conforming to the -// "onc_rpc.version" semantic conventions. It represents the ONC/Sun RPC program -// version. -func OncRPCVersion(val int) attribute.KeyValue { - return OncRPCVersionKey.Int(val) -} - -// Namespace: openai -const ( - // OpenAIAPITypeKey is the attribute Key conforming to the "openai.api.type" - // semantic conventions. It represents the type of OpenAI API being used. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OpenAIAPITypeKey = attribute.Key("openai.api.type") - - // OpenAIRequestServiceTierKey is the attribute Key conforming to the - // "openai.request.service_tier" semantic conventions. It represents the service - // tier requested. May be a specific tier, default, or auto. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "auto", "default" - OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") - - // OpenAIResponseServiceTierKey is the attribute Key conforming to the - // "openai.response.service_tier" semantic conventions. It represents the - // service tier used for the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "scale", "default" - OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") - - // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the - // "openai.response.system_fingerprint" semantic conventions. 
It represents a - // fingerprint to track any eventual change in the Generative AI environment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "fp_44709d6fcb" - OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") -) - -// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the -// "openai.response.service_tier" semantic conventions. It represents the service -// tier used for the response. -func OpenAIResponseServiceTier(val string) attribute.KeyValue { - return OpenAIResponseServiceTierKey.String(val) -} - -// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to -// the "openai.response.system_fingerprint" semantic conventions. It represents a -// fingerprint to track any eventual change in the Generative AI environment. -func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { - return OpenAIResponseSystemFingerprintKey.String(val) -} - -// Enum values for openai.api.type -var ( - // The OpenAI [Chat Completions API]. - // Stability: development - // - // [Chat Completions API]: https://developers.openai.com/api/reference/chat-completions/overview - OpenAIAPITypeChatCompletions = OpenAIAPITypeKey.String("chat_completions") - // The OpenAI [Responses API]. - // Stability: development - // - // [Responses API]: https://developers.openai.com/api/reference/responses/overview - OpenAIAPITypeResponses = OpenAIAPITypeKey.String("responses") -) - -// Enum values for openai.request.service_tier -var ( - // The system will utilize scale tier credits until they are exhausted. - // Stability: development - OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") - // The system will utilize the default scale tier. 
- // Stability: development - OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") -) - -// Namespace: openshift -const ( - // OpenShiftClusterquotaNameKey is the attribute Key conforming to the - // "openshift.clusterquota.name" semantic conventions. It represents the name of - // the cluster quota. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - OpenShiftClusterquotaNameKey = attribute.Key("openshift.clusterquota.name") - - // OpenShiftClusterquotaUIDKey is the attribute Key conforming to the - // "openshift.clusterquota.uid" semantic conventions. It represents the UID of - // the cluster quota. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - OpenShiftClusterquotaUIDKey = attribute.Key("openshift.clusterquota.uid") -) - -// OpenShiftClusterquotaName returns an attribute KeyValue conforming to the -// "openshift.clusterquota.name" semantic conventions. It represents the name of -// the cluster quota. -func OpenShiftClusterquotaName(val string) attribute.KeyValue { - return OpenShiftClusterquotaNameKey.String(val) -} - -// OpenShiftClusterquotaUID returns an attribute KeyValue conforming to the -// "openshift.clusterquota.uid" semantic conventions. It represents the UID of -// the cluster quota. -func OpenShiftClusterquotaUID(val string) attribute.KeyValue { - return OpenShiftClusterquotaUIDKey.String(val) -} - -// Namespace: opentracing -const ( - // OpenTracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the parent-child - // Reference type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The causal relationship between a child Span and a parent Span. 
- OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -// Enum values for opentracing.ref_type -var ( - // The parent Span depends on the child Span in some capacity - // Stability: development - OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - // Stability: development - OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") -) - -// Namespace: oracle -const ( - // OracleDBDomainKey is the attribute Key conforming to the "oracle.db.domain" - // semantic conventions. It represents the database domain associated with the - // connection. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.com", "corp.internal", "prod.db.local" - // Note: This attribute SHOULD be set to the value of the `DB_DOMAIN` - // initialization parameter, - // as exposed in `v$parameter`. `DB_DOMAIN` defines the domain portion of the - // global - // database name and SHOULD be configured when a database is, or may become, - // part of a - // distributed environment. Its value consists of one or more valid identifiers - // (alphanumeric ASCII characters) separated by periods. - OracleDBDomainKey = attribute.Key("oracle.db.domain") - - // OracleDBInstanceNameKey is the attribute Key conforming to the - // "oracle.db.instance.name" semantic conventions. It represents the instance - // name associated with the connection in an Oracle Real Application Clusters - // environment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ORCL1", "ORCL2", "ORCL3" - // Note: There can be multiple instances associated with a single database - // service. It indicates the - // unique instance name to which the connection is currently bound. For non-RAC - // databases, this value - // defaults to the `oracle.db.name`. 
- OracleDBInstanceNameKey = attribute.Key("oracle.db.instance.name") - - // OracleDBNameKey is the attribute Key conforming to the "oracle.db.name" - // semantic conventions. It represents the database name associated with the - // connection. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ORCL1", "FREE" - // Note: This attribute SHOULD be set to the value of the parameter `DB_NAME` - // exposed in `v$parameter`. - OracleDBNameKey = attribute.Key("oracle.db.name") - - // OracleDBPdbKey is the attribute Key conforming to the "oracle.db.pdb" - // semantic conventions. It represents the pluggable database (PDB) name - // associated with the connection. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "PDB1", "FREEPDB" - // Note: This attribute SHOULD reflect the PDB that the session is currently - // connected to. - // If instrumentation cannot reliably obtain the active PDB name for each - // operation - // without issuing an additional query (such as `SELECT SYS_CONTEXT`), it is - // RECOMMENDED to fall back to the PDB name specified at connection - // establishment. - OracleDBPdbKey = attribute.Key("oracle.db.pdb") - - // OracleDBServiceKey is the attribute Key conforming to the "oracle.db.service" - // semantic conventions. It represents the service name currently associated - // with the database connection. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "order-processing-service", "db_low.adb.oraclecloud.com", - // "db_high.adb.oraclecloud.com" - // Note: The effective service name for a connection can change during its - // lifetime, - // for example after executing sql, `ALTER SESSION`. 
If an instrumentation - // cannot reliably - // obtain the current service name for each operation without issuing an - // additional - // query (such as `SELECT SYS_CONTEXT`), it is RECOMMENDED to fall back to the - // service name originally provided at connection establishment. - OracleDBServiceKey = attribute.Key("oracle.db.service") -) - -// OracleDBDomain returns an attribute KeyValue conforming to the -// "oracle.db.domain" semantic conventions. It represents the database domain -// associated with the connection. -func OracleDBDomain(val string) attribute.KeyValue { - return OracleDBDomainKey.String(val) -} - -// OracleDBInstanceName returns an attribute KeyValue conforming to the -// "oracle.db.instance.name" semantic conventions. It represents the instance -// name associated with the connection in an Oracle Real Application Clusters -// environment. -func OracleDBInstanceName(val string) attribute.KeyValue { - return OracleDBInstanceNameKey.String(val) -} - -// OracleDBName returns an attribute KeyValue conforming to the "oracle.db.name" -// semantic conventions. It represents the database name associated with the -// connection. -func OracleDBName(val string) attribute.KeyValue { - return OracleDBNameKey.String(val) -} - -// OracleDBPdb returns an attribute KeyValue conforming to the "oracle.db.pdb" -// semantic conventions. It represents the pluggable database (PDB) name -// associated with the connection. -func OracleDBPdb(val string) attribute.KeyValue { - return OracleDBPdbKey.String(val) -} - -// OracleDBService returns an attribute KeyValue conforming to the -// "oracle.db.service" semantic conventions. It represents the service name -// currently associated with the database connection. -func OracleDBService(val string) attribute.KeyValue { - return OracleDBServiceKey.String(val) -} - -// Namespace: oracle_cloud -const ( - // OracleCloudRealmKey is the attribute Key conforming to the - // "oracle_cloud.realm" semantic conventions. 
It represents the OCI realm - // identifier that indicates the isolated partition in which the tenancy and its - // resources reside. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "oc1", "oc2" - // Note: See [OCI documentation on realms] - // - // [OCI documentation on realms]: https://docs.oracle.com/iaas/Content/General/Concepts/regions.htm - OracleCloudRealmKey = attribute.Key("oracle_cloud.realm") -) - -// OracleCloudRealm returns an attribute KeyValue conforming to the -// "oracle_cloud.realm" semantic conventions. It represents the OCI realm -// identifier that indicates the isolated partition in which the tenancy and its -// resources reside. -func OracleCloudRealm(val string) attribute.KeyValue { - return OracleCloudRealmKey.String(val) -} - -// Namespace: os -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic - // conventions. It represents the unique identifier for a particular build or - // compilation of the operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TQ3C.230805.001.B2", "20E247", "22621" - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to be - // parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iOS", "Android", "Ubuntu" - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" semantic - // conventions. It represents the version string of the operating system as - // defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.2.1", "18.04.1" - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - OSVersionKey = attribute.Key("os.version") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the "os.description" -// semantic conventions. It represents the human readable (not intended to be -// parsed) OS version information, like e.g. reported by `ver` or -// `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating system -// as defined in [Version Attributes]. 
-// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Enum values for os.type -var ( - // Microsoft Windows - // Stability: development - OSTypeWindows = OSTypeKey.String("windows") - // Linux - // Stability: development - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - // Stability: development - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - // Stability: development - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - // Stability: development - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - // Stability: development - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - // Stability: development - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - // Stability: development - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - // Stability: development - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - // Stability: development - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - // Stability: development - OSTypeZOS = OSTypeKey.String("zos") -) - -// Namespace: otel -const ( - // OTelComponentNameKey is the attribute Key conforming to the - // "otel.component.name" semantic conventions. It represents a name uniquely - // identifying the instance of the OpenTelemetry component within its containing - // SDK instance. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otlp_grpc_span_exporter/0", "custom-name" - // Note: Implementations SHOULD ensure a low cardinality for this attribute, - // even across application or SDK restarts. - // E.g. implementations MUST NOT use UUIDs as values for this attribute. - // - // Implementations MAY achieve these goals by following a - // `/` pattern, e.g. - // `batching_span_processor/0`. 
- // Hereby `otel.component.type` refers to the corresponding attribute value of - // the component. - // - // The value of `instance-counter` MAY be automatically assigned by the - // component and uniqueness within the enclosing SDK instance MUST be - // guaranteed. - // For example, `` MAY be implemented by using a monotonically - // increasing counter (starting with `0`), which is incremented every time an - // instance of the given component type is started. - // - // With this implementation, for example the first Batching Span Processor would - // have `batching_span_processor/0` - // as `otel.component.name`, the second one `batching_span_processor/1` and so - // on. - // These values will therefore be reused in the case of an application restart. - OTelComponentNameKey = attribute.Key("otel.component.name") - - // OTelComponentTypeKey is the attribute Key conforming to the - // "otel.component.type" semantic conventions. It represents a name identifying - // the type of the OpenTelemetry component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "batching_span_processor", "com.example.MySpanExporter" - // Note: If none of the standardized values apply, implementations SHOULD use - // the language-defined name of the type. - // E.g. for Java the fully qualified classname SHOULD be used in this case. - OTelComponentTypeKey = attribute.Key("otel.component.type") - - // OTelEventNameKey is the attribute Key conforming to the "otel.event.name" - // semantic conventions. It represents the identifies the class / type of event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "browser.mouse.click", "device.app.lifecycle" - // Note: This attribute SHOULD be used by non-OTLP exporters when destination - // does not support `EventName` or equivalent field. 
This attribute MAY be used - // by applications using existing logging libraries so that it can be used to - // set the `EventName` field by Collector or SDK components. - OTelEventNameKey = attribute.Key("otel.event.name") - - // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" - // semantic conventions. It represents the name of the instrumentation scope - ( - // `InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "io.opentelemetry.contrib.mongodb" - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeSchemaURLKey is the attribute Key conforming to the - // "otel.scope.schema_url" semantic conventions. It represents the schema URL of - // the instrumentation scope. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://opentelemetry.io/schemas/1.31.0" - OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of the - // instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.0.0" - OTelScopeVersionKey = attribute.Key("otel.scope.version") - - // OTelSpanParentOriginKey is the attribute Key conforming to the - // "otel.span.parent.origin" semantic conventions. It represents the determines - // whether the span has a parent span, and if so, - // [whether it is a remote parent]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote - OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") - - // OTelSpanSamplingResultKey is the attribute Key conforming to the - // "otel.span.sampling_result" semantic conventions. It represents the result - // value of the sampler for this span. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") - - // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" - // semantic conventions. It represents the name of the code, either "OK" or - // "ERROR". MUST NOT be set if the status code is UNSET. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the description - // of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "resource not found" - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -// OTelComponentName returns an attribute KeyValue conforming to the -// "otel.component.name" semantic conventions. It represents a name uniquely -// identifying the instance of the OpenTelemetry component within its containing -// SDK instance. -func OTelComponentName(val string) attribute.KeyValue { - return OTelComponentNameKey.String(val) -} - -// OTelEventName returns an attribute KeyValue conforming to the -// "otel.event.name" semantic conventions. It represents the identifies the class -// / type of event. 
-func OTelEventName(val string) attribute.KeyValue { - return OTelEventNameKey.String(val) -} - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeSchemaURL returns an attribute KeyValue conforming to the -// "otel.scope.schema_url" semantic conventions. It represents the schema URL of -// the instrumentation scope. -func OTelScopeSchemaURL(val string) attribute.KeyValue { - return OTelScopeSchemaURLKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the description -// of the Status if it has a value, otherwise not set. 
-func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Enum values for otel.component.type -var ( - // The builtin SDK batching span processor - // - // Stability: development - OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") - // The builtin SDK simple span processor - // - // Stability: development - OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") - // The builtin SDK batching log record processor - // - // Stability: development - OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") - // The builtin SDK simple log record processor - // - // Stability: development - OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") - // OTLP span exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") - // OTLP span exporter over HTTP with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") - // OTLP span exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") - // Zipkin span exporter over HTTP - // - // Stability: development - OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") - // OTLP log record exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") - // OTLP log record exporter over HTTP with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPLogExporter = 
OTelComponentTypeKey.String("otlp_http_log_exporter") - // OTLP log record exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") - // The builtin SDK periodically exporting metric reader - // - // Stability: development - OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") - // OTLP metric exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") - // OTLP metric exporter over HTTP with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") - // OTLP metric exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") - // Prometheus metric exporter over HTTP with the default text-based format - // - // Stability: development - OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") -) - -// Enum values for otel.span.parent.origin -var ( - // The span does not have a parent, it is a root span - // Stability: development - OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") - // The span has a parent and the parent's span context [isRemote()] is false - // Stability: development - // - // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote - OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") - // The span has a parent and the parent's span context [isRemote()] is true - // Stability: development - // - // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote - OTelSpanParentOriginRemote = 
OTelSpanParentOriginKey.String("remote") -) - -// Enum values for otel.span.sampling_result -var ( - // The span is not sampled and not recording - // Stability: development - OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") - // The span is not sampled, but recording - // Stability: development - OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") - // The span is sampled and recording - // Stability: development - OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") -) - -// Enum values for otel.status_code -var ( - // The operation has been validated by an Application developer or Operator to - // have completed successfully. - // Stability: stable - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error. - // Stability: stable - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// Namespace: pprof -const ( - // PprofLocationIsFoldedKey is the attribute Key conforming to the - // "pprof.location.is_folded" semantic conventions. It represents the provides - // an indication that multiple symbols map to this location's address, for - // example due to identical code folding by the linker. In that case the line - // information represents one of the multiple symbols. This field must be - // recomputed when the symbolization state of the profile changes. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - PprofLocationIsFoldedKey = attribute.Key("pprof.location.is_folded") - - // PprofMappingHasFilenamesKey is the attribute Key conforming to the - // "pprof.mapping.has_filenames" semantic conventions. It represents the - // indicates that there are filenames related to this mapping. 
- // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - PprofMappingHasFilenamesKey = attribute.Key("pprof.mapping.has_filenames") - - // PprofMappingHasFunctionsKey is the attribute Key conforming to the - // "pprof.mapping.has_functions" semantic conventions. It represents the - // indicates that there are functions related to this mapping. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - PprofMappingHasFunctionsKey = attribute.Key("pprof.mapping.has_functions") - - // PprofMappingHasInlineFramesKey is the attribute Key conforming to the - // "pprof.mapping.has_inline_frames" semantic conventions. It represents the - // indicates that there are inline frames related to this mapping. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - PprofMappingHasInlineFramesKey = attribute.Key("pprof.mapping.has_inline_frames") - - // PprofMappingHasLineNumbersKey is the attribute Key conforming to the - // "pprof.mapping.has_line_numbers" semantic conventions. It represents the - // indicates that there are line numbers related to this mapping. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - PprofMappingHasLineNumbersKey = attribute.Key("pprof.mapping.has_line_numbers") - - // PprofProfileCommentKey is the attribute Key conforming to the - // "pprof.profile.comment" semantic conventions. It represents the free-form - // text associated with the profile. This field should not be used to store any - // machine-readable information, it is only for human-friendly content. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "hello world", "bazinga" - PprofProfileCommentKey = attribute.Key("pprof.profile.comment") - - // PprofProfileDocURLKey is the attribute Key conforming to the - // "pprof.profile.doc_url" semantic conventions. It represents the documentation - // link for this profile type. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "http://pprof.example.com/cpu-profile.html" - // Note: The URL must be absolute and may be missing if the profile was - // generated by code that did not supply a link - PprofProfileDocURLKey = attribute.Key("pprof.profile.doc_url") - - // PprofProfileDropFramesKey is the attribute Key conforming to the - // "pprof.profile.drop_frames" semantic conventions. It represents the frames - // with Function.function_name fully matching the regexp will be dropped from - // the samples, along with their successors. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/foobar/" - PprofProfileDropFramesKey = attribute.Key("pprof.profile.drop_frames") - - // PprofProfileKeepFramesKey is the attribute Key conforming to the - // "pprof.profile.keep_frames" semantic conventions. It represents the frames - // with Function.function_name fully matching the regexp will be kept, even if - // it matches drop_frames. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/bazinga/" - PprofProfileKeepFramesKey = attribute.Key("pprof.profile.keep_frames") - - // PprofScopeDefaultSampleTypeKey is the attribute Key conforming to the - // "pprof.scope.default_sample_type" semantic conventions. It represents the - // records the pprof's default_sample_type in the original profile. Not set if - // the default sample type was missing. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cpu" - // Note: This attribute, if present, MUST be set at the scope level - // (resource_profiles[].scope_profiles[].scope.attributes[]). - PprofScopeDefaultSampleTypeKey = attribute.Key("pprof.scope.default_sample_type") - - // PprofScopeSampleTypeOrderKey is the attribute Key conforming to the - // "pprof.scope.sample_type_order" semantic conventions. It represents the - // records the indexes of the sample types in the original profile. - // - // Type: int[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3, 0, 1, 2 - // Note: This attribute, if present, MUST be set at the scope level - // (resource_profiles[].scope_profiles[].scope.attributes[]). - PprofScopeSampleTypeOrderKey = attribute.Key("pprof.scope.sample_type_order") -) - -// PprofLocationIsFolded returns an attribute KeyValue conforming to the -// "pprof.location.is_folded" semantic conventions. It represents the provides an -// indication that multiple symbols map to this location's address, for example -// due to identical code folding by the linker. In that case the line information -// represents one of the multiple symbols. This field must be recomputed when the -// symbolization state of the profile changes. -func PprofLocationIsFolded(val bool) attribute.KeyValue { - return PprofLocationIsFoldedKey.Bool(val) -} - -// PprofMappingHasFilenames returns an attribute KeyValue conforming to the -// "pprof.mapping.has_filenames" semantic conventions. It represents the -// indicates that there are filenames related to this mapping. -func PprofMappingHasFilenames(val bool) attribute.KeyValue { - return PprofMappingHasFilenamesKey.Bool(val) -} - -// PprofMappingHasFunctions returns an attribute KeyValue conforming to the -// "pprof.mapping.has_functions" semantic conventions. It represents the -// indicates that there are functions related to this mapping. 
-func PprofMappingHasFunctions(val bool) attribute.KeyValue { - return PprofMappingHasFunctionsKey.Bool(val) -} - -// PprofMappingHasInlineFrames returns an attribute KeyValue conforming to the -// "pprof.mapping.has_inline_frames" semantic conventions. It represents the -// indicates that there are inline frames related to this mapping. -func PprofMappingHasInlineFrames(val bool) attribute.KeyValue { - return PprofMappingHasInlineFramesKey.Bool(val) -} - -// PprofMappingHasLineNumbers returns an attribute KeyValue conforming to the -// "pprof.mapping.has_line_numbers" semantic conventions. It represents the -// indicates that there are line numbers related to this mapping. -func PprofMappingHasLineNumbers(val bool) attribute.KeyValue { - return PprofMappingHasLineNumbersKey.Bool(val) -} - -// PprofProfileComment returns an attribute KeyValue conforming to the -// "pprof.profile.comment" semantic conventions. It represents the free-form text -// associated with the profile. This field should not be used to store any -// machine-readable information, it is only for human-friendly content. -func PprofProfileComment(val ...string) attribute.KeyValue { - return PprofProfileCommentKey.StringSlice(val) -} - -// PprofProfileDocURL returns an attribute KeyValue conforming to the -// "pprof.profile.doc_url" semantic conventions. It represents the documentation -// link for this profile type. -func PprofProfileDocURL(val string) attribute.KeyValue { - return PprofProfileDocURLKey.String(val) -} - -// PprofProfileDropFrames returns an attribute KeyValue conforming to the -// "pprof.profile.drop_frames" semantic conventions. It represents the frames -// with Function.function_name fully matching the regexp will be dropped from the -// samples, along with their successors. 
-func PprofProfileDropFrames(val string) attribute.KeyValue { - return PprofProfileDropFramesKey.String(val) -} - -// PprofProfileKeepFrames returns an attribute KeyValue conforming to the -// "pprof.profile.keep_frames" semantic conventions. It represents the frames -// with Function.function_name fully matching the regexp will be kept, even if it -// matches drop_frames. -func PprofProfileKeepFrames(val string) attribute.KeyValue { - return PprofProfileKeepFramesKey.String(val) -} - -// PprofScopeDefaultSampleType returns an attribute KeyValue conforming to the -// "pprof.scope.default_sample_type" semantic conventions. It represents the -// records the pprof's default_sample_type in the original profile. Not set if -// the default sample type was missing. -func PprofScopeDefaultSampleType(val string) attribute.KeyValue { - return PprofScopeDefaultSampleTypeKey.String(val) -} - -// PprofScopeSampleTypeOrder returns an attribute KeyValue conforming to the -// "pprof.scope.sample_type_order" semantic conventions. It represents the -// records the indexes of the sample types in the original profile. -func PprofScopeSampleTypeOrder(val ...int) attribute.KeyValue { - return PprofScopeSampleTypeOrderKey.IntSlice(val) -} - -// Namespace: process -const ( - // ProcessArgsCountKey is the attribute Key conforming to the - // "process.args_count" semantic conventions. It represents the length of the - // process.command_args array. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 4 - // Note: This field can be useful for querying or performing bucket analysis on - // how many arguments were provided to start a process. More arguments may be an - // indication of suspicious activity. - ProcessArgsCountKey = attribute.Key("process.args_count") - - // ProcessCommandKey is the attribute Key conforming to the "process.command" - // semantic conventions. It represents the command used to launch the process - // (i.e. 
the command name). On Linux based systems, can be set to the zeroth - // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter - // extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cmd/otelcol" - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received by - // the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this - // would be the full argv vector passed to `main`. SHOULD NOT be collected by - // default unless there is sanitization that excludes sensitive data. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cmd/otecol", "--config=config.yaml" - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full command - // used to launch the process as a single string representing the full command. - // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if - // you have to assemble it just for monitoring; use `process.command_args` - // instead. SHOULD NOT be collected by default unless there is sanitization that - // excludes sensitive data. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch.type" semantic conventions. It represents the - // specifies whether the context switches for this data point were voluntary or - // involuntary. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch.type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and time - // the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2023-11-21T09:25:34.853Z" - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the - // "process.executable.build_id.gnu" semantic conventions. It represents the GNU - // build ID as found in the `.note.gnu.build-id` ELF section (hex string). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" - ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") - - // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the - // "process.executable.build_id.go" semantic conventions. It represents the Go - // build ID as retrieved by `go tool buildid `. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" - ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") - - // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the - // "process.executable.build_id.htlhash" semantic conventions. It represents the - // profiling specific build ID for executables. See the OTel specification for - // Profiles for more information. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "600DCAFE4A110000F2BF38C493F5FB92" - ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name of the - // process executable. On Linux based systems, this SHOULD be set to the base - // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to - // the base name of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcol" - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full path - // to the process executable. On Linux based systems, can be set to the target - // of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/usr/bin/cmd/otelcol" - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" - // semantic conventions. 
It represents the exit code of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" - // semantic conventions. It represents the date and time the process exited, in - // ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2023-11-21T09:26:12.315Z" - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID of the - // process's group leader. This is also the process group ID (PGID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. It represents the whether the - // process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessLinuxCgroupKey is the attribute Key conforming to the - // "process.linux.cgroup" semantic conventions. It represents the control group - // associated with the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", - // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" - // Note: Control groups (cgroups) are a kernel feature used to organize and - // manage process resources. 
This attribute provides the path(s) to the - // cgroup(s) associated with the process, which should match the contents of the - // [/proc/[PID]/cgroup] file. - // - // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html - ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns the - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent Process - // identifier (PPID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic - // conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user ID - // (RUID) of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the username of - // the real user of the process. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "operator" - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of the - // runtime of this process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "OpenJDK Runtime Environment" - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the version of - // the runtime of this process, as returned by the runtime without modification. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 14.0.2 - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved user ID - // (SUID) of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. 
It represents the username of - // the saved user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "operator" - ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") - - // ProcessSessionLeaderPIDKey is the attribute Key conforming to the - // "process.session_leader.pid" semantic conventions. It represents the PID of - // the process's session leader. This is also the session ID (SID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 14 - ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") - - // ProcessStateKey is the attribute Key conforming to the "process.state" - // semantic conventions. It represents the process state, e.g., - // [Linux Process State Codes]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "running" - // - // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES - ProcessStateKey = attribute.Key("process.state") - - // ProcessTitleKey is the attribute Key conforming to the "process.title" - // semantic conventions. It represents the process title (proctitle). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cat /etc/hostname", "xfce4-session", "bash" - // Note: In many Unix-like systems, process title (proctitle), is the string - // that represents the name or command line of a running process, displayed by - // system monitoring tools like ps, top, and htop. - ProcessTitleKey = attribute.Key("process.title") - - // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" - // semantic conventions. It represents the effective user ID (EUID) of the - // process. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1001 - ProcessUserIDKey = attribute.Key("process.user.id") - - // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" - // semantic conventions. It represents the username of the effective user of the - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - ProcessUserNameKey = attribute.Key("process.user.name") - - // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic - // conventions. It represents the virtual process identifier. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12 - // Note: The process ID within a PID namespace. This is not necessarily unique - // across all processes on the host but it is unique within the process - // namespace that the process exists within. - ProcessVpidKey = attribute.Key("process.vpid") - - // ProcessWorkingDirectoryKey is the attribute Key conforming to the - // "process.working_directory" semantic conventions. It represents the working - // directory of the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/root" - ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") -) - -// ProcessArgsCount returns an attribute KeyValue conforming to the -// "process.args_count" semantic conventions. It represents the length of the -// process.command_args array. -func ProcessArgsCount(val int) attribute.KeyValue { - return ProcessArgsCountKey.Int(val) -} - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be set -// to the zeroth string in `proc/[pid]/cmdline`. 
On Windows, can be set to the -// first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the command -// arguments (including the command/executable itself) as received by the -// process. On Linux-based systems (and some other Unixoid systems supporting -// procfs), can be set according to the list of null-delimited strings extracted -// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full -// argv vector passed to `main`. SHOULD NOT be collected by default unless there -// is sanitization that excludes sensitive data. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if -// you have to assemble it just for monitoring; use `process.command_args` -// instead. SHOULD NOT be collected by default unless there is sanitization that -// excludes sensitive data. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCreationTime returns an attribute KeyValue conforming to the -// "process.creation.time" semantic conventions. It represents the date and time -// the process was created, in ISO 8601 format. -func ProcessCreationTime(val string) attribute.KeyValue { - return ProcessCreationTimeKey.String(val) -} - -// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the -// "process.environment_variable" semantic conventions. 
It represents the process -// environment variables, `` being the environment variable name, the value -// being the environment variable value. -func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { - return attribute.String("process.environment_variable."+key, val) -} - -// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the -// "process.executable.build_id.gnu" semantic conventions. It represents the GNU -// build ID as found in the `.note.gnu.build-id` ELF section (hex string). -func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { - return ProcessExecutableBuildIDGNUKey.String(val) -} - -// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the -// "process.executable.build_id.go" semantic conventions. It represents the Go -// build ID as retrieved by `go tool buildid `. -func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { - return ProcessExecutableBuildIDGoKey.String(val) -} - -// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to -// the "process.executable.build_id.htlhash" semantic conventions. It represents -// the profiling specific build ID for executables. See the OTel specification -// for Profiles for more information. -func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { - return ProcessExecutableBuildIDHtlhashKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of the -// process executable. On Linux based systems, this SHOULD be set to the base -// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the -// base name of `GetProcessImageFileNameW`. 
-func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path to -// the process executable. On Linux based systems, can be set to the target of -// `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessExitCode returns an attribute KeyValue conforming to the -// "process.exit.code" semantic conventions. It represents the exit code of the -// process. -func ProcessExitCode(val int) attribute.KeyValue { - return ProcessExitCodeKey.Int(val) -} - -// ProcessExitTime returns an attribute KeyValue conforming to the -// "process.exit.time" semantic conventions. It represents the date and time the -// process exited, in ISO 8601 format. -func ProcessExitTime(val string) attribute.KeyValue { - return ProcessExitTimeKey.String(val) -} - -// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the -// "process.group_leader.pid" semantic conventions. It represents the PID of the -// process's group leader. This is also the process group ID (PGID) of the -// process. -func ProcessGroupLeaderPID(val int) attribute.KeyValue { - return ProcessGroupLeaderPIDKey.Int(val) -} - -// ProcessInteractive returns an attribute KeyValue conforming to the -// "process.interactive" semantic conventions. It represents the whether the -// process is connected to an interactive shell. -func ProcessInteractive(val bool) attribute.KeyValue { - return ProcessInteractiveKey.Bool(val) -} - -// ProcessLinuxCgroup returns an attribute KeyValue conforming to the -// "process.linux.cgroup" semantic conventions. It represents the control group -// associated with the process. 
-func ProcessLinuxCgroup(val string) attribute.KeyValue { - return ProcessLinuxCgroupKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" -// semantic conventions. It represents the username of the user that owns the -// process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRealUserID returns an attribute KeyValue conforming to the -// "process.real_user.id" semantic conventions. It represents the real user ID -// (RUID) of the process. -func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. 
It represents the name of the -// runtime of this process. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user ID -// (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username of -// the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessTitle returns an attribute KeyValue conforming to the "process.title" -// semantic conventions. It represents the process title (proctitle). -func ProcessTitle(val string) attribute.KeyValue { - return ProcessTitleKey.String(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. 
-func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" -// semantic conventions. It represents the virtual process identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// ProcessWorkingDirectory returns an attribute KeyValue conforming to the -// "process.working_directory" semantic conventions. It represents the working -// directory of the process. -func ProcessWorkingDirectory(val string) attribute.KeyValue { - return ProcessWorkingDirectoryKey.String(val) -} - -// Enum values for process.context_switch.type -var ( - // voluntary - // Stability: development - ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") - // involuntary - // Stability: development - ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") -) - -// Enum values for process.state -var ( - // running - // Stability: development - ProcessStateRunning = ProcessStateKey.String("running") - // sleeping - // Stability: development - ProcessStateSleeping = ProcessStateKey.String("sleeping") - // stopped - // Stability: development - ProcessStateStopped = ProcessStateKey.String("stopped") - // defunct - // Stability: development - ProcessStateDefunct = ProcessStateKey.String("defunct") -) - -// Namespace: profile -const ( - // ProfileFrameTypeKey is the attribute Key conforming to the - // "profile.frame.type" semantic conventions. It represents the describes the - // interpreter or compiler of a single frame. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cpython" - ProfileFrameTypeKey = attribute.Key("profile.frame.type") -) - -// Enum values for profile.frame.type -var ( - // [.NET] - // - // Stability: development - // - // [.NET]: https://wikipedia.org/wiki/.NET - ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") - // [JVM] - // - // Stability: development - // - // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine - ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") - // [Kernel] - // - // Stability: development - // - // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) - ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") - // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a - // more precise value MUST be used. - // - // Stability: development - // - // [C]: https://wikipedia.org/wiki/C_(programming_language) - // [C++]: https://wikipedia.org/wiki/C%2B%2B - // [Go]: https://wikipedia.org/wiki/Go_(programming_language) - // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) - ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") - // [Perl] - // - // Stability: development - // - // [Perl]: https://wikipedia.org/wiki/Perl - ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") - // [PHP] - // - // Stability: development - // - // [PHP]: https://wikipedia.org/wiki/PHP - ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") - // [Python] - // - // Stability: development - // - // [Python]: https://wikipedia.org/wiki/Python_(programming_language) - ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") - // [Ruby] - // - // Stability: development - // - // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) - ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") - // [V8JS] - // - // Stability: development - // - // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) - 
ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") - // [Erlang] - // - // Stability: development - // - // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) - ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") - // [Go], - // - // Stability: development - // - // [Go]: https://wikipedia.org/wiki/Go_(programming_language) - ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") - // [Rust] - // - // Stability: development - // - // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) - ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") -) - -// Namespace: rpc -const ( - // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic - // conventions. It represents the fully-qualified logical name of the method - // from the RPC interface perspective. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "com.example.ExampleService/exampleMethod", "EchoService/Echo", - // "_OTHER" - // Note: The method name MAY have unbounded cardinality in edge or error cases. - // - // Some RPC frameworks or libraries provide a fixed set of recognized methods - // for client stubs and server implementations. Instrumentations for such - // frameworks MUST set this attribute to the original method name only - // when the method is recognized by the framework or library. - // - // When the method is not recognized, for example, when the server receives - // a request for a method that is not predefined on the server, or when - // instrumentation is not able to reliably detect if the method is predefined, - // the attribute MUST be set to `_OTHER`. In such cases, tracing - // instrumentations MUST also set `rpc.method_original` attribute to - // the original method value. - // - // If the RPC instrumentation could end up converting valid RPC methods to - // `_OTHER`, then it SHOULD provide a way to configure the list of recognized - // RPC methods. 
- // - // The `rpc.method` can be different from the name of any implementing - // method/function. - // The `code.function.name` attribute may be used to record the fully-qualified - // method actually executing the call on the server side, or the - // RPC client stub method on the client side. - RPCMethodKey = attribute.Key("rpc.method") - - // RPCMethodOriginalKey is the attribute Key conforming to the - // "rpc.method_original" semantic conventions. It represents the original name - // of the method used by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "com.myservice.EchoService/catchAll", - // "com.myservice.EchoService/unknownMethod", "InvalidMethod" - RPCMethodOriginalKey = attribute.Key("rpc.method_original") - - // RPCResponseStatusCodeKey is the attribute Key conforming to the - // "rpc.response.status_code" semantic conventions. It represents the status - // code of the RPC returned by the RPC server or generated by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "OK", "DEADLINE_EXCEEDED", "-32602" - // Note: Usually it represents an error code, but may also represent partial - // success, warning, or differentiate between various types of successful - // outcomes. - // Semantic conventions for individual RPC frameworks SHOULD document what - // `rpc.response.status_code` means in the context of that system and which - // values are considered to represent errors. - RPCResponseStatusCodeKey = attribute.Key("rpc.response.status_code") - - // RPCSystemNameKey is the attribute Key conforming to the "rpc.system.name" - // semantic conventions. It represents the Remote Procedure Call (RPC) system. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: - // Note: The client and server RPC systems may differ for the same RPC - // interaction. 
For example, a client may use Apache Dubbo or Connect RPC to - // communicate with a server that uses gRPC since both protocols provide - // compatibility with gRPC. - RPCSystemNameKey = attribute.Key("rpc.system.name") -) - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the fully-qualified logical name of the -// method from the RPC interface perspective. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCMethodOriginal returns an attribute KeyValue conforming to the -// "rpc.method_original" semantic conventions. It represents the original name of -// the method used by the client. -func RPCMethodOriginal(val string) attribute.KeyValue { - return RPCMethodOriginalKey.String(val) -} - -// RPCRequestMetadata returns an attribute KeyValue conforming to the -// "rpc.request.metadata" semantic conventions. It represents the RPC request -// metadata, `` being the normalized RPC metadata key (lowercase), the value -// being the metadata values. -func RPCRequestMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.request.metadata."+key, val) -} - -// RPCResponseMetadata returns an attribute KeyValue conforming to the -// "rpc.response.metadata" semantic conventions. It represents the RPC response -// metadata, `` being the normalized RPC metadata key (lowercase), the value -// being the metadata values. -func RPCResponseMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.response.metadata."+key, val) -} - -// RPCResponseStatusCode returns an attribute KeyValue conforming to the -// "rpc.response.status_code" semantic conventions. It represents the status code -// of the RPC returned by the RPC server or generated by the client. 
-func RPCResponseStatusCode(val string) attribute.KeyValue { - return RPCResponseStatusCodeKey.String(val) -} - -// Enum values for rpc.system.name -var ( - // [gRPC] - // Stability: release_candidate - // - // [gRPC]: https://grpc.io/ - RPCSystemNameGRPC = RPCSystemNameKey.String("grpc") - // [Apache Dubbo] - // Stability: release_candidate - // - // [Apache Dubbo]: https://dubbo.apache.org/ - RPCSystemNameDubbo = RPCSystemNameKey.String("dubbo") - // [Connect RPC] - // Stability: development - // - // [Connect RPC]: https://connectrpc.com/ - RPCSystemNameConnectrpc = RPCSystemNameKey.String("connectrpc") - // [JSON-RPC] - // Stability: development - // - // [JSON-RPC]: https://www.jsonrpc.org/ - RPCSystemNameJSONRPC = RPCSystemNameKey.String("jsonrpc") -) - -// Namespace: security_rule -const ( - // SecurityRuleCategoryKey is the attribute Key conforming to the - // "security_rule.category" semantic conventions. It represents a categorization - // value keyword used by the entity using the rule for detection of this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Attempted Information Leak" - SecurityRuleCategoryKey = attribute.Key("security_rule.category") - - // SecurityRuleDescriptionKey is the attribute Key conforming to the - // "security_rule.description" semantic conventions. It represents the - // description of the rule generating the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Block requests to public DNS over HTTPS / TLS protocols" - SecurityRuleDescriptionKey = attribute.Key("security_rule.description") - - // SecurityRuleLicenseKey is the attribute Key conforming to the - // "security_rule.license" semantic conventions. It represents the name of the - // license under which the rule used to generate this event is made available. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Apache 2.0" - SecurityRuleLicenseKey = attribute.Key("security_rule.license") - - // SecurityRuleNameKey is the attribute Key conforming to the - // "security_rule.name" semantic conventions. It represents the name of the rule - // or signature generating the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "BLOCK_DNS_over_TLS" - SecurityRuleNameKey = attribute.Key("security_rule.name") - - // SecurityRuleReferenceKey is the attribute Key conforming to the - // "security_rule.reference" semantic conventions. It represents the reference - // URL to additional information about the rule used to generate this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" - // Note: The URL can point to the vendor’s documentation about the rule. If - // that’s not available, it can also be a link to a more general page - // describing this type of alert. - SecurityRuleReferenceKey = attribute.Key("security_rule.reference") - - // SecurityRuleRulesetNameKey is the attribute Key conforming to the - // "security_rule.ruleset.name" semantic conventions. It represents the name of - // the ruleset, policy, group, or parent category in which the rule used to - // generate this event is a member. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Standard_Protocol_Filters" - SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") - - // SecurityRuleUUIDKey is the attribute Key conforming to the - // "security_rule.uuid" semantic conventions. It represents a rule ID that is - // unique within the scope of a set or group of agents, observers, or other - // entities using the rule for detection of this event. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" - SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") - - // SecurityRuleVersionKey is the attribute Key conforming to the - // "security_rule.version" semantic conventions. It represents the version / - // revision of the rule being used for analysis. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.0.0" - SecurityRuleVersionKey = attribute.Key("security_rule.version") -) - -// SecurityRuleCategory returns an attribute KeyValue conforming to the -// "security_rule.category" semantic conventions. It represents a categorization -// value keyword used by the entity using the rule for detection of this event. -func SecurityRuleCategory(val string) attribute.KeyValue { - return SecurityRuleCategoryKey.String(val) -} - -// SecurityRuleDescription returns an attribute KeyValue conforming to the -// "security_rule.description" semantic conventions. It represents the -// description of the rule generating the event. -func SecurityRuleDescription(val string) attribute.KeyValue { - return SecurityRuleDescriptionKey.String(val) -} - -// SecurityRuleLicense returns an attribute KeyValue conforming to the -// "security_rule.license" semantic conventions. It represents the name of the -// license under which the rule used to generate this event is made available. -func SecurityRuleLicense(val string) attribute.KeyValue { - return SecurityRuleLicenseKey.String(val) -} - -// SecurityRuleName returns an attribute KeyValue conforming to the -// "security_rule.name" semantic conventions. It represents the name of the rule -// or signature generating the event. 
-func SecurityRuleName(val string) attribute.KeyValue { - return SecurityRuleNameKey.String(val) -} - -// SecurityRuleReference returns an attribute KeyValue conforming to the -// "security_rule.reference" semantic conventions. It represents the reference -// URL to additional information about the rule used to generate this event. -func SecurityRuleReference(val string) attribute.KeyValue { - return SecurityRuleReferenceKey.String(val) -} - -// SecurityRuleRulesetName returns an attribute KeyValue conforming to the -// "security_rule.ruleset.name" semantic conventions. It represents the name of -// the ruleset, policy, group, or parent category in which the rule used to -// generate this event is a member. -func SecurityRuleRulesetName(val string) attribute.KeyValue { - return SecurityRuleRulesetNameKey.String(val) -} - -// SecurityRuleUUID returns an attribute KeyValue conforming to the -// "security_rule.uuid" semantic conventions. It represents a rule ID that is -// unique within the scope of a set or group of agents, observers, or other -// entities using the rule for detection of this event. -func SecurityRuleUUID(val string) attribute.KeyValue { - return SecurityRuleUUIDKey.String(val) -} - -// SecurityRuleVersion returns an attribute KeyValue conforming to the -// "security_rule.version" semantic conventions. It represents the version / -// revision of the rule being used for analysis. -func SecurityRuleVersion(val string) attribute.KeyValue { - return SecurityRuleVersionKey.String(val) -} - -// Namespace: server -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the client side, and when communicating through an - // intermediary, `server.address` SHOULD represent the server address behind any - // intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" semantic - // conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through an - // intermediary, `server.port` SHOULD represent the server port behind any - // intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the "server.address" -// semantic conventions. It represents the server domain name if available -// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// Namespace: service -const ( - // ServiceCriticalityKey is the attribute Key conforming to the - // "service.criticality" semantic conventions. It represents the operational - // criticality of the service. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "critical", "high", "medium", "low" - // Note: Application developers are encouraged to set `service.criticality` to - // express the operational importance of their services. 
Telemetry consumers MAY - // use this attribute to optimize telemetry collection or improve user - // experience. - ServiceCriticalityKey = attribute.Key("service.criticality") - - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID of - // the service instance. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "627cc493-f310-47de-96bd-71410b7dec09" - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to - // distinguish instances of the same service that exist at the same time (e.g. - // instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random Version 1 - // or Version 4 [RFC - // 4122] UUID, but are free to use an inherent unique ID as - // the source of - // this value if stability is desirable. In that case, the ID SHOULD be used as - // source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the purposes of - // identifying a service instance is - // needed. Similar to what can be seen in the man page for the - // [`/etc/machine-id`] file, the underlying - // data, such as pod name and namespace should be treated as confidential, being - // the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we do - // not recommend using one identifier - // for all processes participating in the application. Instead, it's recommended - // each division (e.g. a worker - // thread in unicorn) to have its own instance.id. 
- // - // It's not recommended for a Collector to set `service.instance.id` if it can't - // unambiguously determine the - // service instance that is generating that telemetry. For instance, creating an - // UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container within - // that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, as - // they know the target address and - // port. - // - // [RFC - // 4122]: https://www.ietf.org/rfc/rfc4122.txt - // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" semantic - // conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "shoppingcart" - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. - // If `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - // - // [`process.executable.name`]: process.md - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "Shop" - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. 
- // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace - // defined (so the empty/unspecified namespace is simply one more valid - // namespace). Zero-length namespace string is assumed equal to unspecified - // namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServicePeerNameKey is the attribute Key conforming to the "service.peer.name" - // semantic conventions. It represents the logical name of the service on the - // other side of the connection. SHOULD be equal to the actual [`service.name`] - // resource attribute of the remote service if any. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "shoppingcart" - // - // [`service.name`]: /docs/resource/README.md#service - ServicePeerNameKey = attribute.Key("service.peer.name") - - // ServicePeerNamespaceKey is the attribute Key conforming to the - // "service.peer.namespace" semantic conventions. It represents the logical - // namespace of the service on the other side of the connection. SHOULD be equal - // to the actual [`service.namespace`] resource attribute of the remote service - // if any. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Shop" - // - // [`service.namespace`]: /docs/resource/README.md#service - ServicePeerNamespaceKey = attribute.Key("service.peer.namespace") - - // ServiceVersionKey is the attribute Key conforming to the "service.version" - // semantic conventions. It represents the version string of the service - // component. The format is not defined by these conventions. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "2.0.0", "a01dbef8a" - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of the -// service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the "service.name" -// semantic conventions. It represents the logical name of the service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServicePeerName returns an attribute KeyValue conforming to the -// "service.peer.name" semantic conventions. It represents the logical name of -// the service on the other side of the connection. SHOULD be equal to the actual -// [`service.name`] resource attribute of the remote service if any. -// -// [`service.name`]: /docs/resource/README.md#service -func ServicePeerName(val string) attribute.KeyValue { - return ServicePeerNameKey.String(val) -} - -// ServicePeerNamespace returns an attribute KeyValue conforming to the -// "service.peer.namespace" semantic conventions. It represents the logical -// namespace of the service on the other side of the connection. SHOULD be equal -// to the actual [`service.namespace`] resource attribute of the remote service -// if any. 
-// -// [`service.namespace`]: /docs/resource/README.md#service -func ServicePeerNamespace(val string) attribute.KeyValue { - return ServicePeerNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service component. The format is not defined by these conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Enum values for service.criticality -var ( - // Service is business-critical; downtime directly impacts revenue, user - // experience, or core functionality. - // - // Stability: development - ServiceCriticalityCritical = ServiceCriticalityKey.String("critical") - // Service is important but has degradation tolerance or fallback mechanisms. - // - // Stability: development - ServiceCriticalityHigh = ServiceCriticalityKey.String("high") - // Service provides supplementary functionality; degradation has limited user - // impact. - // - // Stability: development - ServiceCriticalityMedium = ServiceCriticalityKey.String("medium") - // Service is non-essential to core operations; used for background tasks or - // internal tools. - // - // Stability: development - ServiceCriticalityLow = ServiceCriticalityKey.String("low") -) - -// Namespace: session -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" semantic - // conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 00112233-4455-6677-8899-aabbccddeeff - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 00112233-4455-6677-8899-aabbccddeeff - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// Namespace: signalr -const ( - // SignalRConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the signalR - // HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "app_shutdown", "timeout" - SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalRTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. It represents the - // [SignalR transport type]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "web_sockets", "long_polling" - // - // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md - SignalRTransportKey = attribute.Key("signalr.transport") -) - -// Enum values for signalr.connection.status -var ( - // The connection was closed normally. - // Stability: stable - SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout. 
- // Stability: stable - SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down. - // Stability: stable - SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") -) - -// Enum values for signalr.transport -var ( - // ServerSentEvents protocol - // Stability: stable - SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") - // LongPolling protocol - // Stability: stable - SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") - // WebSockets protocol - // Stability: stable - SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") -) - -// Namespace: source -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix domain - // socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the destination side, and when communicating through - // an intermediary, `source.address` SHOULD represent the source address behind - // any intermediaries, for example proxies, if it's available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" semantic - // conventions. It represents the source port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the "source.address" -// semantic conventions. 
It represents the source address - domain name if -// available without reverse DNS lookup; otherwise, IP address or Unix domain -// socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number. -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Namespace: system -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. It represents the device identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "(identifier)" - SystemDeviceKey = attribute.Key("system.device") - - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the filesystem - // mode. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "rw, ro" - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/mnt/data" - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the filesystem - // state. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "used" - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the filesystem - // type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ext4" - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") - - // SystemMemoryLinuxSlabStateKey is the attribute Key conforming to the - // "system.memory.linux.slab.state" semantic conventions. It represents the - // Linux Slab memory state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "reclaimable", "unreclaimable" - SystemMemoryLinuxSlabStateKey = attribute.Key("system.memory.linux.slab.state") - - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "free", "cached" - SystemMemoryStateKey = attribute.Key("system.memory.state") - - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "in" - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingFaultTypeKey is the attribute Key conforming to the - // "system.paging.fault.type" semantic conventions. It represents the paging - // fault type. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "minor" - SystemPagingFaultTypeKey = attribute.Key("system.paging.fault.type") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. It represents the memory paging - // state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "free" - SystemPagingStateKey = attribute.Key("system.paging.state") -) - -// SystemDevice returns an attribute KeyValue conforming to the "system.device" -// semantic conventions. It represents the device identifier. -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. It represents the filesystem -// mode. -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the -// "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path. 
-func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Enum values for system.filesystem.state -var ( - // used - // Stability: development - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - // Stability: development - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - // Stability: development - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -// Enum values for system.filesystem.type -var ( - // fat32 - // Stability: development - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - // Stability: development - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - // Stability: development - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - // Stability: development - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - // Stability: development - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - // Stability: development - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// Enum values for system.memory.linux.slab.state -var ( - // reclaimable - // Stability: development - SystemMemoryLinuxSlabStateReclaimable = SystemMemoryLinuxSlabStateKey.String("reclaimable") - // unreclaimable - // Stability: development - SystemMemoryLinuxSlabStateUnreclaimable = SystemMemoryLinuxSlabStateKey.String("unreclaimable") -) - -// Enum values for system.memory.state -var ( - // Actual used virtual memory in bytes. 
- // Stability: development - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - // Stability: development - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // buffers - // Stability: development - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - // Stability: development - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Enum values for system.paging.direction -var ( - // in - // Stability: development - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - // Stability: development - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -// Enum values for system.paging.fault.type -var ( - // major - // Stability: development - SystemPagingFaultTypeMajor = SystemPagingFaultTypeKey.String("major") - // minor - // Stability: development - SystemPagingFaultTypeMinor = SystemPagingFaultTypeKey.String("minor") -) - -// Enum values for system.paging.state -var ( - // used - // Stability: development - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - // Stability: development - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -// Namespace: telemetry -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of the - // auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "parts-unlimited-java" - // Note: Official auto instrumentation agents and distributions SHOULD set the - // `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. 
It represents the version - // string of the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2.3" - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the language of - // the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "opentelemetry" - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to - // `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is used, - // this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module name of - // this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.2.3" - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version string -// of the telemetry SDK. 
-func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// Enum values for telemetry.sdk.language -var ( - // cpp - // Stability: stable - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - // Stability: stable - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - // Stability: stable - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - // Stability: stable - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - // Stability: stable - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - // Stability: stable - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - // Stability: stable - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - // Stability: stable - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - // Stability: stable - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - // Stability: stable - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - // Stability: stable - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - // Stability: stable - TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") -) - -// Namespace: test -const ( - // TestCaseNameKey is the attribute Key conforming to the "test.case.name" - // semantic conventions. It represents the fully qualified human readable name - // of the [test case]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", - // "ExampleTestCase1_test1" - // - // [test case]: https://wikipedia.org/wiki/Test_case - TestCaseNameKey = attribute.Key("test.case.name") - - // TestCaseResultStatusKey is the attribute Key conforming to the - // "test.case.result.status" semantic conventions. It represents the status of - // the actual test case result from test execution. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pass", "fail" - TestCaseResultStatusKey = attribute.Key("test.case.result.status") - - // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" - // semantic conventions. It represents the human readable name of a [test suite] - // . - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TestSuite1" - // - // [test suite]: https://wikipedia.org/wiki/Test_suite - TestSuiteNameKey = attribute.Key("test.suite.name") - - // TestSuiteRunStatusKey is the attribute Key conforming to the - // "test.suite.run.status" semantic conventions. It represents the status of the - // test suite run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "skipped", "aborted", "timed_out", - // "in_progress" - TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") -) - -// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" -// semantic conventions. It represents the fully qualified human readable name of -// the [test case]. -// -// [test case]: https://wikipedia.org/wiki/Test_case -func TestCaseName(val string) attribute.KeyValue { - return TestCaseNameKey.String(val) -} - -// TestSuiteName returns an attribute KeyValue conforming to the -// "test.suite.name" semantic conventions. 
It represents the human readable name -// of a [test suite]. -// -// [test suite]: https://wikipedia.org/wiki/Test_suite -func TestSuiteName(val string) attribute.KeyValue { - return TestSuiteNameKey.String(val) -} - -// Enum values for test.case.result.status -var ( - // pass - // Stability: development - TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") - // fail - // Stability: development - TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") -) - -// Enum values for test.suite.run.status -var ( - // success - // Stability: development - TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") - // failure - // Stability: development - TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") - // skipped - // Stability: development - TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") - // aborted - // Stability: development - TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") - // timed_out - // Stability: development - TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") - // in_progress - // Stability: development - TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") -) - -// Namespace: thread -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed to OS - // thread ID). 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: - // Examples of where the value can be extracted from: - // - // | Language or platform | Source | - // | --- | --- | - // | JVM | `Thread.currentThread().threadId()` | - // | .NET | `Thread.CurrentThread.ManagedThreadId` | - // | Python | `threading.current_thread().ident` | - // | Ruby | `Thread.current.object_id` | - // | C++ | `std::this_thread::get_id()` | - // | Erlang | `erlang:self()` | - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic - // conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: main - // Note: - // Examples of where the value can be extracted from: - // - // | Language or platform | Source | - // | --- | --- | - // | JVM | `Thread.currentThread().getName()` | - // | .NET | `Thread.CurrentThread.Name` | - // | Python | `threading.current_thread().name` | - // | Ruby | `Thread.current.name` | - // | Erlang | `erlang:process_info(self(), registered_name)` | - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic -// conventions. It represents the current "managed" thread ID (as opposed to OS -// thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Namespace: tls -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic - // conventions. It represents the string indicating the [cipher] used during the - // current connection. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" - // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` - // of the [registered TLS Cipher Suits]. - // - // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 - // [registered TLS Cipher Suits]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the PEM-encoded - // stand-alone certificate offered by the client. This is usually - // mutually-exclusive of `client.certificate_chain` since this value also exists - // in that list. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII..." - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the array - // of PEM-encoded certificates that make up the certificate chain offered by the - // client. This is usually mutually-exclusive of `client.certificate` since that - // value should be the first certificate in the chain. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII...", "MI..." - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the certificate - // fingerprint using the MD5 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the certificate - // fingerprint using the SHA1 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the certificate - // fingerprint using the SHA256 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" - // semantic conventions. It represents the distinguished name of [subject] of - // the issuer of the x.509 certificate presented by the client. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" - // - // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based on - // how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "d4e5b18d6b55c71272893221c96ba240" - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T00:00:00.000Z" - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the date/Time - // indicating when client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1970-01-01T00:00:00.000Z" - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the distinguished - // name of subject of the x.509 certificate presented by the client. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the array - // of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the given - // cipher, when applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "secp256r1" - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the "tls.established" - // semantic conventions. It represents the boolean flag indicating if the TLS - // negotiation was successful and transitioned to an encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" - // semantic conventions. It represents the string indicating the protocol being - // tunneled. Per the values in the [IANA registry], this string should be lower - // case. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "http/1.1" - // - // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" - // semantic conventions. It represents the normalized lowercase protocol name - // parsed from original string of the negotiated [SSL/TLS protocol version]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric part - // of the version parsed from the original string of the negotiated - // [SSL/TLS protocol version]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2", "3" - // - // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic - // conventions. It represents the boolean flag indicating if this TLS connection - // was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the PEM-encoded - // stand-alone certificate offered by the server. 
This is usually - // mutually-exclusive of `server.certificate_chain` since this value also exists - // in that list. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII..." - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the array - // of PEM-encoded certificates that make up the certificate chain offered by the - // server. This is usually mutually-exclusive of `server.certificate` since that - // value should be the first certificate in the chain. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII...", "MI..." - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the certificate - // fingerprint using the MD5 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the certificate - // fingerprint using the SHA1 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the certificate - // fingerprint using the SHA256 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" - // semantic conventions. It represents the distinguished name of [subject] of - // the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" - // - // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" - // semantic conventions. It represents a hash that identifies servers based on - // how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "d4e5b18d6b55c71272893221c96ba240" - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T00:00:00.000Z" - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the date/Time - // indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1970-01-01T00:00:00.000Z" - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the distinguished - // name of subject of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the [cipher] used -// during the current connection. -// -// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also exists -// in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. 
It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by the -// client. This is usually mutually-exclusive of `client.certificate` since that -// value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate offered -// by the client. For consistency with other hash values, this value should be -// formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished name -// of [subject] of the issuer of the x.509 certificate presented by the client. 
-// -// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" -// semantic conventions. It represents a hash that identifies clients based on -// how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic -// conventions. It represents the string indicating the curve used for the given -// cipher, when applicable. 
-func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string indicating -// the protocol being tunneled. Per the values in the [IANA registry], this -// string should be lower case. -// -// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part of -// the version parsed from the original string of the negotiated -// [SSL/TLS protocol version]. -// -// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also exists -// in that list. 
-func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by the -// server. This is usually mutually-exclusive of `server.certificate` since that -// value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate offered -// by the server. For consistency with other hash values, this value should be -// formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. 
-func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished name -// of [subject] of the issuer of the x.509 certificate presented by the client. -// -// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. 
-func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Enum values for tls.protocol.name -var ( - // ssl - // Stability: development - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - // Stability: development - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// Namespace: url -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" semantic - // conventions. It represents the domain extracted from the `url.full`, such as - // "opentelemetry.io". - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", - // "[1080:0:0:0:8:800:200C:417A]" - // Note: In some cases a URL may refer to an IP and/or port directly, without a - // domain name. In this case, the IP address would go to the domain field. If - // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` - // and `]` characters should also be captured in the domain field. - // - // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from the - // `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "png", "gz" - // Note: The file extension is only set if it exists, as not every url has a - // file extension. When the file name has multiple extensions `example.tar.gz`, - // only the last one should be captured `gz`, not `tar.gz`. - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic - // conventions. It represents the [URI fragment] component. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SemConv" - // - // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network resource - // according to [RFC3986]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the fragment - // is not transmitted over HTTP, but if it is known, it SHOULD be included - // nevertheless. - // - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. - // In such case username and password SHOULD be redacted and attribute's value - // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. - // - // `url.full` SHOULD capture the absolute URL when it is available (or can be - // reconstructed). - // - // Sensitive content provided in `url.full` SHOULD be scrubbed when - // instrumentations can identify it. - // - // - // Query string values for the following keys SHOULD be redacted by default and - // replaced by the - // value `REDACTED`: - // - // - [`AWSAccessKeyId`] - // - [`Signature`] - // - [`sig`] - // - [`X-Goog-Signature`] - // - // This list is subject to change over time. - // - // Matching of query parameter keys against the sensitive list SHOULD be - // case-sensitive. - // - // - // Instrumentation MAY provide a way to override this list via declarative - // configuration. - // If so, it SHOULD use the `sensitive_query_parameters` property - // (an array of case-sensitive strings with minimum items 0) under - // `.instrumentation/development.general.sanitization.url`. 
- // This list is a full override of the default sensitive query parameter keys, - // it is not a list of keys in addition to the defaults. - // - // When a query string value is redacted, the query string key SHOULD still be - // preserved, e.g. - // `https://www.example.com/path?color=blue&sig=REDACTED`. - // - // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 - // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token - // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" semantic - // conventions. It represents the unmodified original URL as seen in the event - // source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", - // "search?q=OpenTelemetry" - // Note: In network monitoring, the observed URL may be a full URL, whereas in - // access logs, the URL is often just represented as a path. This field is meant - // to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case password and - // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. - URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI path] component. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "/search" - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - // - // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full`. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI query] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "q=OpenTelemetry" - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - // - // - // Query string values for the following keys SHOULD be redacted by default and - // replaced by the value `REDACTED`: - // - // - [`AWSAccessKeyId`] - // - [`Signature`] - // - [`sig`] - // - [`X-Goog-Signature`] - // - // This list is subject to change over time. - // - // Matching of query parameter keys against the sensitive list SHOULD be - // case-sensitive. - // - // Instrumentation MAY provide a way to override this list via declarative - // configuration. - // If so, it SHOULD use the `sensitive_query_parameters` property - // (an array of case-sensitive strings with minimum items 0) under - // `.instrumentation/development.general.sanitization.url`. - // This list is a full override of the default sensitive query parameter keys, - // it is not a list of keys in addition to the defaults. - // - // When a query string value is redacted, the query string key SHOULD still be - // preserved, e.g. - // `q=OpenTelemetry&sig=REDACTED`. 
- // - // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 - // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token - // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered url domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.com", "foo.co.uk" - // Note: This value can be determined precisely with the [public suffix list]. - // For example, the registered domain for `foo.example.com` is `example.com`. - // Trying to approximate this by simply taking the last two labels will not work - // well for TLDs such as `co.uk`. - // - // [public suffix list]: https://publicsuffix.org/ - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic - // conventions. It represents the [URI scheme] component identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "https", "ftp", "telnet" - // - // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name includes all of the names except the host name under - // the registered_domain. 
In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain contains - // all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "east", "sub2.sub1" - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the - // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the - // subdomain field should contain `sub2.sub1`, with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" semantic - // conventions. It represents the low-cardinality template of an - // [absolute path reference]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/users/{id}", "/users/:id", "/users?id={id}" - // - // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective top - // level domain (eTLD), also known as the domain suffix, is the last part of the - // domain name. For example, the top level domain for example.com is `com`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "com", "co.uk" - // Note: This value can be determined precisely with the [public suffix list]. - // - // [public suffix list]: https://publicsuffix.org/ - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the `url.full`, -// such as "opentelemetry.io". 
-func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the "url.extension" -// semantic conventions. It represents the file extension extracted from the -// `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the "url.fragment" -// semantic conventions. It represents the [URI fragment] component. -// -// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" semantic -// conventions. It represents the absolute URL describing a network resource -// according to [RFC3986]. -// -// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the "url.original" -// semantic conventions. It represents the unmodified original URL as seen in the -// event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" semantic -// conventions. It represents the [URI path] component. -// -// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" semantic -// conventions. It represents the port extracted from the `url.full`. -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic -// conventions. It represents the [URI query] component. 
-// -// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered url domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI scheme] component identifying the -// used protocol. -// -// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" -// semantic conventions. It represents the subdomain portion of a fully qualified -// domain name includes all of the names except the host name under the -// registered_domain. In a partially qualified domain, or if the qualification -// level of the full name cannot be determined, subdomain contains all of the -// names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the "url.template" -// semantic conventions. It represents the low-cardinality template of an -// [absolute path reference]. -// -// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, is the last part of the -// domain name. 
For example, the top level domain for example.com is `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Namespace: user -const ( - // UserEmailKey is the attribute Key conforming to the "user.email" semantic - // conventions. It represents the user email address. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a.einstein@example.com" - UserEmailKey = attribute.Key("user.email") - - // UserFullNameKey is the attribute Key conforming to the "user.full_name" - // semantic conventions. It represents the user's full name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Albert Einstein" - UserFullNameKey = attribute.Key("user.full_name") - - // UserHashKey is the attribute Key conforming to the "user.hash" semantic - // conventions. It represents the unique user hash to correlate information for - // a user in anonymized form. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" - // Note: Useful if `user.id` or `user.name` contain confidential information and - // cannot be used. - UserHashKey = attribute.Key("user.hash") - - // UserIDKey is the attribute Key conforming to the "user.id" semantic - // conventions. It represents the unique identifier of the user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" - UserIDKey = attribute.Key("user.id") - - // UserNameKey is the attribute Key conforming to the "user.name" semantic - // conventions. It represents the short name or login/username of the user. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a.einstein" - UserNameKey = attribute.Key("user.name") - - // UserRolesKey is the attribute Key conforming to the "user.roles" semantic - // conventions. It represents the array of user roles at the time of the event. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "admin", "reporting_user" - UserRolesKey = attribute.Key("user.roles") -) - -// UserEmail returns an attribute KeyValue conforming to the "user.email" -// semantic conventions. It represents the user email address. -func UserEmail(val string) attribute.KeyValue { - return UserEmailKey.String(val) -} - -// UserFullName returns an attribute KeyValue conforming to the "user.full_name" -// semantic conventions. It represents the user's full name. -func UserFullName(val string) attribute.KeyValue { - return UserFullNameKey.String(val) -} - -// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic -// conventions. It represents the unique user hash to correlate information for a -// user in anonymized form. -func UserHash(val string) attribute.KeyValue { - return UserHashKey.String(val) -} - -// UserID returns an attribute KeyValue conforming to the "user.id" semantic -// conventions. It represents the unique identifier of the user. -func UserID(val string) attribute.KeyValue { - return UserIDKey.String(val) -} - -// UserName returns an attribute KeyValue conforming to the "user.name" semantic -// conventions. It represents the short name or login/username of the user. -func UserName(val string) attribute.KeyValue { - return UserNameKey.String(val) -} - -// UserRoles returns an attribute KeyValue conforming to the "user.roles" -// semantic conventions. It represents the array of user roles at the time of the -// event. 
-func UserRoles(val ...string) attribute.KeyValue { - return UserRolesKey.StringSlice(val) -} - -// Namespace: user_agent -const ( - // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" - // semantic conventions. It represents the name of the user-agent extracted from - // original. Usually refers to the browser's name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Safari", "YourApp" - // Note: [Example] of extracting browser's name from original string. In the - // case of using a user-agent for non-browser products, such as microservices - // with multiple names/versions inside the `user_agent.original`, the most - // significant name SHOULD be selected. In such a scenario it should align with - // `user_agent.version` - // - // [Example]: https://uaparser.dev/#demo - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of the - // [HTTP User-Agent] header sent by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 - // grpc-java-okhttp/1.27.2" - // - // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentOSNameKey is the attribute Key conforming to the - // "user_agent.os.name" semantic conventions. It represents the human readable - // operating system name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iOS", "Android", "Ubuntu" - // Note: For mapping user agent strings to OS names, libraries such as - // [ua-parser] can be utilized. - // - // [ua-parser]: https://github.com/ua-parser - UserAgentOSNameKey = attribute.Key("user_agent.os.name") - - // UserAgentOSVersionKey is the attribute Key conforming to the - // "user_agent.os.version" semantic conventions. It represents the version - // string of the operating system as defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.2.1", "18.04.1" - // Note: For mapping user agent strings to OS versions, libraries such as - // [ua-parser] can be utilized. - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - // [ua-parser]: https://github.com/ua-parser - UserAgentOSVersionKey = attribute.Key("user_agent.os.version") - - // UserAgentSyntheticTypeKey is the attribute Key conforming to the - // "user_agent.synthetic.type" semantic conventions. It represents the specifies - // the category of synthetic traffic, such as tests or bots. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This attribute MAY be derived from the contents of the - // `user_agent.original` attribute. Components that populate the attribute are - // responsible for determining what they consider to be synthetic bot or test - // traffic. This attribute can either be set for self-identification purposes, - // or on telemetry detected to be generated as a result of a synthetic request. - // This attribute is useful for distinguishing between genuine client traffic - // and synthetic traffic generated by bots or tests. 
- UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of the - // user-agent extracted from original. Usually refers to the browser's version. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.1.2", "1.0.0" - // Note: [Example] of extracting browser's version from original string. In the - // case of using a user-agent for non-browser products, such as microservices - // with multiple names/versions inside the `user_agent.original`, the most - // significant version SHOULD be selected. In such a scenario it should align - // with `user_agent.name` - // - // [Example]: https://uaparser.dev/#demo - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP User-Agent] header sent by the client. -// -// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentOSName returns an attribute KeyValue conforming to the -// "user_agent.os.name" semantic conventions. It represents the human readable -// operating system name. -func UserAgentOSName(val string) attribute.KeyValue { - return UserAgentOSNameKey.String(val) -} - -// UserAgentOSVersion returns an attribute KeyValue conforming to the -// "user_agent.os.version" semantic conventions. 
It represents the version string -// of the operating system as defined in [Version Attributes]. -// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func UserAgentOSVersion(val string) attribute.KeyValue { - return UserAgentOSVersionKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version. -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// Enum values for user_agent.synthetic.type -var ( - // Bot source. - // Stability: development - UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") - // Synthetic test source. - // Stability: development - UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") -) - -// Namespace: vcs -const ( - // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" - // semantic conventions. It represents the ID of the change (pull request/merge - // request/changelist) if applicable. This is usually a unique (within - // repository) identifier generated by the VCS system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123" - VCSChangeIDKey = attribute.Key("vcs.change.id") - - // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" - // semantic conventions. It represents the state of the change (pull - // request/merge request/changelist). - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "open", "closed", "merged" - VCSChangeStateKey = attribute.Key("vcs.change.state") - - // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" - // semantic conventions. It represents the human readable title of the change - // (pull request/merge request/changelist). 
This title is often a brief summary - // of the change and may get merged in to a ref as the commit summary. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update - // dependency" - VCSChangeTitleKey = attribute.Key("vcs.change.title") - - // VCSLineChangeTypeKey is the attribute Key conforming to the - // "vcs.line_change.type" semantic conventions. It represents the type of line - // change being measured on a branch or change. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "added", "removed" - VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") - - // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" - // semantic conventions. It represents the group owner within the version - // control system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-org", "myteam", "business-unit" - VCSOwnerNameKey = attribute.Key("vcs.owner.name") - - // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" - // semantic conventions. It represents the name of the version control system - // provider. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "github", "gitlab", "gitea", "bitbucket" - VCSProviderNameKey = attribute.Key("vcs.provider.name") - - // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" - // semantic conventions. It represents the name of the [reference] such as - // **branch** or **tag** in the repository. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-feature-branch", "tag-1-test" - // Note: `base` refers to the starting point of a change. 
For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") - - // VCSRefBaseRevisionKey is the attribute Key conforming to the - // "vcs.ref.base.revision" semantic conventions. It represents the revision, - // literally [revised version], The revision most often refers to a commit - // object in Git, or a revision number in SVN. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", - // "main", "123", "HEAD" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. The - // revision can be a full [hash value (see - // glossary)], - // of the recorded change to a ref within a repository pointing to a - // commit [commit] object. It does - // not necessarily have to be a hash; it can simply define a [revision - // number] - // which is an integer that is monotonically increasing. In cases where - // it is identical to the `ref.base.name`, it SHOULD still be included. - // It is up to the implementer to decide which value to set as the - // revision based on the VCS system and situational context. 
- // - // [revised version]: https://www.merriam-webster.com/dictionary/revision - // [hash value (see - // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [commit]: https://git-scm.com/docs/git-commit - // [revision - // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html - VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") - - // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" - // semantic conventions. It represents the type of the [reference] in the - // repository. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") - - // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" - // semantic conventions. It represents the name of the [reference] such as - // **branch** or **tag** in the repository. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-feature-branch", "tag-1-test" - // Note: `head` refers to where you are right now; the current reference at a - // given time. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") - - // VCSRefHeadRevisionKey is the attribute Key conforming to the - // "vcs.ref.head.revision" semantic conventions. It represents the revision, - // literally [revised version], The revision most often refers to a commit - // object in Git, or a revision number in SVN. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", - // "main", "123", "HEAD" - // Note: `head` refers to where you are right now; the current reference at a - // given time.The revision can be a full [hash value (see - // glossary)], - // of the recorded change to a ref within a repository pointing to a - // commit [commit] object. It does - // not necessarily have to be a hash; it can simply define a [revision - // number] - // which is an integer that is monotonically increasing. In cases where - // it is identical to the `ref.head.name`, it SHOULD still be included. - // It is up to the implementer to decide which value to set as the - // revision based on the VCS system and situational context. - // - // [revised version]: https://www.merriam-webster.com/dictionary/revision - // [hash value (see - // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [commit]: https://git-scm.com/docs/git-commit - // [revision - // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html - VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") - - // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" - // semantic conventions. It represents the type of the [reference] in the - // repository. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // Note: `head` refers to where you are right now; the current reference at a - // given time. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") - - // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic - // conventions. It represents the type of the [reference] in the repository. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefTypeKey = attribute.Key("vcs.ref.type") - - // VCSRepositoryNameKey is the attribute Key conforming to the - // "vcs.repository.name" semantic conventions. It represents the human readable - // name of the repository. It SHOULD NOT include any additional identifier like - // Group/SubGroup in GitLab or organization in GitHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "semantic-conventions", "my-cool-repo" - // Note: Due to it only being the name, it can clash with forks of the same - // repository if collecting telemetry across multiple orgs or groups in - // the same backends. - VCSRepositoryNameKey = attribute.Key("vcs.repository.name") - - // VCSRepositoryURLFullKey is the attribute Key conforming to the - // "vcs.repository.url.full" semantic conventions. It represents the - // [canonical URL] of the repository providing the complete HTTP(S) address in - // order to locate and identify the repository through a browser. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/opentelemetry/open-telemetry-collector-contrib", - // "https://gitlab.com/my-org/my-project/my-projects-project/repo" - // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include - // the `.git` extension. - // - // [canonical URL]: https://support.google.com/webmasters/answer/10347851 - VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") - - // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the - // "vcs.revision_delta.direction" semantic conventions. It represents the type - // of revision comparison. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ahead", "behind" - VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") -) - -// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" -// semantic conventions. It represents the ID of the change (pull request/merge -// request/changelist) if applicable. This is usually a unique (within -// repository) identifier generated by the VCS system. -func VCSChangeID(val string) attribute.KeyValue { - return VCSChangeIDKey.String(val) -} - -// VCSChangeTitle returns an attribute KeyValue conforming to the -// "vcs.change.title" semantic conventions. It represents the human readable -// title of the change (pull request/merge request/changelist). This title is -// often a brief summary of the change and may get merged in to a ref as the -// commit summary. -func VCSChangeTitle(val string) attribute.KeyValue { - return VCSChangeTitleKey.String(val) -} - -// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" -// semantic conventions. It represents the group owner within the version control -// system. -func VCSOwnerName(val string) attribute.KeyValue { - return VCSOwnerNameKey.String(val) -} - -// VCSRefBaseName returns an attribute KeyValue conforming to the -// "vcs.ref.base.name" semantic conventions. It represents the name of the -// [reference] such as **branch** or **tag** in the repository. -// -// [reference]: https://git-scm.com/docs/gitglossary#def_ref -func VCSRefBaseName(val string) attribute.KeyValue { - return VCSRefBaseNameKey.String(val) -} - -// VCSRefBaseRevision returns an attribute KeyValue conforming to the -// "vcs.ref.base.revision" semantic conventions. It represents the revision, -// literally [revised version], The revision most often refers to a commit object -// in Git, or a revision number in SVN. 
-// -// [revised version]: https://www.merriam-webster.com/dictionary/revision -func VCSRefBaseRevision(val string) attribute.KeyValue { - return VCSRefBaseRevisionKey.String(val) -} - -// VCSRefHeadName returns an attribute KeyValue conforming to the -// "vcs.ref.head.name" semantic conventions. It represents the name of the -// [reference] such as **branch** or **tag** in the repository. -// -// [reference]: https://git-scm.com/docs/gitglossary#def_ref -func VCSRefHeadName(val string) attribute.KeyValue { - return VCSRefHeadNameKey.String(val) -} - -// VCSRefHeadRevision returns an attribute KeyValue conforming to the -// "vcs.ref.head.revision" semantic conventions. It represents the revision, -// literally [revised version], The revision most often refers to a commit object -// in Git, or a revision number in SVN. -// -// [revised version]: https://www.merriam-webster.com/dictionary/revision -func VCSRefHeadRevision(val string) attribute.KeyValue { - return VCSRefHeadRevisionKey.String(val) -} - -// VCSRepositoryName returns an attribute KeyValue conforming to the -// "vcs.repository.name" semantic conventions. It represents the human readable -// name of the repository. It SHOULD NOT include any additional identifier like -// Group/SubGroup in GitLab or organization in GitHub. -func VCSRepositoryName(val string) attribute.KeyValue { - return VCSRepositoryNameKey.String(val) -} - -// VCSRepositoryURLFull returns an attribute KeyValue conforming to the -// "vcs.repository.url.full" semantic conventions. It represents the -// [canonical URL] of the repository providing the complete HTTP(S) address in -// order to locate and identify the repository through a browser. -// -// [canonical URL]: https://support.google.com/webmasters/answer/10347851 -func VCSRepositoryURLFull(val string) attribute.KeyValue { - return VCSRepositoryURLFullKey.String(val) -} - -// Enum values for vcs.change.state -var ( - // Open means the change is currently active and under review. 
It hasn't been - // merged into the target branch yet, and it's still possible to make changes or - // add comments. - // Stability: development - VCSChangeStateOpen = VCSChangeStateKey.String("open") - // WIP (work-in-progress, draft) means the change is still in progress and not - // yet ready for a full review. It might still undergo significant changes. - // Stability: development - VCSChangeStateWip = VCSChangeStateKey.String("wip") - // Closed means the merge request has been closed without merging. This can - // happen for various reasons, such as the changes being deemed unnecessary, the - // issue being resolved in another way, or the author deciding to withdraw the - // request. - // Stability: development - VCSChangeStateClosed = VCSChangeStateKey.String("closed") - // Merged indicates that the change has been successfully integrated into the - // target codebase. - // Stability: development - VCSChangeStateMerged = VCSChangeStateKey.String("merged") -) - -// Enum values for vcs.line_change.type -var ( - // How many lines were added. - // Stability: development - VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") - // How many lines were removed. 
- // Stability: development - VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") -) - -// Enum values for vcs.provider.name -var ( - // [GitHub] - // Stability: development - // - // [GitHub]: https://github.com - VCSProviderNameGithub = VCSProviderNameKey.String("github") - // [GitLab] - // Stability: development - // - // [GitLab]: https://gitlab.com - VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") - // [Gitea] - // Stability: development - // - // [Gitea]: https://gitea.io - VCSProviderNameGitea = VCSProviderNameKey.String("gitea") - // [Bitbucket] - // Stability: development - // - // [Bitbucket]: https://bitbucket.org - VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") -) - -// Enum values for vcs.ref.base.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") -) - -// Enum values for vcs.ref.head.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") -) - -// Enum values for vcs.ref.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefTypeBranch = VCSRefTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: 
https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefTypeTag = VCSRefTypeKey.String("tag") -) - -// Enum values for vcs.revision_delta.direction -var ( - // How many revisions the change is behind the target ref. - // Stability: development - VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") - // How many revisions the change is ahead of the target ref. - // Stability: development - VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") -) - -// Namespace: webengine -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the additional - // description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final" - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "WildFly" - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of the - // web engine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "21.0.0" - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition information). 
-func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" -// semantic conventions. It represents the name of the web engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the web -// engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Namespace: zos -const ( - // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic - // conventions. It represents the System Management Facility (SMF) Identifier - // uniquely identified a z/OS system within a SYSPLEX or mainframe environment - // and is used for system and performance analysis. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "SYS1" - ZOSSmfIDKey = attribute.Key("zos.smf.id") - - // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name" - // semantic conventions. It represents the name of the SYSPLEX to which the z/OS - // system belongs too. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "SYSPLEX1" - ZOSSysplexNameKey = attribute.Key("zos.sysplex.name") -) - -// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic -// conventions. It represents the System Management Facility (SMF) Identifier -// uniquely identified a z/OS system within a SYSPLEX or mainframe environment -// and is used for system and performance analysis. -func ZOSSmfID(val string) attribute.KeyValue { - return ZOSSmfIDKey.String(val) -} - -// ZOSSysplexName returns an attribute KeyValue conforming to the -// "zos.sysplex.name" semantic conventions. 
It represents the name of the SYSPLEX -// to which the z/OS system belongs too. -func ZOSSysplexName(val string) attribute.KeyValue { - return ZOSSysplexNameKey.String(val) -} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go deleted file mode 100644 index c5c41e4d27..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.40.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go deleted file mode 100644 index 6d26e52821..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/error_type.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" - -import ( - "errors" - "reflect" - - "go.opentelemetry.io/otel/attribute" -) - -// ErrorType returns an [attribute.KeyValue] identifying the error type of err. -// -// If err is nil, the returned attribute has the default value -// [ErrorTypeOther]. -// -// If err or one of the errors in its chain has the method -// -// ErrorType() string -// -// the returned attribute has that method's return value. If multiple errors in -// the chain implement this method, the value from the first match found by -// [errors.As] is used. Otherwise, the returned attribute has a value derived -// from the concrete type of err. 
-// -// The key of the returned attribute is [ErrorTypeKey]. -func ErrorType(err error) attribute.KeyValue { - if err == nil { - return ErrorTypeOther - } - - return ErrorTypeKey.String(errorType(err)) -} - -func errorType(err error) string { - var s string - if et, ok := err.(interface{ ErrorType() string }); ok { - // Fast path: check the top-level error first. - s = et.ErrorType() - } else { - // Fallback: search the error chain for an ErrorType method. - var et interface{ ErrorType() string } - if errors.As(err, &et) { - // Prioritize the ErrorType method if available. - s = et.ErrorType() - } - } - if s == "" { - // Fallback to reflection if the ErrorType method is not supported or - // returns an empty value. - - t := reflect.TypeOf(err) - pkg, name := t.PkgPath(), t.Name() - if pkg != "" && name != "" { - s = pkg + "." + name - } else { - // The type has no package path or name (predeclared, not-defined, - // or alias for a not-defined type). - // - // This is not guaranteed to be unique, but is a best effort. - s = t.String() - } - } - return s -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go deleted file mode 100644 index 6a26231a1a..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. 
- ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go deleted file mode 100644 index 901da86985..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/otelconv/metric.go +++ /dev/null @@ -1,2222 +0,0 @@ -// Code generated from semantic convention specification. DO NOT EDIT. - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package otelconv provides types and functionality for OpenTelemetry semantic -// conventions in the "otel" namespace. -package otelconv - -import ( - "context" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" -) - -var ( - addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} - recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} -) - -// ErrorTypeAttr is an attribute conforming to the error.type semantic -// conventions. It represents the describes a class of error the operation ended -// with. -type ErrorTypeAttr string - -var ( - // ErrorTypeOther is a fallback error value to be used when the instrumentation - // doesn't define a custom value. - ErrorTypeOther ErrorTypeAttr = "_OTHER" -) - -// ComponentTypeAttr is an attribute conforming to the otel.component.type -// semantic conventions. It represents a name identifying the type of the -// OpenTelemetry component. -type ComponentTypeAttr string - -var ( - // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span - // processor. - ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor" - // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor. - ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor" - // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record - // processor. 
- ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" - // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record - // processor. - ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" - // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with - // protobuf serialization. - ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" - // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with - // protobuf serialization. - ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" - // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP - // with JSON serialization. - ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" - // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. - ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" - // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC - // with protobuf serialization. - ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" - // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP - // with protobuf serialization. - ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" - // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over - // HTTP with JSON serialization. - ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" - // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting - // metric reader. - ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" - // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC - // with protobuf serialization. 
- ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" - // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP - // with protobuf serialization. - ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" - // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP - // with JSON serialization. - ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" - // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric - // exporter over HTTP with the default text-based format. - ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" -) - -// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin -// semantic conventions. It represents the determines whether the span has a -// parent span, and if so, [whether it is a remote parent]. -// -// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote -type SpanParentOriginAttr string - -var ( - // SpanParentOriginNone is the span does not have a parent, it is a root span. - SpanParentOriginNone SpanParentOriginAttr = "none" - // SpanParentOriginLocal is the span has a parent and the parent's span context - // [isRemote()] is false. - // - // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote - SpanParentOriginLocal SpanParentOriginAttr = "local" - // SpanParentOriginRemote is the span has a parent and the parent's span context - // [isRemote()] is true. - // - // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote - SpanParentOriginRemote SpanParentOriginAttr = "remote" -) - -// SpanSamplingResultAttr is an attribute conforming to the -// otel.span.sampling_result semantic conventions. It represents the result value -// of the sampler for this span. 
-type SpanSamplingResultAttr string - -var ( - // SpanSamplingResultDrop is the span is not sampled and not recording. - SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" - // SpanSamplingResultRecordOnly is the span is not sampled, but recording. - SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" - // SpanSamplingResultRecordAndSample is the span is sampled and recording. - SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" -) - -// SDKExporterLogExported is an instrument used to record metric values -// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It -// represents the number of log records for which the export has finished, either -// successful or failed. -type SDKExporterLogExported struct { - metric.Int64Counter -} - -var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{ - metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), - metric.WithUnit("{log_record}"), -} - -// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. -func NewSDKExporterLogExported( - m metric.Meter, - opt ...metric.Int64CounterOption, -) (SDKExporterLogExported, error) { - // Check if the meter is nil. - if m == nil { - return SDKExporterLogExported{noop.Int64Counter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKExporterLogExportedOpts - } else { - opt = append(opt, newSDKExporterLogExportedOpts...) - } - - i, err := m.Int64Counter( - "otel.sdk.exporter.log.exported", - opt..., - ) - if err != nil { - return SDKExporterLogExported{noop.Int64Counter{}}, err - } - return SDKExporterLogExported{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKExporterLogExported) Inst() metric.Int64Counter { - return m.Int64Counter -} - -// Name returns the semantic convention name of the instrument. 
-func (SDKExporterLogExported) Name() string { - return "otel.sdk.exporter.log.exported" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKExporterLogExported) Unit() string { - return "{log_record}" -} - -// Description returns the semantic convention description of the instrument -func (SDKExporterLogExported) Description() string { - return "The number of log records for which the export has finished, either successful or failed." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -// For exporters with partial success semantics (e.g. OTLP with -// `rejected_log_records`), rejected log records MUST count as failed and only -// non-rejected log records count as success. -// If no rejection reason is available, `rejected` SHOULD be used as value for -// `error.type`. -func (m SDKExporterLogExported) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -// For exporters with partial success semantics (e.g. OTLP with -// `rejected_log_records`), rejected log records MUST count as failed and only -// non-rejected log records count as success. -// If no rejection reason is available, `rejected` SHOULD be used as value for -// `error.type`. 
-func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AttrErrorType returns an optional attribute for the "error.type" semantic -// convention. It represents the describes a class of error the operation ended -// with. -func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { - return attribute.String("error.type", string(val)) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// AttrServerAddress returns an optional attribute for the "server.address" -// semantic convention. It represents the server domain name if available without -// reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { - return attribute.String("server.address", val) -} - -// AttrServerPort returns an optional attribute for the "server.port" semantic -// convention. It represents the server port number. 
-func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { - return attribute.Int("server.port", val) -} - -// SDKExporterLogInflight is an instrument used to record metric values -// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It -// represents the number of log records which were passed to the exporter, but -// that have not been exported yet (neither successful, nor failed). -type SDKExporterLogInflight struct { - metric.Int64UpDownCounter -} - -var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{log_record}"), -} - -// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. -func NewSDKExporterLogInflight( - m metric.Meter, - opt ...metric.Int64UpDownCounterOption, -) (SDKExporterLogInflight, error) { - // Check if the meter is nil. - if m == nil { - return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKExporterLogInflightOpts - } else { - opt = append(opt, newSDKExporterLogInflightOpts...) - } - - i, err := m.Int64UpDownCounter( - "otel.sdk.exporter.log.inflight", - opt..., - ) - if err != nil { - return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err - } - return SDKExporterLogInflight{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { - return m.Int64UpDownCounter -} - -// Name returns the semantic convention name of the instrument. 
-func (SDKExporterLogInflight) Name() string { - return "otel.sdk.exporter.log.inflight" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKExporterLogInflight) Unit() string { - return "{log_record}" -} - -// Description returns the semantic convention description of the instrument -func (SDKExporterLogInflight) Description() string { - return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -func (m SDKExporterLogInflight) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64UpDownCounter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64UpDownCounter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64UpDownCounter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64UpDownCounter.Add(ctx, incr, *o...) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. 
-func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// AttrServerAddress returns an optional attribute for the "server.address" -// semantic convention. It represents the server domain name if available without -// reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { - return attribute.String("server.address", val) -} - -// AttrServerPort returns an optional attribute for the "server.port" semantic -// convention. It represents the server port number. -func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { - return attribute.Int("server.port", val) -} - -// SDKExporterMetricDataPointExported is an instrument used to record metric -// values conforming to the "otel.sdk.exporter.metric_data_point.exported" -// semantic conventions. It represents the number of metric data points for which -// the export has finished, either successful or failed. -type SDKExporterMetricDataPointExported struct { - metric.Int64Counter -} - -var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{ - metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), - metric.WithUnit("{data_point}"), -} - -// NewSDKExporterMetricDataPointExported returns a new -// SDKExporterMetricDataPointExported instrument. 
-func NewSDKExporterMetricDataPointExported( - m metric.Meter, - opt ...metric.Int64CounterOption, -) (SDKExporterMetricDataPointExported, error) { - // Check if the meter is nil. - if m == nil { - return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKExporterMetricDataPointExportedOpts - } else { - opt = append(opt, newSDKExporterMetricDataPointExportedOpts...) - } - - i, err := m.Int64Counter( - "otel.sdk.exporter.metric_data_point.exported", - opt..., - ) - if err != nil { - return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err - } - return SDKExporterMetricDataPointExported{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { - return m.Int64Counter -} - -// Name returns the semantic convention name of the instrument. -func (SDKExporterMetricDataPointExported) Name() string { - return "otel.sdk.exporter.metric_data_point.exported" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKExporterMetricDataPointExported) Unit() string { - return "{data_point}" -} - -// Description returns the semantic convention description of the instrument -func (SDKExporterMetricDataPointExported) Description() string { - return "The number of metric data points for which the export has finished, either successful or failed." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -// For exporters with partial success semantics (e.g. OTLP with -// `rejected_data_points`), rejected data points MUST count as failed and only -// non-rejected data points count as success. -// If no rejection reason is available, `rejected` SHOULD be used as value for -// `error.type`. 
-func (m SDKExporterMetricDataPointExported) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -// For exporters with partial success semantics (e.g. OTLP with -// `rejected_data_points`), rejected data points MUST count as failed and only -// non-rejected data points count as success. -// If no rejection reason is available, `rejected` SHOULD be used as value for -// `error.type`. -func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AttrErrorType returns an optional attribute for the "error.type" semantic -// convention. It represents the describes a class of error the operation ended -// with. -func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { - return attribute.String("error.type", string(val)) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. 
-func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// AttrServerAddress returns an optional attribute for the "server.address" -// semantic convention. It represents the server domain name if available without -// reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { - return attribute.String("server.address", val) -} - -// AttrServerPort returns an optional attribute for the "server.port" semantic -// convention. It represents the server port number. -func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { - return attribute.Int("server.port", val) -} - -// SDKExporterMetricDataPointInflight is an instrument used to record metric -// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" -// semantic conventions. It represents the number of metric data points which -// were passed to the exporter, but that have not been exported yet (neither -// successful, nor failed). -type SDKExporterMetricDataPointInflight struct { - metric.Int64UpDownCounter -} - -var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{data_point}"), -} - -// NewSDKExporterMetricDataPointInflight returns a new -// SDKExporterMetricDataPointInflight instrument. 
-func NewSDKExporterMetricDataPointInflight( - m metric.Meter, - opt ...metric.Int64UpDownCounterOption, -) (SDKExporterMetricDataPointInflight, error) { - // Check if the meter is nil. - if m == nil { - return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKExporterMetricDataPointInflightOpts - } else { - opt = append(opt, newSDKExporterMetricDataPointInflightOpts...) - } - - i, err := m.Int64UpDownCounter( - "otel.sdk.exporter.metric_data_point.inflight", - opt..., - ) - if err != nil { - return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err - } - return SDKExporterMetricDataPointInflight{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { - return m.Int64UpDownCounter -} - -// Name returns the semantic convention name of the instrument. -func (SDKExporterMetricDataPointInflight) Name() string { - return "otel.sdk.exporter.metric_data_point.inflight" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKExporterMetricDataPointInflight) Unit() string { - return "{data_point}" -} - -// Description returns the semantic convention description of the instrument -func (SDKExporterMetricDataPointInflight) Description() string { - return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. 
-func (m SDKExporterMetricDataPointInflight) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64UpDownCounter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64UpDownCounter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64UpDownCounter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64UpDownCounter.Add(ctx, incr, *o...) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// AttrServerAddress returns an optional attribute for the "server.address" -// semantic convention. It represents the server domain name if available without -// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
-func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { - return attribute.String("server.address", val) -} - -// AttrServerPort returns an optional attribute for the "server.port" semantic -// convention. It represents the server port number. -func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { - return attribute.Int("server.port", val) -} - -// SDKExporterOperationDuration is an instrument used to record metric values -// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. -// It represents the duration of exporting a batch of telemetry records. -type SDKExporterOperationDuration struct { - metric.Float64Histogram -} - -var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{ - metric.WithDescription("The duration of exporting a batch of telemetry records."), - metric.WithUnit("s"), -} - -// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration -// instrument. -func NewSDKExporterOperationDuration( - m metric.Meter, - opt ...metric.Float64HistogramOption, -) (SDKExporterOperationDuration, error) { - // Check if the meter is nil. - if m == nil { - return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil - } - - if len(opt) == 0 { - opt = newSDKExporterOperationDurationOpts - } else { - opt = append(opt, newSDKExporterOperationDurationOpts...) - } - - i, err := m.Float64Histogram( - "otel.sdk.exporter.operation.duration", - opt..., - ) - if err != nil { - return SDKExporterOperationDuration{noop.Float64Histogram{}}, err - } - return SDKExporterOperationDuration{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { - return m.Float64Histogram -} - -// Name returns the semantic convention name of the instrument. 
-func (SDKExporterOperationDuration) Name() string { - return "otel.sdk.exporter.operation.duration" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKExporterOperationDuration) Unit() string { - return "s" -} - -// Description returns the semantic convention description of the instrument -func (SDKExporterOperationDuration) Description() string { - return "The duration of exporting a batch of telemetry records." -} - -// Record records val to the current distribution for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// This metric defines successful operations using the full success definitions -// for [http] -// and [grpc]. Anything else is defined as an unsuccessful operation. For -// successful -// operations, `error.type` MUST NOT be set. For unsuccessful export operations, -// `error.type` MUST contain a relevant failure cause. -// -// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 -// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success -func (m SDKExporterOperationDuration) Record( - ctx context.Context, - val float64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Float64Histogram.Record(ctx, val) - return - } - - o := recOptPool.Get().(*[]metric.RecordOption) - defer func() { - *o = (*o)[:0] - recOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Float64Histogram.Record(ctx, val, *o...) -} - -// RecordSet records val to the current distribution for set. -// -// This metric defines successful operations using the full success definitions -// for [http] -// and [grpc]. Anything else is defined as an unsuccessful operation. For -// successful -// operations, `error.type` MUST NOT be set. For unsuccessful export operations, -// `error.type` MUST contain a relevant failure cause. 
-// -// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 -// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success -func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { - if set.Len() == 0 { - m.Float64Histogram.Record(ctx, val) - return - } - - o := recOptPool.Get().(*[]metric.RecordOption) - defer func() { - *o = (*o)[:0] - recOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Float64Histogram.Record(ctx, val, *o...) -} - -// AttrErrorType returns an optional attribute for the "error.type" semantic -// convention. It represents the describes a class of error the operation ended -// with. -func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { - return attribute.String("error.type", string(val)) -} - -// AttrHTTPResponseStatusCode returns an optional attribute for the -// "http.response.status_code" semantic convention. It represents the HTTP status -// code of the last HTTP request performed in scope of this export call. -func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue { - return attribute.Int("http.response.status_code", val) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. 
-func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// AttrRPCResponseStatusCode returns an optional attribute for the -// "rpc.response.status_code" semantic convention. It represents the gRPC status -// code of the last gRPC request performed in scope of this export call. -func (SDKExporterOperationDuration) AttrRPCResponseStatusCode(val string) attribute.KeyValue { - return attribute.String("rpc.response.status_code", val) -} - -// AttrServerAddress returns an optional attribute for the "server.address" -// semantic convention. It represents the server domain name if available without -// reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { - return attribute.String("server.address", val) -} - -// AttrServerPort returns an optional attribute for the "server.port" semantic -// convention. It represents the server port number. -func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { - return attribute.Int("server.port", val) -} - -// SDKExporterSpanExported is an instrument used to record metric values -// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It -// represents the number of spans for which the export has finished, either -// successful or failed. -type SDKExporterSpanExported struct { - metric.Int64Counter -} - -var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{ - metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), - metric.WithUnit("{span}"), -} - -// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. -func NewSDKExporterSpanExported( - m metric.Meter, - opt ...metric.Int64CounterOption, -) (SDKExporterSpanExported, error) { - // Check if the meter is nil. 
- if m == nil { - return SDKExporterSpanExported{noop.Int64Counter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKExporterSpanExportedOpts - } else { - opt = append(opt, newSDKExporterSpanExportedOpts...) - } - - i, err := m.Int64Counter( - "otel.sdk.exporter.span.exported", - opt..., - ) - if err != nil { - return SDKExporterSpanExported{noop.Int64Counter{}}, err - } - return SDKExporterSpanExported{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKExporterSpanExported) Inst() metric.Int64Counter { - return m.Int64Counter -} - -// Name returns the semantic convention name of the instrument. -func (SDKExporterSpanExported) Name() string { - return "otel.sdk.exporter.span.exported" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKExporterSpanExported) Unit() string { - return "{span}" -} - -// Description returns the semantic convention description of the instrument -func (SDKExporterSpanExported) Description() string { - return "The number of spans for which the export has finished, either successful or failed." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` -// ), rejected spans MUST count as failed and only non-rejected spans count as -// success. -// If no rejection reason is available, `rejected` SHOULD be used as value for -// `error.type`. 
-func (m SDKExporterSpanExported) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` -// ), rejected spans MUST count as failed and only non-rejected spans count as -// success. -// If no rejection reason is available, `rejected` SHOULD be used as value for -// `error.type`. -func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AttrErrorType returns an optional attribute for the "error.type" semantic -// convention. It represents the describes a class of error the operation ended -// with. -func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { - return attribute.String("error.type", string(val)) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. 
-func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// AttrServerAddress returns an optional attribute for the "server.address" -// semantic convention. It represents the server domain name if available without -// reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue { - return attribute.String("server.address", val) -} - -// AttrServerPort returns an optional attribute for the "server.port" semantic -// convention. It represents the server port number. -func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue { - return attribute.Int("server.port", val) -} - -// SDKExporterSpanInflight is an instrument used to record metric values -// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It -// represents the number of spans which were passed to the exporter, but that -// have not been exported yet (neither successful, nor failed). -type SDKExporterSpanInflight struct { - metric.Int64UpDownCounter -} - -var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{span}"), -} - -// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. -func NewSDKExporterSpanInflight( - m metric.Meter, - opt ...metric.Int64UpDownCounterOption, -) (SDKExporterSpanInflight, error) { - // Check if the meter is nil. 
- if m == nil { - return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKExporterSpanInflightOpts - } else { - opt = append(opt, newSDKExporterSpanInflightOpts...) - } - - i, err := m.Int64UpDownCounter( - "otel.sdk.exporter.span.inflight", - opt..., - ) - if err != nil { - return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err - } - return SDKExporterSpanInflight{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { - return m.Int64UpDownCounter -} - -// Name returns the semantic convention name of the instrument. -func (SDKExporterSpanInflight) Name() string { - return "otel.sdk.exporter.span.inflight" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKExporterSpanInflight) Unit() string { - return "{span}" -} - -// Description returns the semantic convention description of the instrument -func (SDKExporterSpanInflight) Description() string { - return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful exports, `error.type` MUST NOT be set. For failed exports, -// `error.type` MUST contain the failure cause. -func (m SDKExporterSpanInflight) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64UpDownCounter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64UpDownCounter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// For successful exports, `error.type` MUST NOT be set. 
For failed exports, -// `error.type` MUST contain the failure cause. -func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64UpDownCounter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64UpDownCounter.Add(ctx, incr, *o...) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// AttrServerAddress returns an optional attribute for the "server.address" -// semantic convention. It represents the server domain name if available without -// reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { - return attribute.String("server.address", val) -} - -// AttrServerPort returns an optional attribute for the "server.port" semantic -// convention. It represents the server port number. -func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { - return attribute.Int("server.port", val) -} - -// SDKLogCreated is an instrument used to record metric values conforming to the -// "otel.sdk.log.created" semantic conventions. It represents the number of logs -// submitted to enabled SDK Loggers. 
-type SDKLogCreated struct { - metric.Int64Counter -} - -var newSDKLogCreatedOpts = []metric.Int64CounterOption{ - metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), - metric.WithUnit("{log_record}"), -} - -// NewSDKLogCreated returns a new SDKLogCreated instrument. -func NewSDKLogCreated( - m metric.Meter, - opt ...metric.Int64CounterOption, -) (SDKLogCreated, error) { - // Check if the meter is nil. - if m == nil { - return SDKLogCreated{noop.Int64Counter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKLogCreatedOpts - } else { - opt = append(opt, newSDKLogCreatedOpts...) - } - - i, err := m.Int64Counter( - "otel.sdk.log.created", - opt..., - ) - if err != nil { - return SDKLogCreated{noop.Int64Counter{}}, err - } - return SDKLogCreated{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKLogCreated) Inst() metric.Int64Counter { - return m.Int64Counter -} - -// Name returns the semantic convention name of the instrument. -func (SDKLogCreated) Name() string { - return "otel.sdk.log.created" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKLogCreated) Unit() string { - return "{log_record}" -} - -// Description returns the semantic convention description of the instrument -func (SDKLogCreated) Description() string { - return "The number of logs submitted to enabled SDK Loggers." -} - -// Add adds incr to the existing count for attrs. -func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { - if len(attrs) == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributes(attrs...)) - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. 
-func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64Counter.Add(ctx, incr, *o...) -} - -// SDKMetricReaderCollectionDuration is an instrument used to record metric -// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic -// conventions. It represents the duration of the collect operation of the metric -// reader. -type SDKMetricReaderCollectionDuration struct { - metric.Float64Histogram -} - -var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{ - metric.WithDescription("The duration of the collect operation of the metric reader."), - metric.WithUnit("s"), -} - -// NewSDKMetricReaderCollectionDuration returns a new -// SDKMetricReaderCollectionDuration instrument. -func NewSDKMetricReaderCollectionDuration( - m metric.Meter, - opt ...metric.Float64HistogramOption, -) (SDKMetricReaderCollectionDuration, error) { - // Check if the meter is nil. - if m == nil { - return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil - } - - if len(opt) == 0 { - opt = newSDKMetricReaderCollectionDurationOpts - } else { - opt = append(opt, newSDKMetricReaderCollectionDurationOpts...) - } - - i, err := m.Float64Histogram( - "otel.sdk.metric_reader.collection.duration", - opt..., - ) - if err != nil { - return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err - } - return SDKMetricReaderCollectionDuration{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram { - return m.Float64Histogram -} - -// Name returns the semantic convention name of the instrument. 
-func (SDKMetricReaderCollectionDuration) Name() string { - return "otel.sdk.metric_reader.collection.duration" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKMetricReaderCollectionDuration) Unit() string { - return "s" -} - -// Description returns the semantic convention description of the instrument -func (SDKMetricReaderCollectionDuration) Description() string { - return "The duration of the collect operation of the metric reader." -} - -// Record records val to the current distribution for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful collections, `error.type` MUST NOT be set. For failed -// collections, `error.type` SHOULD contain the failure cause. -// It can happen that metrics collection is successful for some MetricProducers, -// while others fail. In that case `error.type` SHOULD be set to any of the -// failure causes. -func (m SDKMetricReaderCollectionDuration) Record( - ctx context.Context, - val float64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Float64Histogram.Record(ctx, val) - return - } - - o := recOptPool.Get().(*[]metric.RecordOption) - defer func() { - *o = (*o)[:0] - recOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Float64Histogram.Record(ctx, val, *o...) -} - -// RecordSet records val to the current distribution for set. -// -// For successful collections, `error.type` MUST NOT be set. For failed -// collections, `error.type` SHOULD contain the failure cause. -// It can happen that metrics collection is successful for some MetricProducers, -// while others fail. In that case `error.type` SHOULD be set to any of the -// failure causes. 
-func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { - if set.Len() == 0 { - m.Float64Histogram.Record(ctx, val) - return - } - - o := recOptPool.Get().(*[]metric.RecordOption) - defer func() { - *o = (*o)[:0] - recOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Float64Histogram.Record(ctx, val, *o...) -} - -// AttrErrorType returns an optional attribute for the "error.type" semantic -// convention. It represents the describes a class of error the operation ended -// with. -func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { - return attribute.String("error.type", string(val)) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// SDKProcessorLogProcessed is an instrument used to record metric values -// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It -// represents the number of log records for which the processing has finished, -// either successful or failed. 
-type SDKProcessorLogProcessed struct { - metric.Int64Counter -} - -var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{ - metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), - metric.WithUnit("{log_record}"), -} - -// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. -func NewSDKProcessorLogProcessed( - m metric.Meter, - opt ...metric.Int64CounterOption, -) (SDKProcessorLogProcessed, error) { - // Check if the meter is nil. - if m == nil { - return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKProcessorLogProcessedOpts - } else { - opt = append(opt, newSDKProcessorLogProcessedOpts...) - } - - i, err := m.Int64Counter( - "otel.sdk.processor.log.processed", - opt..., - ) - if err != nil { - return SDKProcessorLogProcessed{noop.Int64Counter{}}, err - } - return SDKProcessorLogProcessed{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { - return m.Int64Counter -} - -// Name returns the semantic convention name of the instrument. -func (SDKProcessorLogProcessed) Name() string { - return "otel.sdk.processor.log.processed" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKProcessorLogProcessed) Unit() string { - return "{log_record}" -} - -// Description returns the semantic convention description of the instrument -func (SDKProcessorLogProcessed) Description() string { - return "The number of log records for which the processing has finished, either successful or failed." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful processing, `error.type` MUST NOT be set. For failed -// processing, `error.type` MUST contain the failure cause. 
-// For the SDK Simple and Batching Log Record Processor a log record is -// considered to be processed already when it has been submitted to the exporter, -// not when the corresponding export call has finished. -func (m SDKProcessorLogProcessed) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// For successful processing, `error.type` MUST NOT be set. For failed -// processing, `error.type` MUST contain the failure cause. -// For the SDK Simple and Batching Log Record Processor a log record is -// considered to be processed already when it has been submitted to the exporter, -// not when the corresponding export call has finished. -func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AttrErrorType returns an optional attribute for the "error.type" semantic -// convention. It represents a low-cardinality description of the failure reason. -// SDK Batching Log Record Processors MUST use `queue_full` for log records -// dropped due to a full queue. -func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { - return attribute.String("error.type", string(val)) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. 
It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// SDKProcessorLogQueueCapacity is an instrument used to record metric values -// conforming to the "otel.sdk.processor.log.queue.capacity" semantic -// conventions. It represents the maximum number of log records the queue of a -// given instance of an SDK Log Record processor can hold. -type SDKProcessorLogQueueCapacity struct { - metric.Int64ObservableUpDownCounter -} - -var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), - metric.WithUnit("{log_record}"), -} - -// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity -// instrument. -func NewSDKProcessorLogQueueCapacity( - m metric.Meter, - opt ...metric.Int64ObservableUpDownCounterOption, -) (SDKProcessorLogQueueCapacity, error) { - // Check if the meter is nil. - if m == nil { - return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKProcessorLogQueueCapacityOpts - } else { - opt = append(opt, newSDKProcessorLogQueueCapacityOpts...) 
- } - - i, err := m.Int64ObservableUpDownCounter( - "otel.sdk.processor.log.queue.capacity", - opt..., - ) - if err != nil { - return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err - } - return SDKProcessorLogQueueCapacity{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { - return m.Int64ObservableUpDownCounter -} - -// Name returns the semantic convention name of the instrument. -func (SDKProcessorLogQueueCapacity) Name() string { - return "otel.sdk.processor.log.queue.capacity" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKProcessorLogQueueCapacity) Unit() string { - return "{log_record}" -} - -// Description returns the semantic convention description of the instrument -func (SDKProcessorLogQueueCapacity) Description() string { - return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold." -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// SDKProcessorLogQueueSize is an instrument used to record metric values -// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. 
It -// represents the number of log records in the queue of a given instance of an -// SDK log processor. -type SDKProcessorLogQueueSize struct { - metric.Int64ObservableUpDownCounter -} - -var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), - metric.WithUnit("{log_record}"), -} - -// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. -func NewSDKProcessorLogQueueSize( - m metric.Meter, - opt ...metric.Int64ObservableUpDownCounterOption, -) (SDKProcessorLogQueueSize, error) { - // Check if the meter is nil. - if m == nil { - return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKProcessorLogQueueSizeOpts - } else { - opt = append(opt, newSDKProcessorLogQueueSizeOpts...) - } - - i, err := m.Int64ObservableUpDownCounter( - "otel.sdk.processor.log.queue.size", - opt..., - ) - if err != nil { - return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err - } - return SDKProcessorLogQueueSize{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { - return m.Int64ObservableUpDownCounter -} - -// Name returns the semantic convention name of the instrument. -func (SDKProcessorLogQueueSize) Name() string { - return "otel.sdk.processor.log.queue.size" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKProcessorLogQueueSize) Unit() string { - return "{log_record}" -} - -// Description returns the semantic convention description of the instrument -func (SDKProcessorLogQueueSize) Description() string { - return "The number of log records in the queue of a given instance of an SDK log processor." -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. 
It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// SDKProcessorSpanProcessed is an instrument used to record metric values -// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It -// represents the number of spans for which the processing has finished, either -// successful or failed. -type SDKProcessorSpanProcessed struct { - metric.Int64Counter -} - -var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{ - metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), - metric.WithUnit("{span}"), -} - -// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed -// instrument. -func NewSDKProcessorSpanProcessed( - m metric.Meter, - opt ...metric.Int64CounterOption, -) (SDKProcessorSpanProcessed, error) { - // Check if the meter is nil. - if m == nil { - return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKProcessorSpanProcessedOpts - } else { - opt = append(opt, newSDKProcessorSpanProcessedOpts...) - } - - i, err := m.Int64Counter( - "otel.sdk.processor.span.processed", - opt..., - ) - if err != nil { - return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err - } - return SDKProcessorSpanProcessed{i}, nil -} - -// Inst returns the underlying metric instrument. 
-func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { - return m.Int64Counter -} - -// Name returns the semantic convention name of the instrument. -func (SDKProcessorSpanProcessed) Name() string { - return "otel.sdk.processor.span.processed" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKProcessorSpanProcessed) Unit() string { - return "{span}" -} - -// Description returns the semantic convention description of the instrument -func (SDKProcessorSpanProcessed) Description() string { - return "The number of spans for which the processing has finished, either successful or failed." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// For successful processing, `error.type` MUST NOT be set. For failed -// processing, `error.type` MUST contain the failure cause. -// For the SDK Simple and Batching Span Processor a span is considered to be -// processed already when it has been submitted to the exporter, not when the -// corresponding export call has finished. -func (m SDKProcessorSpanProcessed) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// For successful processing, `error.type` MUST NOT be set. For failed -// processing, `error.type` MUST contain the failure cause. -// For the SDK Simple and Batching Span Processor a span is considered to be -// processed already when it has been submitted to the exporter, not when the -// corresponding export call has finished. 
-func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AttrErrorType returns an optional attribute for the "error.type" semantic -// convention. It represents a low-cardinality description of the failure reason. -// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a -// full queue. -func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { - return attribute.String("error.type", string(val)) -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// SDKProcessorSpanQueueCapacity is an instrument used to record metric values -// conforming to the "otel.sdk.processor.span.queue.capacity" semantic -// conventions. It represents the maximum number of spans the queue of a given -// instance of an SDK span processor can hold. 
-type SDKProcessorSpanQueueCapacity struct { - metric.Int64ObservableUpDownCounter -} - -var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), - metric.WithUnit("{span}"), -} - -// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity -// instrument. -func NewSDKProcessorSpanQueueCapacity( - m metric.Meter, - opt ...metric.Int64ObservableUpDownCounterOption, -) (SDKProcessorSpanQueueCapacity, error) { - // Check if the meter is nil. - if m == nil { - return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKProcessorSpanQueueCapacityOpts - } else { - opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...) - } - - i, err := m.Int64ObservableUpDownCounter( - "otel.sdk.processor.span.queue.capacity", - opt..., - ) - if err != nil { - return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err - } - return SDKProcessorSpanQueueCapacity{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { - return m.Int64ObservableUpDownCounter -} - -// Name returns the semantic convention name of the instrument. -func (SDKProcessorSpanQueueCapacity) Name() string { - return "otel.sdk.processor.span.queue.capacity" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKProcessorSpanQueueCapacity) Unit() string { - return "{span}" -} - -// Description returns the semantic convention description of the instrument -func (SDKProcessorSpanQueueCapacity) Description() string { - return "The maximum number of spans the queue of a given instance of an SDK span processor can hold." -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. 
It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// SDKProcessorSpanQueueSize is an instrument used to record metric values -// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. -// It represents the number of spans in the queue of a given instance of an SDK -// span processor. -type SDKProcessorSpanQueueSize struct { - metric.Int64ObservableUpDownCounter -} - -var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), - metric.WithUnit("{span}"), -} - -// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize -// instrument. -func NewSDKProcessorSpanQueueSize( - m metric.Meter, - opt ...metric.Int64ObservableUpDownCounterOption, -) (SDKProcessorSpanQueueSize, error) { - // Check if the meter is nil. - if m == nil { - return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKProcessorSpanQueueSizeOpts - } else { - opt = append(opt, newSDKProcessorSpanQueueSizeOpts...) - } - - i, err := m.Int64ObservableUpDownCounter( - "otel.sdk.processor.span.queue.size", - opt..., - ) - if err != nil { - return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err - } - return SDKProcessorSpanQueueSize{i}, nil -} - -// Inst returns the underlying metric instrument. 
-func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { - return m.Int64ObservableUpDownCounter -} - -// Name returns the semantic convention name of the instrument. -func (SDKProcessorSpanQueueSize) Name() string { - return "otel.sdk.processor.span.queue.size" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKProcessorSpanQueueSize) Unit() string { - return "{span}" -} - -// Description returns the semantic convention description of the instrument -func (SDKProcessorSpanQueueSize) Description() string { - return "The number of spans in the queue of a given instance of an SDK span processor." -} - -// AttrComponentName returns an optional attribute for the "otel.component.name" -// semantic convention. It represents a name uniquely identifying the instance of -// the OpenTelemetry component within its containing SDK instance. -func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { - return attribute.String("otel.component.name", val) -} - -// AttrComponentType returns an optional attribute for the "otel.component.type" -// semantic convention. It represents a name identifying the type of the -// OpenTelemetry component. -func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { - return attribute.String("otel.component.type", string(val)) -} - -// SDKSpanLive is an instrument used to record metric values conforming to the -// "otel.sdk.span.live" semantic conventions. It represents the number of created -// spans with `recording=true` for which the end operation has not been called -// yet. -type SDKSpanLive struct { - metric.Int64UpDownCounter -} - -var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), - metric.WithUnit("{span}"), -} - -// NewSDKSpanLive returns a new SDKSpanLive instrument. 
-func NewSDKSpanLive( - m metric.Meter, - opt ...metric.Int64UpDownCounterOption, -) (SDKSpanLive, error) { - // Check if the meter is nil. - if m == nil { - return SDKSpanLive{noop.Int64UpDownCounter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKSpanLiveOpts - } else { - opt = append(opt, newSDKSpanLiveOpts...) - } - - i, err := m.Int64UpDownCounter( - "otel.sdk.span.live", - opt..., - ) - if err != nil { - return SDKSpanLive{noop.Int64UpDownCounter{}}, err - } - return SDKSpanLive{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { - return m.Int64UpDownCounter -} - -// Name returns the semantic convention name of the instrument. -func (SDKSpanLive) Name() string { - return "otel.sdk.span.live" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKSpanLive) Unit() string { - return "{span}" -} - -// Description returns the semantic convention description of the instrument -func (SDKSpanLive) Description() string { - return "The number of created spans with `recording=true` for which the end operation has not been called yet." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -func (m SDKSpanLive) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64UpDownCounter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64UpDownCounter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. 
-func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64UpDownCounter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64UpDownCounter.Add(ctx, incr, *o...) -} - -// AttrSpanSamplingResult returns an optional attribute for the -// "otel.span.sampling_result" semantic convention. It represents the result -// value of the sampler for this span. -func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { - return attribute.String("otel.span.sampling_result", string(val)) -} - -// SDKSpanStarted is an instrument used to record metric values conforming to the -// "otel.sdk.span.started" semantic conventions. It represents the number of -// created spans. -type SDKSpanStarted struct { - metric.Int64Counter -} - -var newSDKSpanStartedOpts = []metric.Int64CounterOption{ - metric.WithDescription("The number of created spans."), - metric.WithUnit("{span}"), -} - -// NewSDKSpanStarted returns a new SDKSpanStarted instrument. -func NewSDKSpanStarted( - m metric.Meter, - opt ...metric.Int64CounterOption, -) (SDKSpanStarted, error) { - // Check if the meter is nil. - if m == nil { - return SDKSpanStarted{noop.Int64Counter{}}, nil - } - - if len(opt) == 0 { - opt = newSDKSpanStartedOpts - } else { - opt = append(opt, newSDKSpanStartedOpts...) - } - - i, err := m.Int64Counter( - "otel.sdk.span.started", - opt..., - ) - if err != nil { - return SDKSpanStarted{noop.Int64Counter{}}, err - } - return SDKSpanStarted{i}, nil -} - -// Inst returns the underlying metric instrument. -func (m SDKSpanStarted) Inst() metric.Int64Counter { - return m.Int64Counter -} - -// Name returns the semantic convention name of the instrument. 
-func (SDKSpanStarted) Name() string { - return "otel.sdk.span.started" -} - -// Unit returns the semantic convention unit of the instrument -func (SDKSpanStarted) Unit() string { - return "{span}" -} - -// Description returns the semantic convention description of the instrument -func (SDKSpanStarted) Description() string { - return "The number of created spans." -} - -// Add adds incr to the existing count for attrs. -// -// All additional attrs passed are included in the recorded value. -// -// Implementations MUST record this metric for all spans, even for non-recording -// ones. -func (m SDKSpanStarted) Add( - ctx context.Context, - incr int64, - attrs ...attribute.KeyValue, -) { - if len(attrs) == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append( - *o, - metric.WithAttributes( - attrs..., - ), - ) - - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AddSet adds incr to the existing count for set. -// -// Implementations MUST record this metric for all spans, even for non-recording -// ones. -func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { - if set.Len() == 0 { - m.Int64Counter.Add(ctx, incr) - return - } - - o := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *o = (*o)[:0] - addOptPool.Put(o) - }() - - *o = append(*o, metric.WithAttributeSet(set)) - m.Int64Counter.Add(ctx, incr, *o...) -} - -// AttrSpanParentOrigin returns an optional attribute for the -// "otel.span.parent.origin" semantic convention. It represents the determines -// whether the span has a parent span, and if so, [whether it is a remote parent] -// . 
-// -// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote -func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue { - return attribute.String("otel.span.parent.origin", string(val)) -} - -// AttrSpanSamplingResult returns an optional attribute for the -// "otel.span.sampling_result" semantic convention. It represents the result -// value of the sampler for this span. -func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { - return attribute.String("otel.span.sampling_result", string(val)) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go deleted file mode 100644 index a07ffa3361..0000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.40.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.40.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.40.0" diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go deleted file mode 100644 index 6836c65478..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/trace" -) - -// Tracer creates a named tracer that implements Tracer interface. -// If the name is an empty string then provider uses default name. 
-// -// This is short for GetTracerProvider().Tracer(name, opts...) -func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - return GetTracerProvider().Tracer(name, opts...) -} - -// GetTracerProvider returns the registered global trace provider. -// If none is registered then an instance of NoopTracerProvider is returned. -// -// Use the trace provider to create a named tracer. E.g. -// -// tracer := otel.GetTracerProvider().Tracer("example.com/foo") -// -// or -// -// tracer := otel.Tracer("example.com/foo") -func GetTracerProvider() trace.TracerProvider { - return global.TracerProvider() -} - -// SetTracerProvider registers `tp` as the global trace provider. -func SetTracerProvider(tp trace.TracerProvider) { - global.SetTracerProvider(tp) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE deleted file mode 100644 index f1aee0f110..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ /dev/null @@ -1,231 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- - -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md deleted file mode 100644 index 58ccaba69b..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Trace API - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go deleted file mode 100644 index 9316fd0ac4..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ /dev/null @@ -1,662 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - "encoding/json" - "fmt" - "math" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - "unicode/utf8" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.40.0" - "go.opentelemetry.io/otel/trace/embedded" - "go.opentelemetry.io/otel/trace/internal/telemetry" -) - -// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider]. 
-// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument -// the process using the returned TracerProvider, all of the telemetry it -// produces will be processed and handled by that Instrumentation. By default, -// if no Instrumentation instruments the TracerProvider it will not generate -// any trace telemetry. -func newAutoTracerProvider() TracerProvider { return tracerProviderInstance } - -var tracerProviderInstance = new(autoTracerProvider) - -type autoTracerProvider struct{ embedded.TracerProvider } - -var _ TracerProvider = autoTracerProvider{} - -func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { - cfg := NewTracerConfig(opts...) - return autoTracer{ - name: name, - version: cfg.InstrumentationVersion(), - schemaURL: cfg.SchemaURL(), - } -} - -type autoTracer struct { - embedded.Tracer - - name, schemaURL, version string -} - -var _ Tracer = autoTracer{} - -func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { - var psc, sc SpanContext - sampled := true - span := new(autoSpan) - - // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &sc) - - span.sampled.Store(sampled) - span.spanContext = sc - - ctx = ContextWithSpan(ctx, span) - - if sampled { - // Only build traces if sampled. - cfg := NewSpanStartConfig(opts...) - span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) - } - - return ctx, span -} - -// Expected to be implemented in eBPF. -// -//go:noinline -func (*autoTracer) start( - ctx context.Context, - spanPtr *autoSpan, - psc *SpanContext, - sampled *bool, - sc *SpanContext, -) { - start(ctx, spanPtr, psc, sampled, sc) -} - -// start is used for testing. 
-var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {} - -func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) { - span := &telemetry.Span{ - TraceID: telemetry.TraceID(sc.TraceID()), - SpanID: telemetry.SpanID(sc.SpanID()), - Flags: uint32(sc.TraceFlags()), - TraceState: sc.TraceState().String(), - ParentSpanID: telemetry.SpanID(psc.SpanID()), - Name: name, - Kind: spanKind(cfg.SpanKind()), - } - - span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) - - links := cfg.Links() - if limit := maxSpan.Links; limit == 0 { - n := int64(len(links)) - if n > 0 { - span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. - } - } else { - if limit > 0 { - n := int64(max(len(links)-limit, 0)) - span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. - links = links[n:] - } - span.Links = convLinks(links) - } - - if t := cfg.Timestamp(); !t.IsZero() { - span.StartTime = cfg.Timestamp() - } else { - span.StartTime = time.Now() - } - - return &telemetry.Traces{ - ResourceSpans: []*telemetry.ResourceSpans{ - { - ScopeSpans: []*telemetry.ScopeSpans{ - { - Scope: &telemetry.Scope{ - Name: t.name, - Version: t.version, - }, - Spans: []*telemetry.Span{span}, - SchemaURL: t.schemaURL, - }, - }, - }, - }, - }, span -} - -func spanKind(kind SpanKind) telemetry.SpanKind { - switch kind { - case SpanKindInternal: - return telemetry.SpanKindInternal - case SpanKindServer: - return telemetry.SpanKindServer - case SpanKindClient: - return telemetry.SpanKindClient - case SpanKindProducer: - return telemetry.SpanKindProducer - case SpanKindConsumer: - return telemetry.SpanKindConsumer - } - return telemetry.SpanKind(0) // undefined. 
-} - -type autoSpan struct { - embedded.Span - - spanContext SpanContext - sampled atomic.Bool - - mu sync.Mutex - traces *telemetry.Traces - span *telemetry.Span -} - -func (s *autoSpan) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - // s.spanContext is immutable, do not acquire lock s.mu. - return s.spanContext -} - -func (s *autoSpan) IsRecording() bool { - if s == nil { - return false - } - - return s.sampled.Load() -} - -func (s *autoSpan) SetStatus(c codes.Code, msg string) { - if s == nil || !s.sampled.Load() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - if s.span.Status == nil { - s.span.Status = new(telemetry.Status) - } - - s.span.Status.Message = msg - - switch c { - case codes.Unset: - s.span.Status.Code = telemetry.StatusCodeUnset - case codes.Error: - s.span.Status.Code = telemetry.StatusCodeError - case codes.Ok: - s.span.Status.Code = telemetry.StatusCodeOK - } -} - -func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) { - if s == nil || !s.sampled.Load() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - limit := maxSpan.Attrs - if limit == 0 { - // No attributes allowed. - n := int64(len(attrs)) - if n > 0 { - s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. - } - return - } - - m := make(map[string]int) - for i, a := range s.span.Attrs { - m[a.Key] = i - } - - for _, a := range attrs { - val := convAttrValue(a.Value) - if val.Empty() { - s.span.DroppedAttrs++ - continue - } - - if idx, ok := m[string(a.Key)]; ok { - s.span.Attrs[idx] = telemetry.Attr{ - Key: string(a.Key), - Value: val, - } - } else if limit < 0 || len(s.span.Attrs) < limit { - s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ - Key: string(a.Key), - Value: val, - }) - m[string(a.Key)] = len(s.span.Attrs) - 1 - } else { - s.span.DroppedAttrs++ - } - } -} - -// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The -// number of dropped attributes is also returned. 
-func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { - n := len(attrs) - if limit == 0 { - var out uint32 - if n > 0 { - out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked. - } - return nil, out - } - - if limit < 0 { - // Unlimited. - return convAttrs(attrs), 0 - } - - if n < 0 { - n = 0 - } - - limit = min(n, limit) - return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked. -} - -func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { - if len(attrs) == 0 { - // Avoid allocations if not necessary. - return nil - } - - out := make([]telemetry.Attr, 0, len(attrs)) - for _, attr := range attrs { - key := string(attr.Key) - val := convAttrValue(attr.Value) - if val.Empty() { - continue - } - out = append(out, telemetry.Attr{Key: key, Value: val}) - } - return out -} - -func convAttrValue(value attribute.Value) telemetry.Value { - switch value.Type() { - case attribute.BOOL: - return telemetry.BoolValue(value.AsBool()) - case attribute.INT64: - return telemetry.Int64Value(value.AsInt64()) - case attribute.FLOAT64: - return telemetry.Float64Value(value.AsFloat64()) - case attribute.STRING: - v := truncate(maxSpan.AttrValueLen, value.AsString()) - return telemetry.StringValue(v) - case attribute.BOOLSLICE: - slice := value.AsBoolSlice() - out := make([]telemetry.Value, 0, len(slice)) - for _, v := range slice { - out = append(out, telemetry.BoolValue(v)) - } - return telemetry.SliceValue(out...) - case attribute.INT64SLICE: - slice := value.AsInt64Slice() - out := make([]telemetry.Value, 0, len(slice)) - for _, v := range slice { - out = append(out, telemetry.Int64Value(v)) - } - return telemetry.SliceValue(out...) - case attribute.FLOAT64SLICE: - slice := value.AsFloat64Slice() - out := make([]telemetry.Value, 0, len(slice)) - for _, v := range slice { - out = append(out, telemetry.Float64Value(v)) - } - return telemetry.SliceValue(out...) 
- case attribute.STRINGSLICE: - slice := value.AsStringSlice() - out := make([]telemetry.Value, 0, len(slice)) - for _, v := range slice { - v = truncate(maxSpan.AttrValueLen, v) - out = append(out, telemetry.StringValue(v)) - } - return telemetry.SliceValue(out...) - } - return telemetry.Value{} -} - -// truncate returns a truncated version of s such that it contains less than -// the limit number of characters. Truncation is applied by returning the limit -// number of valid characters contained in s. -// -// If limit is negative, it returns the original string. -// -// UTF-8 is supported. When truncating, all invalid characters are dropped -// before applying truncation. -// -// If s already contains less than the limit number of bytes, it is returned -// unchanged. No invalid characters are removed. -func truncate(limit int, s string) string { - // This prioritize performance in the following order based on the most - // common expected use-cases. - // - // - Short values less than the default limit (128). - // - Strings with valid encodings that exceed the limit. - // - No limit. - // - Strings with invalid encodings that exceed the limit. - if limit < 0 || len(s) <= limit { - return s - } - - // Optimistically, assume all valid UTF-8. - var b strings.Builder - count := 0 - for i, c := range s { - if c != utf8.RuneError { - count++ - if count > limit { - return s[:i] - } - continue - } - - _, size := utf8.DecodeRuneInString(s[i:]) - if size == 1 { - // Invalid encoding. - b.Grow(len(s) - 1) - _, _ = b.WriteString(s[:i]) - s = s[i:] - break - } - } - - // Fast-path, no invalid input. - if b.Cap() == 0 { - return s - } - - // Truncate while validating UTF-8. - for i := 0; i < len(s) && count < limit; { - c := s[i] - if c < utf8.RuneSelf { - // Optimization for single byte runes (common case). 
- _ = b.WriteByte(c) - i++ - count++ - continue - } - - _, size := utf8.DecodeRuneInString(s[i:]) - if size == 1 { - // We checked for all 1-byte runes above, this is a RuneError. - i++ - continue - } - - _, _ = b.WriteString(s[i : i+size]) - i += size - count++ - } - - return b.String() -} - -func (s *autoSpan) End(opts ...SpanEndOption) { - if s == nil || !s.sampled.Swap(false) { - return - } - - // s.end exists so the lock (s.mu) is not held while s.ended is called. - s.ended(s.end(opts)) -} - -func (s *autoSpan) end(opts []SpanEndOption) []byte { - s.mu.Lock() - defer s.mu.Unlock() - - cfg := NewSpanEndConfig(opts...) - if t := cfg.Timestamp(); !t.IsZero() { - s.span.EndTime = cfg.Timestamp() - } else { - s.span.EndTime = time.Now() - } - - b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. - return b -} - -// Expected to be implemented in eBPF. -// -//go:noinline -func (*autoSpan) ended(buf []byte) { ended(buf) } - -// ended is used for testing. -var ended = func([]byte) {} - -func (s *autoSpan) RecordError(err error, opts ...EventOption) { - if s == nil || err == nil || !s.sampled.Load() { - return - } - - cfg := NewEventConfig(opts...) - - attrs := cfg.Attributes() - attrs = append(attrs, - semconv.ExceptionType(typeStr(err)), - semconv.ExceptionMessage(err.Error()), - ) - if cfg.StackTrace() { - buf := make([]byte, 2048) - n := runtime.Stack(buf, false) - attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) - } - - s.mu.Lock() - defer s.mu.Unlock() - - s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) -} - -func typeStr(i any) string { - t := reflect.TypeOf(i) - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - return t.String() - } - return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) -} - -func (s *autoSpan) AddEvent(name string, opts ...EventOption) { - if s == nil || !s.sampled.Load() { - return - } - - cfg := NewEventConfig(opts...) 
- - s.mu.Lock() - defer s.mu.Unlock() - - s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) -} - -// addEvent adds an event with name and attrs at tStamp to the span. The span -// lock (s.mu) needs to be held by the caller. -func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { - limit := maxSpan.Events - - if limit == 0 { - s.span.DroppedEvents++ - return - } - - if limit > 0 && len(s.span.Events) == limit { - // Drop head while avoiding allocation of more capacity. - copy(s.span.Events[:limit-1], s.span.Events[1:]) - s.span.Events = s.span.Events[:limit-1] - s.span.DroppedEvents++ - } - - e := &telemetry.SpanEvent{Time: tStamp, Name: name} - e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) - - s.span.Events = append(s.span.Events, e) -} - -func (s *autoSpan) AddLink(link Link) { - if s == nil || !s.sampled.Load() { - return - } - - l := maxSpan.Links - - s.mu.Lock() - defer s.mu.Unlock() - - if l == 0 { - s.span.DroppedLinks++ - return - } - - if l > 0 && len(s.span.Links) == l { - // Drop head while avoiding allocation of more capacity. 
- copy(s.span.Links[:l-1], s.span.Links[1:]) - s.span.Links = s.span.Links[:l-1] - s.span.DroppedLinks++ - } - - s.span.Links = append(s.span.Links, convLink(link)) -} - -func convLinks(links []Link) []*telemetry.SpanLink { - out := make([]*telemetry.SpanLink, 0, len(links)) - for _, link := range links { - out = append(out, convLink(link)) - } - return out -} - -func convLink(link Link) *telemetry.SpanLink { - l := &telemetry.SpanLink{ - TraceID: telemetry.TraceID(link.SpanContext.TraceID()), - SpanID: telemetry.SpanID(link.SpanContext.SpanID()), - TraceState: link.SpanContext.TraceState().String(), - Flags: uint32(link.SpanContext.TraceFlags()), - } - l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) - - return l -} - -func (s *autoSpan) SetName(name string) { - if s == nil || !s.sampled.Load() { - return - } - - s.mu.Lock() - defer s.mu.Unlock() - - s.span.Name = name -} - -func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() } - -// maxSpan are the span limits resolved during startup. -var maxSpan = newSpanLimits() - -type spanLimits struct { - // Attrs is the number of allowed attributes for a span. - // - // This is resolved from the environment variable value for the - // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the - // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if - // that is not set, is used. - Attrs int - // AttrValueLen is the maximum attribute value length allowed for a span. - // - // This is resolved from the environment variable value for the - // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the - // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 - // if that is not set, is used. - AttrValueLen int - // Events is the number of allowed events for a span. - // - // This is resolved from the environment variable value for the - // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. 
- Events int - // EventAttrs is the number of allowed attributes for a span event. - // - // The is resolved from the environment variable value for the - // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. - EventAttrs int - // Links is the number of allowed Links for a span. - // - // This is resolved from the environment variable value for the - // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. - Links int - // LinkAttrs is the number of allowed attributes for a span link. - // - // This is resolved from the environment variable value for the - // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. - LinkAttrs int -} - -func newSpanLimits() spanLimits { - return spanLimits{ - Attrs: firstEnv( - 128, - "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", - "OTEL_ATTRIBUTE_COUNT_LIMIT", - ), - AttrValueLen: firstEnv( - -1, // Unlimited. - "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", - "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", - ), - Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), - EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), - Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), - LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), - } -} - -// firstEnv returns the parsed integer value of the first matching environment -// variable from keys. The defaultVal is returned if the value is not an -// integer or no match is found. -func firstEnv(defaultVal int, keys ...string) int { - for _, key := range keys { - strV := os.Getenv(key) - if strV == "" { - continue - } - - v, err := strconv.Atoi(strV) - if err == nil { - return v - } - // Ignore invalid environment variable. 
- } - - return defaultVal -} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go deleted file mode 100644 index d9ecef1cad..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "slices" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// TracerConfig is a group of options for a Tracer. -type TracerConfig struct { - instrumentationVersion string - // Schema URL of the telemetry emitted by the Tracer. - schemaURL string - attrs attribute.Set -} - -// InstrumentationVersion returns the version of the library providing instrumentation. -func (t *TracerConfig) InstrumentationVersion() string { - return t.instrumentationVersion -} - -// InstrumentationAttributes returns the attributes associated with the library -// providing instrumentation. -func (t *TracerConfig) InstrumentationAttributes() attribute.Set { - return t.attrs -} - -// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. -func (t *TracerConfig) SchemaURL() string { - return t.schemaURL -} - -// NewTracerConfig applies all the options to a returned TracerConfig. -func NewTracerConfig(options ...TracerOption) TracerConfig { - var config TracerConfig - for _, option := range options { - config = option.apply(config) - } - return config -} - -// TracerOption applies an option to a TracerConfig. -type TracerOption interface { - apply(TracerConfig) TracerConfig -} - -type tracerOptionFunc func(TracerConfig) TracerConfig - -func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { - return fn(cfg) -} - -// SpanConfig is a group of options for a Span. 
-type SpanConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - links []Link - newRoot bool - spanKind SpanKind - stackTrace bool -} - -// Attributes describe the associated qualities of a Span. -func (cfg *SpanConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in a Span life-cycle. -func (cfg *SpanConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace reports whether stack trace capturing is enabled. -func (cfg *SpanConfig) StackTrace() bool { - return cfg.stackTrace -} - -// Links are the associations a Span has with other Spans. -func (cfg *SpanConfig) Links() []Link { - return cfg.links -} - -// NewRoot identifies a Span as the root Span for a new trace. This is -// commonly used when an existing trace crosses trust boundaries and the -// remote parent span context should be ignored for security. -func (cfg *SpanConfig) NewRoot() bool { - return cfg.newRoot -} - -// SpanKind is the role a Span has in a trace. -func (cfg *SpanConfig) SpanKind() SpanKind { - return cfg.spanKind -} - -// NewSpanStartConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanStart(c) - } - return c -} - -// NewSpanEndConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanEnd(c) - } - return c -} - -// SpanStartOption applies an option to a SpanConfig. 
These options are applicable -// only when the span is created. -type SpanStartOption interface { - applySpanStart(SpanConfig) SpanConfig -} - -type spanOptionFunc func(SpanConfig) SpanConfig - -func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { - return fn(cfg) -} - -// SpanEndOption applies an option to a SpanConfig. These options are -// applicable only when the span is ended. -type SpanEndOption interface { - applySpanEnd(SpanConfig) SpanConfig -} - -// EventConfig is a group of options for an Event. -type EventConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - stackTrace bool -} - -// Attributes describe the associated qualities of an Event. -func (cfg *EventConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in an Event life-cycle. -func (cfg *EventConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace reports whether stack trace capturing is enabled. -func (cfg *EventConfig) StackTrace() bool { - return cfg.stackTrace -} - -// NewEventConfig applies all the EventOptions to a returned EventConfig. If no -// timestamp option is passed, the returned EventConfig will have a Timestamp -// set to the call time, otherwise no validation is performed on the returned -// EventConfig. -func NewEventConfig(options ...EventOption) EventConfig { - var c EventConfig - for _, option := range options { - c = option.applyEvent(c) - } - if c.timestamp.IsZero() { - c.timestamp = time.Now() - } - return c -} - -// EventOption applies span event options to an EventConfig. -type EventOption interface { - applyEvent(EventConfig) EventConfig -} - -// SpanOption are options that can be used at both the beginning and end of a span. -type SpanOption interface { - SpanStartOption - SpanEndOption -} - -// SpanStartEventOption are options that can be used at the start of a span, or with an event. 
-type SpanStartEventOption interface { - SpanStartOption - EventOption -} - -// SpanEndEventOption are options that can be used at the end of a span, or with an event. -type SpanEndEventOption interface { - SpanEndOption - EventOption -} - -type attributeOption []attribute.KeyValue - -func (o attributeOption) applySpan(c SpanConfig) SpanConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) - return c -} -func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o attributeOption) applyEvent(c EventConfig) EventConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) - return c -} - -var _ SpanStartEventOption = attributeOption{} - -// WithAttributes adds the attributes related to a span life-cycle event. -// These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start event. Otherwise, these -// attributes provide additional information about the event being recorded -// (e.g. error, state change, processing progress, system event). -// -// If multiple of these options are passed the attributes of each successive -// option will extend the attributes instead of overwriting. There is no -// guarantee of uniqueness in the resulting attributes. -func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { - return attributeOption(attributes) -} - -// SpanEventOption are options that can be used with an event or a span. 
-type SpanEventOption interface { - SpanOption - EventOption -} - -type timestampOption time.Time - -func (o timestampOption) applySpan(c SpanConfig) SpanConfig { - c.timestamp = time.Time(o) - return c -} -func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applyEvent(c EventConfig) EventConfig { - c.timestamp = time.Time(o) - return c -} - -var _ SpanEventOption = timestampOption{} - -// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. -// started, stopped, errored). -func WithTimestamp(t time.Time) SpanEventOption { - return timestampOption(t) -} - -type stackTraceOption bool - -func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { - c.stackTrace = bool(o) - return c -} - -func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { - c.stackTrace = bool(o) - return c -} -func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } - -// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). -func WithStackTrace(b bool) SpanEndEventOption { - return stackTraceOption(b) -} - -// WithLinks adds links to a Span. The links are added to the existing Span -// links, i.e. this does not overwrite. Links with invalid span context are ignored. -func WithLinks(links ...Link) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.links = append(cfg.links, links...) - return cfg - }) -} - -// WithNewRoot specifies that the Span should be treated as a root Span. Any -// existing parent span context will be ignored when defining the Span's trace -// identifiers. -func WithNewRoot() SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.newRoot = true - return cfg - }) -} - -// WithSpanKind sets the SpanKind of a Span. 
-func WithSpanKind(kind SpanKind) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.spanKind = kind - return cfg - }) -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.instrumentationVersion = version - return cfg - }) -} - -// mergeSets returns the union of keys between a and b. Any duplicate keys will -// use the value associated with b. -func mergeSets(a, b attribute.Set) attribute.Set { - // NewMergeIterator uses the first value for any duplicates. - iter := attribute.NewMergeIterator(&b, &a) - merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) - for iter.Next() { - merged = append(merged, iter.Attribute()) - } - return attribute.NewSet(merged...) -} - -// WithInstrumentationAttributes adds the instrumentation attributes. -// -// This is equivalent to calling [WithInstrumentationAttributeSet] with an -// [attribute.Set] created from a clone of the passed attributes. -// [WithInstrumentationAttributeSet] is recommended for more control. -// -// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] -// options are passed, the attributes will be merged together in the order -// they are passed. Attributes with duplicate keys will use the last value passed. -func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { - set := attribute.NewSet(slices.Clone(attr)...) - return WithInstrumentationAttributeSet(set) -} - -// WithInstrumentationAttributeSet adds the instrumentation attributes. -// -// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] -// options are passed, the attributes will be merged together in the order -// they are passed. Attributes with duplicate keys will use the last value passed. 
-func WithInstrumentationAttributeSet(set attribute.Set) TracerOption { - if set.Len() == 0 { - return tracerOptionFunc(func(config TracerConfig) TracerConfig { - return config - }) - } - - return tracerOptionFunc(func(config TracerConfig) TracerConfig { - if config.attrs.Len() == 0 { - config.attrs = set - } else { - config.attrs = mergeSets(config.attrs, set) - } - return config - }) -} - -// WithSchemaURL sets the schema URL for the Tracer. -func WithSchemaURL(schemaURL string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.schemaURL = schemaURL - return cfg - }) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go deleted file mode 100644 index 8c45a7107f..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import "context" - -type traceContextKeyType int - -const currentSpanKey traceContextKeyType = iota - -// ContextWithSpan returns a copy of parent with span set as the current Span. -func ContextWithSpan(parent context.Context, span Span) context.Context { - return context.WithValue(parent, currentSpanKey, span) -} - -// ContextWithSpanContext returns a copy of parent with sc as the current -// Span. The Span implementation that wraps sc is non-recording and performs -// no operations other than to return sc as the SpanContext from the -// SpanContext method. -func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { - return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) -} - -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly -// as a remote SpanContext and as the current Span. 
The Span implementation -// that wraps rsc is non-recording and performs no operations other than to -// return rsc as the SpanContext from the SpanContext method. -func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { - return ContextWithSpanContext(parent, rsc.WithRemote(true)) -} - -// SpanFromContext returns the current Span from ctx. -// -// If no Span is currently set in ctx an implementation of a Span that -// performs no operations is returned. -func SpanFromContext(ctx context.Context) Span { - if ctx == nil { - return noopSpanInstance - } - if span, ok := ctx.Value(currentSpanKey).(Span); ok { - return span - } - return noopSpanInstance -} - -// SpanContextFromContext returns the current Span's SpanContext. -func SpanContextFromContext(ctx context.Context) SpanContext { - return SpanFromContext(ctx).SpanContext() -} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go deleted file mode 100644 index cdbf41d6d7..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package trace provides an implementation of the tracing part of the -OpenTelemetry API. - -To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. In its simplest form: - - var tracer trace.Tracer - - func init() { - tracer = otel.Tracer("instrumentation/package/name") - } - - func operation(ctx context.Context) { - var span trace.Span - ctx, span = tracer.Start(ctx, "operation") - defer span.End() - // ... - } - -A Tracer is unique to the instrumentation and is used to create Spans. -Instrumentation should be designed to accept a TracerProvider from which it -can create its own unique Tracer. 
Alternatively, the registered global -TracerProvider from the go.opentelemetry.io/otel package can be used as -a default. - - const ( - name = "instrumentation/package/name" - version = "0.1.0" - ) - - type Instrumentation struct { - tracer trace.Tracer - } - - func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { - if tp == nil { - tp = otel.TracerProvider() - } - return &Instrumentation{ - tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), - } - } - - func operation(ctx context.Context, inst *Instrumentation) { - var span trace.Span - ctx, span = inst.tracer.Start(ctx, "operation") - defer span.End() - // ... - } - -# API Implementations - -This package does not conform to the standard Go versioning policy; all of its -interfaces may have methods added to them without a package major version bump. -This non-standard API evolution could surprise an uninformed implementation -author. They could unknowingly build their implementation in a way that would -result in a runtime panic for their users that update to the new API. - -The API is designed to help inform an instrumentation author about this -non-standard API evolution. It requires them to choose a default behavior for -unimplemented interface methods. There are three behavior choices they can -make: - - - Compilation failure - - Panic - - Default to another implementation - -All interfaces in this API embed a corresponding interface from -[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default -behavior of their implementations to be a compilation failure, signaling to -their users they need to update to the latest version of that implementation, -they need to embed the corresponding interface from -[go.opentelemetry.io/otel/trace/embedded] in their implementation. For -example, - - import "go.opentelemetry.io/otel/trace/embedded" - - type TracerProvider struct { - embedded.TracerProvider - // ... 
- } - -If an author wants the default behavior of their implementations to panic, they -can embed the API interface directly. - - import "go.opentelemetry.io/otel/trace" - - type TracerProvider struct { - trace.TracerProvider - // ... - } - -This option is not recommended. It will lead to publishing packages that -contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a transitive -dependency. - -Finally, an author can embed another implementation in theirs. The embedded -implementation will be used for methods not defined by the author. For example, -an author who wants to default to silently dropping the call can use -[go.opentelemetry.io/otel/trace/noop]: - - import "go.opentelemetry.io/otel/trace/noop" - - type TracerProvider struct { - noop.TracerProvider - // ... - } - -It is strongly recommended that authors only embed -[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. -That implementation is the only one OpenTelemetry authors can guarantee will -fully implement all the API interfaces when a user updates their API. 
-*/ -package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md deleted file mode 100644 index 7754a239ee..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Trace Embedded - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go deleted file mode 100644 index 3e359a00bf..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package embedded provides interfaces embedded within the [OpenTelemetry -// trace API]. -// -// Implementers of the [OpenTelemetry trace API] can embed the relevant type -// from this package into their implementation directly. Doing so will result -// in a compilation error for users when the [OpenTelemetry trace API] is -// extended (which is something that can happen without a major version bump of -// the API package). -// -// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace -package embedded // import "go.opentelemetry.io/otel/trace/embedded" - -// TracerProvider is embedded in -// [go.opentelemetry.io/otel/trace.TracerProvider]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to -// experience a compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] -// interface is extended (which is something that can happen without a major -// version bump of the API package). 
-type TracerProvider interface{ tracerProvider() } - -// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a -// compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface -// is extended (which is something that can happen without a major version bump -// of the API package). -type Tracer interface{ tracer() } - -// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. -// -// Embed this interface in your implementation of the -// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a -// compilation error, signaling they need to update to your latest -// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is -// extended (which is something that can happen without a major version bump of -// the API package). -type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go deleted file mode 100644 index 1cbef1d4b9..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/hex.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -const ( - // hexLU is a hex lookup table of the 16 lowercase hex digits. - // The character values of the string are indexed at the equivalent - // hexadecimal value they represent. This table efficiently encodes byte data - // into a string representation of hexadecimal. - hexLU = "0123456789abcdef" - - // hexRev is a reverse hex lookup table for lowercase hex digits. - // The table is efficiently decodes a hexadecimal string into bytes. - // Valid hexadecimal characters are indexed at their respective values. All - // other invalid ASCII characters are represented with '\xff'. 
- // - // The '\xff' character is used as invalid because no valid character has - // the upper 4 bits set. Meaning, an efficient validation can be performed - // over multiple character parsing by checking these bits remain zero. - hexRev = "" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + - "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" -) diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go deleted file mode 100644 index ff0f6eac62..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -// Attr is a key-value pair. 
-type Attr struct { - Key string `json:"key,omitempty"` - Value Value `json:"value,omitempty"` -} - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return Attr{key, StringValue(value)} -} - -// Int64 returns an Attr for an int64 value. -func Int64(key string, value int64) Attr { - return Attr{key, Int64Value(value)} -} - -// Int returns an Attr for an int value. -func Int(key string, value int) Attr { - return Int64(key, int64(value)) -} - -// Float64 returns an Attr for a float64 value. -func Float64(key string, value float64) Attr { - return Attr{key, Float64Value(value)} -} - -// Bool returns an Attr for a bool value. -func Bool(key string, value bool) Attr { - return Attr{key, BoolValue(value)} -} - -// Bytes returns an Attr for a []byte value. -// The passed slice must not be changed after it is passed. -func Bytes(key string, value []byte) Attr { - return Attr{key, BytesValue(value)} -} - -// Slice returns an Attr for a []Value value. -// The passed slice must not be changed after it is passed. -func Slice(key string, value ...Value) Attr { - return Attr{key, SliceValue(value...)} -} - -// Map returns an Attr for a map value. -// The passed slice must not be changed after it is passed. -func Map(key string, value ...Attr) Attr { - return Attr{key, MapValue(value...)} -} - -// Equal reports whether a is equal to b. -func (a Attr) Equal(b Attr) bool { - return a.Key == b.Key && a.Value.Equal(b.Value) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go deleted file mode 100644 index 5debe90bbb..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -/* -Package telemetry provides a lightweight representations of OpenTelemetry -telemetry that is compatible with the OTLP JSON protobuf encoding. 
-*/ -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go deleted file mode 100644 index bea56f2e7d..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "encoding/hex" - "errors" - "fmt" -) - -const ( - traceIDSize = 16 - spanIDSize = 8 -) - -// TraceID is a custom data type that is used for all trace IDs. -type TraceID [traceIDSize]byte - -// String returns the hex string representation form of a TraceID. -func (tid TraceID) String() string { - return hex.EncodeToString(tid[:]) -} - -// IsEmpty reports whether the TraceID contains only zero bytes. -func (tid TraceID) IsEmpty() bool { - return tid == [traceIDSize]byte{} -} - -// MarshalJSON converts the trace ID into a hex string enclosed in quotes. -func (tid TraceID) MarshalJSON() ([]byte, error) { - if tid.IsEmpty() { - return []byte(`""`), nil - } - return marshalJSON(tid[:]) -} - -// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in -// quotes. -func (tid *TraceID) UnmarshalJSON(data []byte) error { - *tid = [traceIDSize]byte{} - return unmarshalJSON(tid[:], data) -} - -// SpanID is a custom data type that is used for all span IDs. -type SpanID [spanIDSize]byte - -// String returns the hex string representation form of a SpanID. -func (sid SpanID) String() string { - return hex.EncodeToString(sid[:]) -} - -// IsEmpty reports whether the SpanID contains only zero bytes. -func (sid SpanID) IsEmpty() bool { - return sid == [spanIDSize]byte{} -} - -// MarshalJSON converts span ID into a hex string enclosed in quotes. 
-func (sid SpanID) MarshalJSON() ([]byte, error) { - if sid.IsEmpty() { - return []byte(`""`), nil - } - return marshalJSON(sid[:]) -} - -// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. -func (sid *SpanID) UnmarshalJSON(data []byte) error { - *sid = [spanIDSize]byte{} - return unmarshalJSON(sid[:], data) -} - -// marshalJSON converts id into a hex string enclosed in quotes. -func marshalJSON(id []byte) ([]byte, error) { - // Plus 2 quote chars at the start and end. - hexLen := hex.EncodedLen(len(id)) + 2 - - b := make([]byte, hexLen) - hex.Encode(b[1:hexLen-1], id) - b[0], b[hexLen-1] = '"', '"' - - return b, nil -} - -// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst, src []byte) error { - if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { - src = src[1 : l-1] - } - nLen := len(src) - if nLen == 0 { - return nil - } - - if len(dst) != hex.DecodedLen(nLen) { - return errors.New("invalid length for ID") - } - - _, err := hex.Decode(dst, src) - if err != nil { - return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go deleted file mode 100644 index f5e3a8cec9..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "encoding/json" - "strconv" -) - -// protoInt64 represents the protobuf encoding of integers which can be either -// strings or integers. -type protoInt64 int64 - -// Int64 returns the protoInt64 as an int64. -func (i *protoInt64) Int64() int64 { return int64(*i) } - -// UnmarshalJSON decodes both strings and integers. 
-func (i *protoInt64) UnmarshalJSON(data []byte) error { - if data[0] == '"' { - var str string - if err := json.Unmarshal(data, &str); err != nil { - return err - } - parsedInt, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return err - } - *i = protoInt64(parsedInt) - } else { - var parsedInt int64 - if err := json.Unmarshal(data, &parsedInt); err != nil { - return err - } - *i = protoInt64(parsedInt) - } - return nil -} - -// protoUint64 represents the protobuf encoding of integers which can be either -// strings or integers. -type protoUint64 uint64 - -// Int64 returns the protoUint64 as a uint64. -func (i *protoUint64) Uint64() uint64 { return uint64(*i) } - -// UnmarshalJSON decodes both strings and integers. -func (i *protoUint64) UnmarshalJSON(data []byte) error { - if data[0] == '"' { - var str string - if err := json.Unmarshal(data, &str); err != nil { - return err - } - parsedUint, err := strconv.ParseUint(str, 10, 64) - if err != nil { - return err - } - *i = protoUint64(parsedUint) - } else { - var parsedUint uint64 - if err := json.Unmarshal(data, &parsedUint); err != nil { - return err - } - *i = protoUint64(parsedUint) - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go deleted file mode 100644 index 1798a702d4..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// Resource information. -type Resource struct { - // Attrs are the set of attributes that describe the resource. Attribute - // keys MUST be unique (it is not allowed to have more than one attribute - // with the same key). 
- Attrs []Attr `json:"attributes,omitempty"` - // DroppedAttrs is the number of dropped attributes. If the value - // is 0, then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. -func (r *Resource) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Resource type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Resource field: %#v", keyIface) - } - - switch key { - case "attributes": - err = decoder.Decode(&r.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&r.DroppedAttrs) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go deleted file mode 100644 index c2b4c635b7..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// Scope is the identifying values of the instrumentation scope. -type Scope struct { - Name string `json:"name,omitempty"` - Version string `json:"version,omitempty"` - Attrs []Attr `json:"attributes,omitempty"` - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. 
-func (s *Scope) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Scope type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Scope field: %#v", keyIface) - } - - switch key { - case "name": - err = decoder.Decode(&s.Name) - case "version": - err = decoder.Decode(&s.Version) - case "attributes": - err = decoder.Decode(&s.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&s.DroppedAttrs) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go deleted file mode 100644 index e7ca62c660..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "bytes" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "time" -) - -// A Span represents a single operation performed by a single component of the -// system. -type Span struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR - // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. 
- TraceID TraceID `json:"traceId,omitempty"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes OR of length - // other than 8 bytes is considered invalid (empty string in OTLP/JSON - // is zero-length and thus is also invalid). - // - // This field is required. - SpanID SpanID `json:"spanId,omitempty"` - // trace_state conveys information about request position in multiple distributed tracing graphs. - // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header - // See also https://github.com/w3c/distributed-tracing for more details about this field. - TraceState string `json:"traceState,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - ParentSpanID SpanID `json:"parentSpanId,omitempty"` - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether a span's parent - // is remote. The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // When creating span messages, if the message is logically forwarded from another source - // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD - // be copied as-is. If creating from a source that does not have an equivalent flags field - // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST - // be set to zero. 
- // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // - // [Optional]. - Flags uint32 `json:"flags,omitempty"` - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // Empty value is equivalent to an unknown span name. - // - // This field is required. - Name string `json:"name"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - Kind SpanKind `json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time - // kept by the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - StartTime time.Time `json:"startTimeUnixNano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time - // kept by the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - EndTime time.Time `json:"endTimeUnixNano,omitempty"` - // attributes is a collection of key/value pairs. 
Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "example.com/myattribute": true - // "example.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` - // events is a collection of Event items. - Events []*SpanEvent `json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no - // events were dropped. - DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` - // links is a collection of Links, which are references from this span to a span - // in the same or different trace. - Links []*SpanLink `json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was - // enforced. If this value is 0, then no links were dropped. - DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` - // An optional final status for this span. Semantically when Status isn't set, it means - // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). - Status *Status `json:"status,omitempty"` -} - -// MarshalJSON encodes s into OTLP formatted JSON. 
-func (s Span) MarshalJSON() ([]byte, error) { - startT := s.StartTime.UnixNano() - if s.StartTime.IsZero() || startT < 0 { - startT = 0 - } - - endT := s.EndTime.UnixNano() - if s.EndTime.IsZero() || endT < 0 { - endT = 0 - } - - // Override non-empty default SpanID marshal and omitempty. - var parentSpanId string - if !s.ParentSpanID.IsEmpty() { - b := make([]byte, hex.EncodedLen(spanIDSize)) - hex.Encode(b, s.ParentSpanID[:]) - parentSpanId = string(b) - } - - type Alias Span - return json.Marshal(struct { - Alias - ParentSpanID string `json:"parentSpanId,omitempty"` - StartTime uint64 `json:"startTimeUnixNano,omitempty"` - EndTime uint64 `json:"endTimeUnixNano,omitempty"` - }{ - Alias: Alias(s), - ParentSpanID: parentSpanId, - StartTime: uint64(startT), // nolint:gosec // >0 checked above. - EndTime: uint64(endT), // nolint:gosec // >0 checked above. - }) -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. -func (s *Span) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Span type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Span field: %#v", keyIface) - } - - switch key { - case "traceId", "trace_id": - err = decoder.Decode(&s.TraceID) - case "spanId", "span_id": - err = decoder.Decode(&s.SpanID) - case "traceState", "trace_state": - err = decoder.Decode(&s.TraceState) - case "parentSpanId", "parent_span_id": - err = decoder.Decode(&s.ParentSpanID) - case "flags": - err = decoder.Decode(&s.Flags) - case "name": - err = decoder.Decode(&s.Name) - case "kind": - err = decoder.Decode(&s.Kind) - case "startTimeUnixNano", "start_time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. - s.StartTime = time.Unix(0, v) - case "endTimeUnixNano", "end_time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. - s.EndTime = time.Unix(0, v) - case "attributes": - err = decoder.Decode(&s.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&s.DroppedAttrs) - case "events": - err = decoder.Decode(&s.Events) - case "droppedEventsCount", "dropped_events_count": - err = decoder.Decode(&s.DroppedEvents) - case "links": - err = decoder.Decode(&s.Links) - case "droppedLinksCount", "dropped_links_count": - err = decoder.Decode(&s.DroppedLinks) - case "status": - err = decoder.Decode(&s.Status) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// SpanFlags represents constants used to interpret the -// Span.flags field, which is protobuf 'fixed32' type and is to -// be used as bit-fields. Each non-zero value defined in this enum is -// a bit-mask. 
To extract the bit-field, for example, use an -// expression like: -// -// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) -// -// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. -// -// Note that Span flags were introduced in version 1.1 of the -// OpenTelemetry protocol. Older Span producers do not set this -// field, consequently consumers should not rely on the absence of a -// particular flag bit to indicate the presence of a particular feature. -type SpanFlags int32 - -const ( - // SpanFlagsTraceFlagsMask is a mask for trace-flags. - // - // Bits 0-7 are used for trace flags. - SpanFlagsTraceFlagsMask SpanFlags = 255 - // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. - // - // Bits 8 and 9 are used to indicate that the parent span or link span is - // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. - // - // Bits 8 and 9 are used to indicate that the parent span or link span is - // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is - // remote. - SpanFlagsContextIsRemoteMask SpanFlags = 512 -) - -// SpanKind is the type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type SpanKind int32 - -const ( - // SpanKindInternal indicates that the span represents an internal - // operation within an application, as opposed to an operation happening at - // the boundaries. - SpanKindInternal SpanKind = 1 - // SpanKindServer indicates that the span covers server-side handling of an - // RPC or other remote network request. - SpanKindServer SpanKind = 2 - // SpanKindClient indicates that the span describes a request to some - // remote service. - SpanKindClient SpanKind = 3 - // SpanKindProducer indicates that the span describes a producer sending a - // message to a broker. 
Unlike SpanKindClient and SpanKindServer, there is - // often no direct critical path latency relationship between producer and - // consumer spans. A SpanKindProducer span ends when the message was - // accepted by the broker while the logical processing of the message might - // span a much longer time. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer indicates that the span describes a consumer receiving - // a message from a broker. Like SpanKindProducer, there is often no direct - // critical path latency relationship between producer and consumer spans. - SpanKindConsumer SpanKind = 5 -) - -// SpanEvent is a time-stamped annotation of the span, consisting of -// user-supplied text description and key-value pairs. -type SpanEvent struct { - // time_unix_nano is the time the event occurred. - Time time.Time `json:"timeUnixNano,omitempty"` - // name of the event. - // This field is semantically required to be set to non-empty string. - Name string `json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` -} - -// MarshalJSON encodes e into OTLP formatted JSON. -func (e SpanEvent) MarshalJSON() ([]byte, error) { - t := e.Time.UnixNano() - if e.Time.IsZero() || t < 0 { - t = 0 - } - - type Alias SpanEvent - return json.Marshal(struct { - Alias - Time uint64 `json:"timeUnixNano,omitempty"` - }{ - Alias: Alias(e), - Time: uint64(t), // nolint: gosec // >0 checked above - }) -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. 
-func (se *SpanEvent) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid SpanEvent type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) - } - - switch key { - case "timeUnixNano", "time_unix_nano": - var val protoUint64 - err = decoder.Decode(&val) - v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. - se.Time = time.Unix(0, v) - case "name": - err = decoder.Decode(&se.Name) - case "attributes": - err = decoder.Decode(&se.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&se.DroppedAttrs) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// SpanLink is a reference from the current span to another span in the same -// trace or in a different trace. For example, this can be used in batching -// operations, where a single batch handler processes multiple requests from -// different traces or when the handler receives a request from a different -// project. -type SpanLink struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceID TraceID `json:"traceId,omitempty"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanID SpanID `json:"spanId,omitempty"` - // The trace_state associated with the link. - TraceState string `json:"traceState,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). 
- Attrs []Attr `json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, - // then no attributes were dropped. - DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` - // Flags, a bit field. - // - // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace - // Context specification. To read the 8-bit W3C trace flag, use - // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. - // - // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. - // - // Bits 8 and 9 represent the 3 states of whether the link is remote. - // The states are (unknown, is not remote, is remote). - // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. - // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. - // - // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. - // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. - // - // [Optional]. - Flags uint32 `json:"flags,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. -func (sl *SpanLink) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid SpanLink type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid SpanLink field: %#v", keyIface) - } - - switch key { - case "traceId", "trace_id": - err = decoder.Decode(&sl.TraceID) - case "spanId", "span_id": - err = decoder.Decode(&sl.SpanID) - case "traceState", "trace_state": - err = decoder.Decode(&sl.TraceState) - case "attributes": - err = decoder.Decode(&sl.Attrs) - case "droppedAttributesCount", "dropped_attributes_count": - err = decoder.Decode(&sl.DroppedAttrs) - case "flags": - err = decoder.Decode(&sl.Flags) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go deleted file mode 100644 index 1039bf40cd..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -// StatusCode is the status of a Span. -// -// For the semantics of status codes see -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status -type StatusCode int32 - -const ( - // StatusCodeUnset is the default status. - StatusCodeUnset StatusCode = 0 - // StatusCodeOK is used when the Span has been validated by an Application - // developer or Operator to have completed successfully. - StatusCodeOK StatusCode = 1 - // StatusCodeError is used when the Span contains an error. 
- StatusCodeError StatusCode = 2 -) - -var statusCodeStrings = []string{ - "Unset", - "OK", - "Error", -} - -func (s StatusCode) String() string { - if s >= 0 && int(s) < len(statusCodeStrings) { - return statusCodeStrings[s] - } - return "" -} - -// Status defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. -type Status struct { - // A developer-facing human readable error message. - Message string `json:"message,omitempty"` - // The status code. - Code StatusCode `json:"code,omitempty"` -} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go deleted file mode 100644 index e5f10767ca..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" -) - -// Traces represents the traces data that can be stored in a persistent storage, -// OR can be embedded by other protocols that transfer OTLP traces data but do -// not implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -type Traces struct { - // An array of ResourceSpans. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. 
- ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. -func (td *Traces) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid TracesData type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. - return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid TracesData field: %#v", keyIface) - } - - switch key { - case "resourceSpans", "resource_spans": - err = decoder.Decode(&td.ResourceSpans) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// ResourceSpans is a collection of ScopeSpans from a Resource. -type ResourceSpans struct { - // The resource for the spans in this message. - // If this field is not set then no resource info is known. - Resource Resource `json:"resource"` - // A list of ScopeSpans that originate from a resource. - ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_spans" field which have their own schema_url field. - SchemaURL string `json:"schemaUrl,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. -func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid ResourceSpans type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) - } - - switch key { - case "resource": - err = decoder.Decode(&rs.Resource) - case "scopeSpans", "scope_spans": - err = decoder.Decode(&rs.ScopeSpans) - case "schemaUrl", "schema_url": - err = decoder.Decode(&rs.SchemaURL) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} - -// ScopeSpans is a collection of Spans produced by an InstrumentationScope. -type ScopeSpans struct { - // The instrumentation scope information for the spans in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - Scope *Scope `json:"scope"` - // A list of Spans that originate from an instrumentation scope. - Spans []*Span `json:"spans,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. - SchemaURL string `json:"schemaUrl,omitempty"` -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. -func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid ScopeSpans type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) - } - - switch key { - case "scope": - err = decoder.Decode(&ss.Scope) - case "spans": - err = decoder.Decode(&ss.Spans) - case "schemaUrl", "schema_url": - err = decoder.Decode(&ss.SchemaURL) - default: - // Skip unknown. - } - - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go deleted file mode 100644 index cb7927b816..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" - -import ( - "bytes" - "cmp" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "slices" - "strconv" - "unsafe" -) - -// A Value represents a structured value. -// A zero value is valid and represents an empty value. -type Value struct { - // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. - - // num holds the value for Int64, Float64, and Bool. It holds the length - // for String, Bytes, Slice, Map. - num uint64 - // any holds either the KindBool, KindInt64, KindFloat64, stringptr, - // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 - // then the value of Value is in num as described above. Otherwise, it - // contains the value wrapped in the appropriate type. - any any -} - -type ( - // sliceptr represents a value in Value.any for KindString Values. - stringptr *byte - // bytesptr represents a value in Value.any for KindBytes Values. - bytesptr *byte - // sliceptr represents a value in Value.any for KindSlice Values. 
- sliceptr *Value - // mapptr represents a value in Value.any for KindMap Values. - mapptr *Attr -) - -// ValueKind is the kind of a [Value]. -type ValueKind int - -// ValueKind values. -const ( - ValueKindEmpty ValueKind = iota - ValueKindBool - ValueKindFloat64 - ValueKindInt64 - ValueKindString - ValueKindBytes - ValueKindSlice - ValueKindMap -) - -var valueKindStrings = []string{ - "Empty", - "Bool", - "Float64", - "Int64", - "String", - "Bytes", - "Slice", - "Map", -} - -func (k ValueKind) String() string { - if k >= 0 && int(k) < len(valueKindStrings) { - return valueKindStrings[k] - } - return "" -} - -// StringValue returns a new [Value] for a string. -func StringValue(v string) Value { - return Value{ - num: uint64(len(v)), - any: stringptr(unsafe.StringData(v)), - } -} - -// IntValue returns a [Value] for an int. -func IntValue(v int) Value { return Int64Value(int64(v)) } - -// Int64Value returns a [Value] for an int64. -func Int64Value(v int64) Value { - return Value{ - num: uint64(v), // nolint: gosec // Store raw bytes. - any: ValueKindInt64, - } -} - -// Float64Value returns a [Value] for a float64. -func Float64Value(v float64) Value { - return Value{num: math.Float64bits(v), any: ValueKindFloat64} -} - -// BoolValue returns a [Value] for a bool. -func BoolValue(v bool) Value { //nolint:revive // Not a control flag. - var n uint64 - if v { - n = 1 - } - return Value{num: n, any: ValueKindBool} -} - -// BytesValue returns a [Value] for a byte slice. The passed slice must not be -// changed after it is passed. -func BytesValue(v []byte) Value { - return Value{ - num: uint64(len(v)), - any: bytesptr(unsafe.SliceData(v)), - } -} - -// SliceValue returns a [Value] for a slice of [Value]. The passed slice must -// not be changed after it is passed. -func SliceValue(vs ...Value) Value { - return Value{ - num: uint64(len(vs)), - any: sliceptr(unsafe.SliceData(vs)), - } -} - -// MapValue returns a new [Value] for a slice of key-value pairs. 
The passed -// slice must not be changed after it is passed. -func MapValue(kvs ...Attr) Value { - return Value{ - num: uint64(len(kvs)), - any: mapptr(unsafe.SliceData(kvs)), - } -} - -// AsString returns the value held by v as a string. -func (v Value) AsString() string { - if sp, ok := v.any.(stringptr); ok { - return unsafe.String(sp, v.num) - } - // TODO: error handle - return "" -} - -// asString returns the value held by v as a string. It will panic if the Value -// is not KindString. -func (v Value) asString() string { - return unsafe.String(v.any.(stringptr), v.num) -} - -// AsInt64 returns the value held by v as an int64. -func (v Value) AsInt64() int64 { - if v.Kind() != ValueKindInt64 { - // TODO: error handle - return 0 - } - return v.asInt64() -} - -// asInt64 returns the value held by v as an int64. If v is not of KindInt64, -// this will return garbage. -func (v Value) asInt64() int64 { - // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) // nolint: gosec -} - -// AsBool returns the value held by v as a bool. -func (v Value) AsBool() bool { - if v.Kind() != ValueKindBool { - // TODO: error handle - return false - } - return v.asBool() -} - -// asBool returns the value held by v as a bool. If v is not of KindBool, this -// will return garbage. -func (v Value) asBool() bool { return v.num == 1 } - -// AsFloat64 returns the value held by v as a float64. -func (v Value) AsFloat64() float64 { - if v.Kind() != ValueKindFloat64 { - // TODO: error handle - return 0 - } - return v.asFloat64() -} - -// asFloat64 returns the value held by v as a float64. If v is not of -// KindFloat64, this will return garbage. -func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } - -// AsBytes returns the value held by v as a []byte. 
-func (v Value) AsBytes() []byte { - if sp, ok := v.any.(bytesptr); ok { - return unsafe.Slice((*byte)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asBytes returns the value held by v as a []byte. It will panic if the Value -// is not KindBytes. -func (v Value) asBytes() []byte { - return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) -} - -// AsSlice returns the value held by v as a []Value. -func (v Value) AsSlice() []Value { - if sp, ok := v.any.(sliceptr); ok { - return unsafe.Slice((*Value)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asSlice returns the value held by v as a []Value. It will panic if the Value -// is not KindSlice. -func (v Value) asSlice() []Value { - return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) -} - -// AsMap returns the value held by v as a []Attr. -func (v Value) AsMap() []Attr { - if sp, ok := v.any.(mapptr); ok { - return unsafe.Slice((*Attr)(sp), v.num) - } - // TODO: error handle - return nil -} - -// asMap returns the value held by v as a []Attr. It will panic if the -// Value is not KindMap. -func (v Value) asMap() []Attr { - return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) -} - -// Kind returns the Kind of v. -func (v Value) Kind() ValueKind { - switch x := v.any.(type) { - case ValueKind: - return x - case stringptr: - return ValueKindString - case bytesptr: - return ValueKindBytes - case sliceptr: - return ValueKindSlice - case mapptr: - return ValueKindMap - default: - return ValueKindEmpty - } -} - -// Empty reports whether v does not hold any value. -func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } - -// Equal reports whether v is equal to w. 
-func (v Value) Equal(w Value) bool { - k1 := v.Kind() - k2 := w.Kind() - if k1 != k2 { - return false - } - switch k1 { - case ValueKindInt64, ValueKindBool: - return v.num == w.num - case ValueKindString: - return v.asString() == w.asString() - case ValueKindFloat64: - return v.asFloat64() == w.asFloat64() - case ValueKindSlice: - return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) - case ValueKindMap: - sv := sortMap(v.asMap()) - sw := sortMap(w.asMap()) - return slices.EqualFunc(sv, sw, Attr.Equal) - case ValueKindBytes: - return bytes.Equal(v.asBytes(), w.asBytes()) - case ValueKindEmpty: - return true - default: - // TODO: error handle - return false - } -} - -func sortMap(m []Attr) []Attr { - sm := make([]Attr, len(m)) - copy(sm, m) - slices.SortFunc(sm, func(a, b Attr) int { - return cmp.Compare(a.Key, b.Key) - }) - - return sm -} - -// String returns Value's value as a string, formatted like [fmt.Sprint]. -// -// The returned string is meant for debugging; -// the string representation is not stable. -func (v Value) String() string { - switch v.Kind() { - case ValueKindString: - return v.asString() - case ValueKindInt64: - // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) // nolint: gosec - case ValueKindFloat64: - return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) - case ValueKindBool: - return strconv.FormatBool(v.asBool()) - case ValueKindBytes: - return string(v.asBytes()) - case ValueKindMap: - return fmt.Sprint(v.asMap()) - case ValueKindSlice: - return fmt.Sprint(v.asSlice()) - case ValueKindEmpty: - return "" - default: - // Try to handle this as gracefully as possible. - // - // Don't panic here. The goal here is to have developers find this - // first if a slog.Kind is is not handled. It is - // preferable to have user's open issue asking why their attributes - // have a "unhandled: " prefix than say that their code is panicking. 
- return fmt.Sprintf("", v.Kind()) - } -} - -// MarshalJSON encodes v into OTLP formatted JSON. -func (v *Value) MarshalJSON() ([]byte, error) { - switch v.Kind() { - case ValueKindString: - return json.Marshal(struct { - Value string `json:"stringValue"` - }{v.asString()}) - case ValueKindInt64: - return json.Marshal(struct { - Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes. - case ValueKindFloat64: - return json.Marshal(struct { - Value float64 `json:"doubleValue"` - }{v.asFloat64()}) - case ValueKindBool: - return json.Marshal(struct { - Value bool `json:"boolValue"` - }{v.asBool()}) - case ValueKindBytes: - return json.Marshal(struct { - Value []byte `json:"bytesValue"` - }{v.asBytes()}) - case ValueKindMap: - return json.Marshal(struct { - Value struct { - Values []Attr `json:"values"` - } `json:"kvlistValue"` - }{struct { - Values []Attr `json:"values"` - }{v.asMap()}}) - case ValueKindSlice: - return json.Marshal(struct { - Value struct { - Values []Value `json:"values"` - } `json:"arrayValue"` - }{struct { - Values []Value `json:"values"` - }{v.asSlice()}}) - case ValueKindEmpty: - return nil, nil - default: - return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) - } -} - -// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. -func (v *Value) UnmarshalJSON(data []byte) error { - decoder := json.NewDecoder(bytes.NewReader(data)) - - t, err := decoder.Token() - if err != nil { - return err - } - if t != json.Delim('{') { - return errors.New("invalid Value type") - } - - for decoder.More() { - keyIface, err := decoder.Token() - if err != nil { - if errors.Is(err, io.EOF) { - // Empty. 
- return nil - } - return err - } - - key, ok := keyIface.(string) - if !ok { - return fmt.Errorf("invalid Value key: %#v", keyIface) - } - - switch key { - case "stringValue", "string_value": - var val string - err = decoder.Decode(&val) - *v = StringValue(val) - case "boolValue", "bool_value": - var val bool - err = decoder.Decode(&val) - *v = BoolValue(val) - case "intValue", "int_value": - var val protoInt64 - err = decoder.Decode(&val) - *v = Int64Value(val.Int64()) - case "doubleValue", "double_value": - var val float64 - err = decoder.Decode(&val) - *v = Float64Value(val) - case "bytesValue", "bytes_value": - var val64 string - if err := decoder.Decode(&val64); err != nil { - return err - } - var val []byte - val, err = base64.StdEncoding.DecodeString(val64) - *v = BytesValue(val) - case "arrayValue", "array_value": - var val struct{ Values []Value } - err = decoder.Decode(&val) - *v = SliceValue(val.Values...) - case "kvlistValue", "kvlist_value": - var val struct{ Values []Attr } - err = decoder.Decode(&val) - *v = MapValue(val.Values...) - default: - // Skip unknown. - continue - } - // Use first valid. Ignore the rest. - return err - } - - // Only unknown fields. Return nil without unmarshaling any value. - return nil -} diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go deleted file mode 100644 index c00221e7be..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. -type nonRecordingSpan struct { - noopSpan - - sc SpanContext -} - -// SpanContext returns the wrapped SpanContext. 
-func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go deleted file mode 100644 index 400fab1238..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" -) - -// NewNoopTracerProvider returns an implementation of TracerProvider that -// performs no operations. The Tracer and Spans created from the returned -// TracerProvider also perform no operations. -// -// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] -// instead. -func NewNoopTracerProvider() TracerProvider { - return noopTracerProvider{} -} - -type noopTracerProvider struct{ embedded.TracerProvider } - -var _ TracerProvider = noopTracerProvider{} - -// Tracer returns noop implementation of Tracer. -func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer { - return noopTracer{} -} - -// noopTracer is an implementation of Tracer that performs no operations. -type noopTracer struct{ embedded.Tracer } - -var _ Tracer = noopTracer{} - -// Start carries forward a non-recording Span, if one is present in the context, otherwise it -// creates a no-op Span. -func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) { - span := SpanFromContext(ctx) - if _, ok := span.(nonRecordingSpan); !ok { - // span is likely already a noopSpan, but let's be sure - span = noopSpanInstance - } - return ContextWithSpan(ctx, span), span -} - -// noopSpan is an implementation of Span that performs no operations. 
-type noopSpan struct{ embedded.Span } - -var noopSpanInstance Span = noopSpan{} - -// SpanContext returns an empty span context. -func (noopSpan) SpanContext() SpanContext { return SpanContext{} } - -// IsRecording always returns false. -func (noopSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (noopSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (noopSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (noopSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (noopSpan) End(...SpanEndOption) {} - -// RecordError does nothing. -func (noopSpan) RecordError(error, ...EventOption) {} - -// AddEvent does nothing. -func (noopSpan) AddEvent(string, ...EventOption) {} - -// AddLink does nothing. -func (noopSpan) AddLink(Link) {} - -// SetName does nothing. -func (noopSpan) SetName(string) {} - -// TracerProvider returns a no-op TracerProvider. -func (s noopSpan) TracerProvider() TracerProvider { - return s.tracerProvider(autoInstEnabled) -} - -// autoInstEnabled defines if the auto-instrumentation SDK is enabled. -// -// The auto-instrumentation is expected to overwrite this value to true when it -// attaches to the process. -var autoInstEnabled = new(bool) - -// tracerProvider return a noopTracerProvider if autoEnabled is false, -// otherwise it will return a TracerProvider from the sdk package used in -// auto-instrumentation. 
-// -//go:noinline -func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider { - if *autoEnabled { - return newAutoTracerProvider() - } - return noopTracerProvider{} -} diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md deleted file mode 100644 index cd382c82a1..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/noop/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Trace Noop - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop) diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go deleted file mode 100644 index 689d220df7..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package noop provides an implementation of the OpenTelemetry trace API that -// produces no telemetry and minimizes used computation resources. -// -// Using this package to implement the OpenTelemetry trace API will effectively -// disable OpenTelemetry. -// -// This implementation can be embedded in other implementations of the -// OpenTelemetry trace API. Doing so will mean the implementation defaults to -// no operation for methods it does not implement. -package noop // import "go.opentelemetry.io/otel/trace/noop" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/embedded" -) - -var ( - // Compile-time check this implements the OpenTelemetry API. - - _ trace.TracerProvider = TracerProvider{} - _ trace.Tracer = Tracer{} - _ trace.Span = Span{} -) - -// TracerProvider is an OpenTelemetry No-Op TracerProvider. 
-type TracerProvider struct{ embedded.TracerProvider } - -// NewTracerProvider returns a TracerProvider that does not record any telemetry. -func NewTracerProvider() TracerProvider { - return TracerProvider{} -} - -// Tracer returns an OpenTelemetry Tracer that does not record any telemetry. -func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { - return Tracer{} -} - -// Tracer is an OpenTelemetry No-Op Tracer. -type Tracer struct{ embedded.Tracer } - -// Start creates a span. The created span will be set in a child context of ctx -// and returned with the span. -// -// If ctx contains a span context, the returned span will also contain that -// span context. If the span context in ctx is for a non-recording span, that -// span instance will be returned directly. -func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { - span := trace.SpanFromContext(ctx) - - // If the parent context contains a non-zero span context, that span - // context needs to be returned as a non-recording span - // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk). - var zeroSC trace.SpanContext - if sc := span.SpanContext(); !sc.Equal(zeroSC) { - if !span.IsRecording() { - // If the span is not recording return it directly. - return ctx, span - } - // Otherwise, return the span context needs in a non-recording span. - span = Span{sc: sc} - } else { - // No parent, return a No-Op span with an empty span context. - span = noopSpanInstance - } - return trace.ContextWithSpan(ctx, span), span -} - -var noopSpanInstance trace.Span = Span{} - -// Span is an OpenTelemetry No-Op Span. -type Span struct { - embedded.Span - - sc trace.SpanContext -} - -// SpanContext returns an empty span context. 
-func (s Span) SpanContext() trace.SpanContext { return s.sc } - -// IsRecording always returns false. -func (Span) IsRecording() bool { return false } - -// SetStatus does nothing. -func (Span) SetStatus(codes.Code, string) {} - -// SetAttributes does nothing. -func (Span) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (Span) End(...trace.SpanEndOption) {} - -// RecordError does nothing. -func (Span) RecordError(error, ...trace.EventOption) {} - -// AddEvent does nothing. -func (Span) AddEvent(string, ...trace.EventOption) {} - -// AddLink does nothing. -func (Span) AddLink(trace.Link) {} - -// SetName does nothing. -func (Span) SetName(string) {} - -// TracerProvider returns a No-Op TracerProvider. -func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go deleted file mode 100644 index ef85cb70c6..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/provider.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import "go.opentelemetry.io/otel/trace/embedded" - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. 
-// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). 
It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go deleted file mode 100644 index d01e793664..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/span.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" -) - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. 
- End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - // - // Note that adding attributes at span creation using [WithAttributes] is preferred - // to calling SetAttribute later, as samplers can only consider information - // already present during span creation. 
- SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided -// ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. 
-const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. 
-func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go deleted file mode 100644 index e3d103c4b6..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "encoding/json" -) - -const ( - // FlagsSampled is a bitmask with the sampled bit set. A SpanContext - // with the sampling bit set means the span is sampled. - FlagsSampled = TraceFlags(0x01) - - // FlagsRandom is a bitmask with the random trace ID flag set. When - // set, it signals that the trace ID was generated randomly with at - // least 56 bits of randomness (W3C Trace Context Level 2). - FlagsRandom = TraceFlags(0x02) - - errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" - - errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" - errNilTraceID errorConst = "trace-id can't be all zero" - - errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" - errNilSpanID errorConst = "span-id can't be all zero" -) - -type errorConst string - -func (e errorConst) Error() string { - return string(e) -} - -// TraceID is a unique identity of a trace. -// nolint:revive // revive complains about stutter of `trace.TraceID`. -type TraceID [16]byte - -var ( - nilTraceID TraceID - _ json.Marshaler = nilTraceID -) - -// IsValid reports whether the trace TraceID is valid. A valid trace ID does -// not consist of zeros only. 
-func (t TraceID) IsValid() bool { - return t != nilTraceID -} - -// MarshalJSON implements a custom marshal function to encode TraceID -// as a hex string. -func (t TraceID) MarshalJSON() ([]byte, error) { - b := [32 + 2]byte{0: '"', 33: '"'} - h := t.hexBytes() - copy(b[1:], h[:]) - return b[:], nil -} - -// String returns the hex string representation form of a TraceID. -func (t TraceID) String() string { - h := t.hexBytes() - return string(h[:]) -} - -// hexBytes returns the hex string representation form of a TraceID. -func (t TraceID) hexBytes() [32]byte { - return [32]byte{ - hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf], - hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf], - hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf], - hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf], - hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf], - hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf], - hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf], - hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf], - hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf], - hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf], - hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf], - hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf], - hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf], - hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf], - hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf], - hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf], - } -} - -// SpanID is a unique identity of a span in a trace. -type SpanID [8]byte - -var ( - nilSpanID SpanID - _ json.Marshaler = nilSpanID -) - -// IsValid reports whether the SpanID is valid. A valid SpanID does not consist -// of zeros only. -func (s SpanID) IsValid() bool { - return s != nilSpanID -} - -// MarshalJSON implements a custom marshal function to encode SpanID -// as a hex string. -func (s SpanID) MarshalJSON() ([]byte, error) { - b := [16 + 2]byte{0: '"', 17: '"'} - h := s.hexBytes() - copy(b[1:], h[:]) - return b[:], nil -} - -// String returns the hex string representation form of a SpanID. 
-func (s SpanID) String() string { - b := s.hexBytes() - return string(b[:]) -} - -func (s SpanID) hexBytes() [16]byte { - return [16]byte{ - hexLU[s[0]>>4], hexLU[s[0]&0xf], - hexLU[s[1]>>4], hexLU[s[1]&0xf], - hexLU[s[2]>>4], hexLU[s[2]&0xf], - hexLU[s[3]>>4], hexLU[s[3]&0xf], - hexLU[s[4]>>4], hexLU[s[4]&0xf], - hexLU[s[5]>>4], hexLU[s[5]&0xf], - hexLU[s[6]>>4], hexLU[s[6]&0xf], - hexLU[s[7]>>4], hexLU[s[7]&0xf], - } -} - -// TraceIDFromHex returns a TraceID from a hex string if it is compliant with -// the W3C trace-context specification. See more at -// https://www.w3.org/TR/trace-context/#trace-id -// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. -func TraceIDFromHex(h string) (TraceID, error) { - if len(h) != 32 { - return [16]byte{}, errInvalidTraceIDLength - } - var b [16]byte - invalidMark := byte(0) - for i := 0; i < len(h); i += 4 { - b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] - b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] - invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] - } - // If the upper 4 bits of any byte are not zero, there was an invalid hex - // character since invalid hex characters are 0xff in hexRev. - if invalidMark&0xf0 != 0 { - return [16]byte{}, errInvalidHexID - } - // If we didn't set any bits, then h was all zeros. - if invalidMark == 0 { - return [16]byte{}, errNilTraceID - } - return b, nil -} - -// SpanIDFromHex returns a SpanID from a hex string if it is compliant -// with the w3c trace-context specification. 
-// See more at https://www.w3.org/TR/trace-context/#parent-id -func SpanIDFromHex(h string) (SpanID, error) { - if len(h) != 16 { - return [8]byte{}, errInvalidSpanIDLength - } - var b [8]byte - invalidMark := byte(0) - for i := 0; i < len(h); i += 4 { - b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] - b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] - invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] - } - // If the upper 4 bits of any byte are not zero, there was an invalid hex - // character since invalid hex characters are 0xff in hexRev. - if invalidMark&0xf0 != 0 { - return [8]byte{}, errInvalidHexID - } - // If we didn't set any bits, then h was all zeros. - if invalidMark == 0 { - return [8]byte{}, errNilSpanID - } - return b, nil -} - -// TraceFlags contains flags that can be set on a SpanContext. -type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. - -// IsSampled reports whether the sampling bit is set in the TraceFlags. -func (tf TraceFlags) IsSampled() bool { - return tf&FlagsSampled == FlagsSampled -} - -// WithSampled sets the sampling bit in a new copy of the TraceFlags. -func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. - if sampled { - return tf | FlagsSampled - } - - return tf &^ FlagsSampled -} - -// IsRandom reports whether the random bit is set in the TraceFlags. -func (tf TraceFlags) IsRandom() bool { - return tf&FlagsRandom == FlagsRandom -} - -// WithRandom sets the random bit in a new copy of the TraceFlags. -func (tf TraceFlags) WithRandom(random bool) TraceFlags { // nolint:revive // random is not a control flag. - if random { - return tf | FlagsRandom - } - - return tf &^ FlagsRandom -} - -// MarshalJSON implements a custom marshal function to encode TraceFlags -// as a hex string. 
-func (tf TraceFlags) MarshalJSON() ([]byte, error) { - b := [2 + 2]byte{0: '"', 3: '"'} - h := tf.hexBytes() - copy(b[1:], h[:]) - return b[:], nil -} - -// String returns the hex string representation form of TraceFlags. -func (tf TraceFlags) String() string { - h := tf.hexBytes() - return string(h[:]) -} - -func (tf TraceFlags) hexBytes() [2]byte { - return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]} -} - -// SpanContextConfig contains mutable fields usable for constructing -// an immutable SpanContext. -type SpanContextConfig struct { - TraceID TraceID - SpanID SpanID - TraceFlags TraceFlags - TraceState TraceState - Remote bool -} - -// NewSpanContext constructs a SpanContext using values from the provided -// SpanContextConfig. -func NewSpanContext(config SpanContextConfig) SpanContext { - return SpanContext{ - traceID: config.TraceID, - spanID: config.SpanID, - traceFlags: config.TraceFlags, - traceState: config.TraceState, - remote: config.Remote, - } -} - -// SpanContext contains identifying trace information about a Span. -type SpanContext struct { - traceID TraceID - spanID SpanID - traceFlags TraceFlags - traceState TraceState - remote bool -} - -var _ json.Marshaler = SpanContext{} - -// IsValid reports whether the SpanContext is valid. A valid span context has a -// valid TraceID and SpanID. -func (sc SpanContext) IsValid() bool { - return sc.HasTraceID() && sc.HasSpanID() -} - -// IsRemote reports whether the SpanContext represents a remotely-created Span. -func (sc SpanContext) IsRemote() bool { - return sc.remote -} - -// WithRemote returns a copy of sc with the Remote property set to remote. -func (sc SpanContext) WithRemote(remote bool) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: remote, - } -} - -// TraceID returns the TraceID from the SpanContext. 
-func (sc SpanContext) TraceID() TraceID { - return sc.traceID -} - -// HasTraceID reports whether the SpanContext has a valid TraceID. -func (sc SpanContext) HasTraceID() bool { - return sc.traceID.IsValid() -} - -// WithTraceID returns a new SpanContext with the TraceID replaced. -func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { - return SpanContext{ - traceID: traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// SpanID returns the SpanID from the SpanContext. -func (sc SpanContext) SpanID() SpanID { - return sc.spanID -} - -// HasSpanID reports whether the SpanContext has a valid SpanID. -func (sc SpanContext) HasSpanID() bool { - return sc.spanID.IsValid() -} - -// WithSpanID returns a new SpanContext with the SpanID replaced. -func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceFlags returns the flags from the SpanContext. -func (sc SpanContext) TraceFlags() TraceFlags { - return sc.traceFlags -} - -// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags. -func (sc SpanContext) IsSampled() bool { - return sc.traceFlags.IsSampled() -} - -// IsRandom reports whether the random bit is set in the SpanContext's TraceFlags. -func (sc SpanContext) IsRandom() bool { - return sc.traceFlags.IsRandom() -} - -// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. -func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: flags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceState returns the TraceState from the SpanContext. 
-func (sc SpanContext) TraceState() TraceState { - return sc.traceState -} - -// WithTraceState returns a new SpanContext with the TraceState replaced. -func (sc SpanContext) WithTraceState(state TraceState) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: state, - remote: sc.remote, - } -} - -// Equal reports whether two SpanContext values are equal. -func (sc SpanContext) Equal(other SpanContext) bool { - return sc.traceID == other.traceID && - sc.spanID == other.spanID && - sc.traceFlags == other.traceFlags && - sc.traceState.String() == other.traceState.String() && - sc.remote == other.remote -} - -// MarshalJSON implements a custom marshal function to encode a SpanContext. -func (sc SpanContext) MarshalJSON() ([]byte, error) { - return json.Marshal(SpanContextConfig{ - TraceID: sc.traceID, - SpanID: sc.spanID, - TraceFlags: sc.traceFlags, - TraceState: sc.traceState, - Remote: sc.remote, - }) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go deleted file mode 100644 index 77952d2a0b..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/tracer.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/trace/embedded" -) - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. 
- embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go deleted file mode 100644 index e9cb3fd4d1..0000000000 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "encoding/json" - "fmt" - "strings" -) - -const ( - maxListMembers = 32 - - listDelimiters = "," - memberDelimiter = "=" - - errInvalidKey errorConst = "invalid tracestate key" - errInvalidValue errorConst = "invalid tracestate value" - errInvalidMember errorConst = "invalid tracestate list-member" - errMemberNumber errorConst = "too many list-members in tracestate" - errDuplicate errorConst = "duplicate list-member in tracestate" -) - -type member struct { - Key string - Value string -} - -// according to (chr = %x20 / (nblk-char = %x21-2B / %x2D-3C / %x3E-7E) ) -// means (chr = %x20-2B / %x2D-3C / %x3E-7E) . 
-func checkValueChar(v byte) bool { - return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d' -} - -// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) . -func checkValueLast(v byte) bool { - return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d' -} - -// based on the W3C Trace Context specification -// -// value = (0*255(chr)) nblk-chr -// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E -// chr = %x20 / nblk-chr -// -// see https://www.w3.org/TR/trace-context-1/#value -func checkValue(val string) bool { - n := len(val) - if n == 0 || n > 256 { - return false - } - for i := 0; i < n-1; i++ { - if !checkValueChar(val[i]) { - return false - } - } - return checkValueLast(val[n-1]) -} - -func checkKeyRemain(key string) bool { - // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) - for _, v := range key { - if v > 127 { - return false - } - if isAlphaNumASCII(v) { - continue - } - switch v { - case '_', '-', '*', '/': - continue - } - return false - } - return true -} - -// according to -// -// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// -// param n is remain part length, should be 255 in simple-key or 13 in system-id. -func checkKeyPart(key string, n int) bool { - if key == "" { - return false - } - first := key[0] // key's first char - ret := len(key[1:]) <= n - ret = ret && first >= 'a' && first <= 'z' - return ret && checkKeyRemain(key[1:]) -} - -func isAlphaNumASCII[T rune | byte](c T) bool { - if c >= 'a' && c <= 'z' { - return true - } - return c >= '0' && c <= '9' -} - -// according to -// -// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) -// -// param n is remain part length, should be 240 exactly. 
-func checkKeyTenant(key string, n int) bool { - if key == "" { - return false - } - return isAlphaNumASCII(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) -} - -// based on the W3C Trace Context specification -// -// key = simple-key / multi-tenant-key -// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// multi-tenant-key = tenant-id "@" system-id -// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) -// lcalpha = %x61-7A ; a-z -// -// see https://www.w3.org/TR/trace-context-1/#tracestate-header. -func checkKey(key string) bool { - tenant, system, ok := strings.Cut(key, "@") - if !ok { - return checkKeyPart(key, 255) - } - return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) -} - -func newMember(key, value string) (member, error) { - if !checkKey(key) { - return member{}, errInvalidKey - } - if !checkValue(value) { - return member{}, errInvalidValue - } - return member{Key: key, Value: value}, nil -} - -func parseMember(m string) (member, error) { - key, val, ok := strings.Cut(m, memberDelimiter) - if !ok { - return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) - } - key = strings.TrimLeft(key, " \t") - val = strings.TrimRight(val, " \t") - result, e := newMember(key, val) - if e != nil { - return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) - } - return result, nil -} - -// String encodes member into a string compliant with the W3C Trace Context -// specification. -func (m member) String() string { - return m.Key + "=" + m.Value -} - -// TraceState provides additional vendor-specific trace identification -// information across different distributed tracing systems. It represents an -// immutable list consisting of key/value pairs, each pair is referred to as a -// list-member. -// -// TraceState conforms to the W3C Trace Context specification -// (https://www.w3.org/TR/trace-context-1). 
All operations that create or copy -// a TraceState do so by validating all input and will only produce TraceState -// that conform to the specification. Specifically, this means that all -// list-member's key/value pairs are valid, no duplicate list-members exist, -// and the maximum number of list-members (32) is not exceeded. -type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` - // list is the members in order. - list []member -} - -var _ json.Marshaler = TraceState{} - -// ParseTraceState attempts to decode a TraceState from the passed -// string. It returns an error if the input is invalid according to the W3C -// Trace Context specification. -func ParseTraceState(ts string) (TraceState, error) { - if ts == "" { - return TraceState{}, nil - } - - wrapErr := func(err error) error { - return fmt.Errorf("failed to parse tracestate: %w", err) - } - - var members []member - found := make(map[string]struct{}) - for ts != "" { - var memberStr string - memberStr, ts, _ = strings.Cut(ts, listDelimiters) - if memberStr == "" { - continue - } - - m, err := parseMember(memberStr) - if err != nil { - return TraceState{}, wrapErr(err) - } - - if _, ok := found[m.Key]; ok { - return TraceState{}, wrapErr(errDuplicate) - } - found[m.Key] = struct{}{} - - members = append(members, m) - if n := len(members); n > maxListMembers { - return TraceState{}, wrapErr(errMemberNumber) - } - } - - return TraceState{list: members}, nil -} - -// MarshalJSON marshals the TraceState into JSON. -func (ts TraceState) MarshalJSON() ([]byte, error) { - return json.Marshal(ts.String()) -} - -// String encodes the TraceState into a string compliant with the W3C -// Trace Context specification. The returned string will be invalid if the -// TraceState contains any invalid members. 
-func (ts TraceState) String() string { - if len(ts.list) == 0 { - return "" - } - var n int - n += len(ts.list) // member delimiters: '=' - n += len(ts.list) - 1 // list delimiters: ',' - for _, mem := range ts.list { - n += len(mem.Key) - n += len(mem.Value) - } - - var sb strings.Builder - sb.Grow(n) - _, _ = sb.WriteString(ts.list[0].Key) - _ = sb.WriteByte('=') - _, _ = sb.WriteString(ts.list[0].Value) - for i := 1; i < len(ts.list); i++ { - _ = sb.WriteByte(listDelimiters[0]) - _, _ = sb.WriteString(ts.list[i].Key) - _ = sb.WriteByte('=') - _, _ = sb.WriteString(ts.list[i].Value) - } - return sb.String() -} - -// Get returns the value paired with key from the corresponding TraceState -// list-member if it exists, otherwise an empty string is returned. -func (ts TraceState) Get(key string) string { - for _, member := range ts.list { - if member.Key == key { - return member.Value - } - } - - return "" -} - -// Walk walks all key value pairs in the TraceState by calling f -// Iteration stops if f returns false. -func (ts TraceState) Walk(f func(key, value string) bool) { - for _, m := range ts.list { - if !f(m.Key, m.Value) { - break - } - } -} - -// Insert adds a new list-member defined by the key/value pair to the -// TraceState. If a list-member already exists for the given key, that -// list-member's value is updated. The new or updated list-member is always -// moved to the beginning of the TraceState as specified by the W3C Trace -// Context specification. -// -// If key or value are invalid according to the W3C Trace Context -// specification an error is returned with the original TraceState. -// -// If adding a new list-member means the TraceState would have more members -// then is allowed, the new list-member will be inserted and the right-most -// list-member will be dropped in the returned TraceState. 
-func (ts TraceState) Insert(key, value string) (TraceState, error) { - m, err := newMember(key, value) - if err != nil { - return ts, err - } - n := len(ts.list) - found := n - for i := range ts.list { - if ts.list[i].Key == key { - found = i - } - } - cTS := TraceState{} - if found == n && n < maxListMembers { - cTS.list = make([]member, n+1) - } else { - cTS.list = make([]member, n) - } - cTS.list[0] = m - // When the number of members exceeds capacity, drop the "right-most". - copy(cTS.list[1:], ts.list[0:found]) - if found < n { - copy(cTS.list[1+found:], ts.list[found+1:]) - } - return cTS, nil -} - -// Delete returns a copy of the TraceState with the list-member identified by -// key removed. -func (ts TraceState) Delete(key string) TraceState { - members := make([]member, ts.Len()) - copy(members, ts.list) - for i, member := range ts.list { - if member.Key == key { - members = append(members[:i], members[i+1:]...) - // TraceState should contain no duplicate members. - break - } - } - return TraceState{list: members} -} - -// Len returns the number of list-members in the TraceState. -func (ts TraceState) Len() int { - return len(ts.list) -} diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh deleted file mode 100644 index c9b7cdbbfe..0000000000 --- a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -TARGET="${1:?Must provide target ref}" - -FILE="CHANGELOG.md" -TEMP_DIR=$(mktemp -d) -echo "Temp folder: $TEMP_DIR" - -# Only the latest commit of the feature branch is available -# automatically. To diff with the base branch, we need to -# fetch that too (and we only need its latest commit). 
-git fetch origin "${TARGET}" --depth=1 - -# Checkout the previous version on the base branch of the changelog to tmpfolder -git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE - -PREVIOUS_FILE="$TEMP_DIR/$FILE" -CURRENT_FILE="$FILE" -PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" -CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" - -# Extract released sections from the previous version -awk '/^/ {flag=1} /^/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" - -# Extract released sections from the current version -awk '/^/ {flag=1} /^/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" - -# Compare the released sections -if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then - echo "Error: The released sections of the changelog file have been modified." - diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" - rm -rf "$TEMP_DIR" - false -fi - -rm -rf "$TEMP_DIR" -echo "The released sections remain unchanged." diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go deleted file mode 100644 index 1db4f47e43..0000000000 --- a/vendor/go.opentelemetry.io/otel/version.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otel // import "go.opentelemetry.io/otel" - -// Version is the current release version of OpenTelemetry in use. 
-func Version() string { - return "1.43.0" -} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml deleted file mode 100644 index bcc6ee78a4..0000000000 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -module-sets: - stable-v1: - version: v1.43.0 - modules: - - go.opentelemetry.io/otel - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - - go.opentelemetry.io/otel/exporters/otlp/otlptrace - - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp - - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - go.opentelemetry.io/otel/exporters/stdout/stdouttrace - - go.opentelemetry.io/otel/exporters/zipkin - - go.opentelemetry.io/otel/metric - - go.opentelemetry.io/otel/sdk - - go.opentelemetry.io/otel/sdk/metric - - go.opentelemetry.io/otel/trace - experimental-metrics: - version: v0.65.0 - modules: - - go.opentelemetry.io/otel/exporters/prometheus - experimental-logs: - version: v0.19.0 - modules: - - go.opentelemetry.io/otel/log - - go.opentelemetry.io/otel/log/logtest - - go.opentelemetry.io/otel/sdk/log - - go.opentelemetry.io/otel/sdk/log/logtest - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - - go.opentelemetry.io/otel/exporters/stdout/stdoutlog - experimental-schema: - version: v0.0.16 - modules: - - go.opentelemetry.io/otel/schema -excluded-modules: - - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/trace/internal/telemetry/test -modules: - 
go.opentelemetry.io/otel/exporters/stdout/stdouttrace: - version-refs: - - ./internal/version.go - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric: - version-refs: - - ./internal/version.go - go.opentelemetry.io/otel/exporters/prometheus: - version-refs: - - ./internal/version.go - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc: - version-refs: - - ./internal/version.go - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: - version-refs: - - ./internal/version.go - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp: - version-refs: - - ./internal/version.go - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: - version-refs: - - ./internal/version.go - go.opentelemetry.io/otel/exporters/stdout/stdoutlog: - version-refs: - - ./internal/version.go diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE similarity index 100% rename from vendor/gopkg.in/yaml.v3/LICENSE rename to vendor/go.yaml.in/yaml/v3/LICENSE diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/go.yaml.in/yaml/v3/NOTICE similarity index 100% rename from vendor/gopkg.in/yaml.v3/NOTICE rename to vendor/go.yaml.in/yaml/v3/NOTICE diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 0000000000..15a85a6350 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. 
+ + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). 
+ + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. 
diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go similarity index 99% rename from vendor/gopkg.in/yaml.v3/apic.go rename to vendor/go.yaml.in/yaml/v3/apic.go index ae7d049f18..05fd305da1 100644 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ b/vendor/go.yaml.in/yaml/v3/apic.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 0000000000..02e2b17bfe --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. 
+func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue 
= string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s 
into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. 
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+ text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. + isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + 
case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if 
d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + 
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := 
merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go similarity index 95% rename from vendor/gopkg.in/yaml.v3/emitterc.go rename to vendor/go.yaml.in/yaml/v3/emitterc.go index 0f47c9ca8a..ab4e03ba72 100644 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ b/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -162,10 +162,9 @@ func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { // Check if we need to accumulate more events before emitting. // // We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { if emitter.events_head == len(emitter.events) { return true @@ -226,7 +225,7 @@ func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_ } // Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { +func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { emitter.indents = append(emitter.indents, emitter.indent) if emitter.indent < 0 { if flow { @@ -241,7 +240,14 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent += 2 } else { // Everything else aligns to the chosen indentation. 
- emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. + emitter.indent = emitter.indent - 2 + } } } return true @@ -478,6 +484,18 @@ func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") } +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + // Expect the root node. func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) @@ -728,7 +746,16 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e // Expect a block item node. 
func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - if !yaml_emitter_increase_indent(emitter, false, false) { + // emitter.mapping context tells us if we are currently in a mapping context. + // emiiter.column tells us which column we are in in the yaml output. 0 is the first char of the column. + // emitter.indentation tells us if the last character was an indentation character. + // emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements. + // So, `seq` means that we are in a mapping context, and we are either at the first char of the column or + // the last character was not an indentation character, and we consider '- ' part of the indentation + // for sequence elements. + seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) && + emitter.compact_sequence_indent + if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) { return false } } @@ -1144,8 +1171,15 @@ func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { } // Write an line comment. -func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { +func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool { if len(emitter.line_comment) == 0 { + // The next 3 lines are needed to resolve an issue with leading newlines + // See https://github.com/go-yaml/yaml/issues/755 + // When linebreak is set to true, put_break will be called and will add + // the needed newline. 
+ if linebreak && !put_break(emitter) { + return false + } return true } if !emitter.whitespace { @@ -1894,7 +1928,7 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !yaml_emitter_process_line_comment(emitter) { + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { return false } //emitter.indention = true @@ -1931,7 +1965,7 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !yaml_emitter_process_line_comment(emitter) { + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { return false } diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go similarity index 100% rename from vendor/gopkg.in/yaml.v3/encode.go rename to vendor/go.yaml.in/yaml/v3/encode.go diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go similarity index 93% rename from vendor/gopkg.in/yaml.v3/parserc.go rename to vendor/go.yaml.in/yaml/v3/parserc.go index 268558a0d6..25fe823637 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/go.yaml.in/yaml/v3/parserc.go @@ -227,7 +227,8 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool // Parse the production: // stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ +// +// ************ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -249,9 +250,12 @@ func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// * +// +// * +// // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// ************************* +// +// ************************* func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { token := peek_token(parser) @@ -356,8 +360,8 @@ func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t // Parse the productions: // explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** // +// *********** func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -379,9 +383,10 @@ func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event // Parse the productions: // implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* // +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -428,30 +433,41 @@ func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) // Parse the productions: // block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// // block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// // flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * +// +// ***** +// | properties flow_content? 
+// ********** * +// | flow_content +// * +// // properties ::= TAG ANCHOR? | ANCHOR TAG? -// ************************* +// +// ************************* +// // block_content ::= block_collection | flow_collection | SCALAR -// ****** +// +// ****** +// // flow_content ::= flow_collection | SCALAR -// ****** +// +// ****** func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() @@ -682,8 +698,8 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i // Parse the productions: // block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* // +// ******************** *********** * ********* func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -740,7 +756,8 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e // Parse the productions: // indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * +// +// *********** * func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -805,14 +822,14 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* // -// BLOCK-END -// ********* +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* // +// BLOCK-END +// ********* func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -881,13 +898,11 @@ func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_even // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// +// ((KEY block_node_or_indentless_sequence?)? // +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -915,16 +930,18 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev // Parse the productions: // flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -987,11 +1004,10 @@ func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_ev return true } -// // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * // +// *** * func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1011,8 +1027,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, ev // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * // +// ***** * func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1035,8 +1051,8 @@ func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, // Parse the productions: // flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * // +// * func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { token := peek_token(parser) if token == nil { @@ -1053,16 +1069,17 @@ func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, ev // Parse the productions: // flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * // +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - *** * func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) @@ -1128,8 +1145,7 @@ func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event // Parse the productions: // flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * ***** * -// +// - ***** * func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { token := peek_token(parser) if token == nil { diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go similarity index 99% rename from vendor/gopkg.in/yaml.v3/readerc.go rename to vendor/go.yaml.in/yaml/v3/readerc.go index b7de0a89c4..56af245366 100644 --- a/vendor/gopkg.in/yaml.v3/readerc.go +++ b/vendor/go.yaml.in/yaml/v3/readerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go similarity index 100% rename from vendor/gopkg.in/yaml.v3/resolve.go rename to vendor/go.yaml.in/yaml/v3/resolve.go diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go similarity index 99% rename from vendor/gopkg.in/yaml.v3/scannerc.go rename to vendor/go.yaml.in/yaml/v3/scannerc.go index ca0070108f..30b1f08920 100644 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ b/vendor/go.yaml.in/yaml/v3/scannerc.go @@ -1614,11 +1614,11 @@ func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { // Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { // Eat '%'. start_mark := parser.mark @@ -1719,11 +1719,11 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool // Scan the directive name. // // Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ // +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { // Consume the directive name. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1758,8 +1758,9 @@ func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark // Scan the value of VERSION-DIRECTIVE. 
// // Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ +// +// %YAML 1.1 # a comment \n +// ^^^^^^ func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { // Eat whitespaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -1797,10 +1798,11 @@ const max_number_length = 2 // Scan the version number of VERSION-DIRECTIVE. // // Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { // Repeat while the next character is digit. @@ -1834,9 +1836,9 @@ func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark // Scan the value of a TAG-DIRECTIVE token. // // Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { var handle_value, prefix_value []byte @@ -2847,7 +2849,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t continue } if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false @@ -2876,7 +2878,7 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t parser.comments = append(parser.comments, yaml_comment_t{ token_mark: token_mark, start_mark: start_mark, - line: text, + line: text, }) } return true @@ -2910,7 +2912,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo // the foot is the line below it. 
var foot_line = -1 if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 + foot_line = parser.mark.line - parser.newlines + 1 if parser.newlines == 0 && parser.mark.column > 1 { foot_line++ } @@ -2996,7 +2998,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo recent_empty = false // Consume until after the consumed comment line. - seen := parser.mark.index+peek + seen := parser.mark.index + peek for { if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go similarity index 100% rename from vendor/gopkg.in/yaml.v3/sorter.go rename to vendor/go.yaml.in/yaml/v3/sorter.go diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go similarity index 99% rename from vendor/gopkg.in/yaml.v3/writerc.go rename to vendor/go.yaml.in/yaml/v3/writerc.go index b8a116bf9a..266d0b092c 100644 --- a/vendor/gopkg.in/yaml.v3/writerc.go +++ b/vendor/go.yaml.in/yaml/v3/writerc.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go similarity index 91% rename from vendor/gopkg.in/yaml.v3/yaml.go rename to vendor/go.yaml.in/yaml/v3/yaml.go index 8cec6da48d..0b101cd20d 100644 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ b/vendor/go.yaml.in/yaml/v3/yaml.go @@ -17,8 +17,7 @@ // // Source code and other details for the project are available at GitHub: // -// https://github.com/go-yaml/yaml -// +// https://github.com/yaml/go-yaml package yaml import ( @@ -75,16 +74,15 @@ type Marshaler interface { // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) // // See the documentation of Marshal for the format of tags and a list of // supported tag options. -// func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } @@ -185,36 +183,35 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // // The field tag format accepted is: // -// `(...) yaml:"[][,[,]]" (...)` +// `(...) yaml:"[][,[,]]" (...)` // // The following flags are currently supported: // -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. 
// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). +// flow Marshal using a flow style (useful for structs, +// sequences and maps). // -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. // // In addition, if the key is "-", the field is ignored. // // For example: // -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() @@ -278,6 +275,16 @@ func (e *Encoder) SetIndent(spaces int) { e.encoder.indent = spaces } +// CompactSeqIndent makes it so that '- ' is considered part of the indentation. +func (e *Encoder) CompactSeqIndent() { + e.encoder.emitter.compact_sequence_indent = true +} + +// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. +func (e *Encoder) DefaultSeqIndent() { + e.encoder.emitter.compact_sequence_indent = false +} + // Close closes the encoder by writing any remaining data. // It does not write a stream terminating string "...". 
func (e *Encoder) Close() (err error) { @@ -358,22 +365,21 @@ const ( // // For example: // -// var person struct { -// Name string -// Address yaml.Node -// } -// err := yaml.Unmarshal(data, &person) -// -// Or by itself: +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) // -// var person Node -// err := yaml.Unmarshal(data, &person) +// Or by itself: // +// var person Node +// err := yaml.Unmarshal(data, &person) type Node struct { // Kind defines whether the node is a document, a mapping, a sequence, // a scalar value, or an alias to another node. The specific data type of // scalar nodes may be obtained via the ShortTag and LongTag methods. - Kind Kind + Kind Kind // Style allows customizing the apperance of the node in the tree. Style Style @@ -421,7 +427,6 @@ func (n *Node) IsZero() bool { n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 } - // LongTag returns the long form of the tag that indicates the data type for // the node. If the Tag field isn't explicitly defined, one will be computed // based on the node properties. diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go similarity index 99% rename from vendor/gopkg.in/yaml.v3/yamlh.go rename to vendor/go.yaml.in/yaml/v3/yamlh.go index 7c6d007706..f59aa40f64 100644 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ b/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -438,7 +438,9 @@ type yaml_document_t struct { // The number of written bytes should be set to the size_read variable. // // [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). +// +// yaml_parser_set_input(). +// // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. 
@@ -639,7 +641,6 @@ type yaml_parser_t struct { } type yaml_comment_t struct { - scan_mark yaml_mark_t // Position where scanning for comments started token_mark yaml_mark_t // Position after which tokens will be associated with this comment start_mark yaml_mark_t // Position of '#' comment mark @@ -659,13 +660,14 @@ type yaml_comment_t struct { // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). +// +// yaml_emitter_set_output(). +// // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. -// type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error type yaml_emitter_state_t int @@ -742,6 +744,8 @@ type yaml_emitter_t struct { indent int // The current indentation level. + compact_sequence_indent bool // Is '- ' is considered part of the indentation for sequence elements? + flow_level int // The current flow level. root_context bool // Is it the document root context? 
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go similarity index 97% rename from vendor/gopkg.in/yaml.v3/yamlprivateh.go rename to vendor/go.yaml.in/yaml/v3/yamlprivateh.go index e88f9c54ae..dea1ba9610 100644 --- a/vendor/gopkg.in/yaml.v3/yamlprivateh.go +++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go @@ -1,17 +1,17 @@ -// +// // Copyright (c) 2011-2019 Canonical Ltd // Copyright (c) 2006-2010 Kirill Simonov -// +// // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies // of the Software, and to permit persons to whom the Software is furnished to do // so, subject to the following conditions: -// +// // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. -// +// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE @@ -137,8 +137,8 @@ func is_crlf(b []byte, i int) bool { func is_breakz(b []byte, i int) bool { //return is_break(b, i) || is_z(b, i) return ( - // is_break: - b[i] == '\r' || // CR (#xD) + // is_break: + b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) @@ -151,8 +151,8 @@ func is_breakz(b []byte, i int) bool { func is_spacez(b []byte, i int) bool { //return is_space(b, i) || is_breakz(b, i) return ( - // is_space: - b[i] == ' ' || + // is_space: + b[i] == ' ' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) @@ -166,8 +166,8 @@ func is_spacez(b []byte, i int) bool { func is_blankz(b []byte, i int) bool { //return is_blank(b, i) || is_breakz(b, i) return ( - // is_blank: - b[i] == ' ' || b[i] == '\t' || + // is_blank: + b[i] == ' ' || b[i] == '\t' || // is_breakz: b[i] == '\r' || // CR (#xD) b[i] == '\n' || // LF (#xA) diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 0000000000..28cd99c7f3 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. 
To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 0000000000..269e173ca4 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_arm64_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_arm64_gc.s new file mode 100644 index 0000000000..e07fa75eb5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_arm64_gc.s @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin && arm64 && gc + +#include "textflag.h" + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 0000000000..ec2acfe540 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 0000000000..271055be0b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "runtime" +) + +// byteOrder is a subset of encoding/binary.ByteOrder. 
+type byteOrder interface { + Uint32([]byte) uint32 + Uint64([]byte) uint64 +} + +type littleEndian struct{} +type bigEndian struct{} + +func (littleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (bigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// hostByteOrder returns littleEndian on little-endian machines and +// bigEndian on big-endian machines. +func hostByteOrder() byteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", + "loong64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", + "riscv", "riscv64", + "sh": + return littleEndian{} + case "armbe", "arm64be", + "m68k", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "shbe", + "sparc", "sparc64": + return bigEndian{} + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 0000000000..63541994ef --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,338 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +import ( + "os" + "strings" +) + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. 
+var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasAVX512 bool // Advanced vector extension 512 + HasAVX512F bool // Advanced vector extension 512 Foundation Instructions + HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions + HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions + HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions + HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions + HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions + HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add + HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions + HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision + HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision + HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions + HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations + HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions + HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions + HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions + HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 + HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms + HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix 
Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasCX16 bool // Compare and exchange 16 Bytes + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. 
+var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + HasDIT bool // Data Independent Timing support + HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions + _ CacheLinePad +} + +// ARM contains the supported CPU features of the current ARM (32-bit) platform. +// All feature flags are false if: +// 1. the current platform is not arm, or +// 2. the current operating system is not Linux. 
+var ARM struct { + _ CacheLinePad + HasSWP bool // SWP instruction support + HasHALF bool // Half-word load and store support + HasTHUMB bool // ARM Thumb instruction set + Has26BIT bool // Address space limited to 26-bits + HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support + HasFPA bool // Floating point arithmetic support + HasVFP bool // Vector floating point support + HasEDSP bool // DSP Extensions support + HasJAVA bool // Java instruction set + HasIWMMXT bool // Intel Wireless MMX technology support + HasCRUNCH bool // MaverickCrunch context switching and handling + HasTHUMBEE bool // Thumb EE instruction set + HasNEON bool // NEON instruction set + HasVFPv3 bool // Vector floating point version 3 support + HasVFPv3D16 bool // Vector floating point version 3 D8-D15 + HasTLS bool // Thread local storage support + HasVFPv4 bool // Vector floating point version 4 support + HasIDIVA bool // Integer divide instruction support in ARM mode + HasIDIVT bool // Integer divide instruction support in Thumb mode + HasVFPD32 bool // Vector floating point version 3 D15-D31 + HasLPAE bool // Large Physical Address Extensions + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + _ CacheLinePad +} + +// The booleans in Loong64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. 
+var Loong64 struct { + _ CacheLinePad + HasLSX bool // support 128-bit vector extension + HasLASX bool // support 256-bit vector extension + HasCRC32 bool // support CRC instruction + HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction + HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction + _ CacheLinePad +} + +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8 + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. 
+var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} + +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. 
+var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + HasZvbb bool // Vector Basic Bit-manipulation + HasZvbc bool // Vector Carryless Multiplication + HasZvkb bool // Vector Cryptography Bit-manipulation + HasZvkt bool // Vector Data-Independent Execution Latency + HasZvkg bool // Vector GCM/GMAC + HasZvkn bool // NIST Algorithm Suite (AES/SHA256/SHA512) + HasZvknc bool // NIST Algorithm Suite with carryless multiply + HasZvkng bool // NIST Algorithm Suite with GCM + HasZvks bool // ShangMi Algorithm Suite + HasZvksc bool // ShangMi Algorithm Suite with carryless multiplication + HasZvksg bool // ShangMi Algorithm Suite with GCM + _ CacheLinePad +} + +func init() { + archInit() + initOptions() + processOptions() +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific initOptions functions. +// Features that are mandatory for the specific GOARCH should have the Required field set +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled +} + +func processOptions() { + env := os.Getenv("GODEBUG") +field: + for env != "" { + field := "" + i := strings.IndexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." 
{ + continue + } + i = strings.IndexByte(field, '=') + if i < 0 { + print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go new file mode 100644 index 0000000000..9bf0c32eb6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build aix + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func archInit() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER8 = true + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 0000000000..301b752e9c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,73 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +// HWCAP/HWCAP2 bits. +// These are specific to Linux. +const ( + hwcap_SWP = 1 << 0 + hwcap_HALF = 1 << 1 + hwcap_THUMB = 1 << 2 + hwcap_26BIT = 1 << 3 + hwcap_FAST_MULT = 1 << 4 + hwcap_FPA = 1 << 5 + hwcap_VFP = 1 << 6 + hwcap_EDSP = 1 << 7 + hwcap_JAVA = 1 << 8 + hwcap_IWMMXT = 1 << 9 + hwcap_CRUNCH = 1 << 10 + hwcap_THUMBEE = 1 << 11 + hwcap_NEON = 1 << 12 + hwcap_VFPv3 = 1 << 13 + hwcap_VFPv3D16 = 1 << 14 + hwcap_TLS = 1 << 15 + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_IDIVT = 1 << 18 + hwcap_VFPD32 = 1 << 19 + hwcap_LPAE = 1 << 20 + hwcap_EVTSTRM = 1 << 21 + + hwcap2_AES = 1 << 0 + hwcap2_PMULL = 1 << 1 + hwcap2_SHA1 = 1 << 2 + hwcap2_SHA2 = 1 << 3 + hwcap2_CRC32 = 1 << 4 +) + +func initOptions() { + options = []option{ + {Name: "pmull", Feature: &ARM.HasPMULL}, + {Name: "sha1", Feature: &ARM.HasSHA1}, + {Name: "sha2", Feature: &ARM.HasSHA2}, + {Name: "swp", Feature: &ARM.HasSWP}, + {Name: "thumb", Feature: &ARM.HasTHUMB}, + {Name: "thumbee", Feature: &ARM.HasTHUMBEE}, + {Name: "tls", Feature: &ARM.HasTLS}, + {Name: "vfp", Feature: &ARM.HasVFP}, + {Name: "vfpd32", 
Feature: &ARM.HasVFPD32}, + {Name: "vfpv3", Feature: &ARM.HasVFPv3}, + {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16}, + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "half", Feature: &ARM.HasHALF}, + {Name: "26bit", Feature: &ARM.Has26BIT}, + {Name: "fastmul", Feature: &ARM.HasFASTMUL}, + {Name: "fpa", Feature: &ARM.HasFPA}, + {Name: "edsp", Feature: &ARM.HasEDSP}, + {Name: "java", Feature: &ARM.HasJAVA}, + {Name: "iwmmxt", Feature: &ARM.HasIWMMXT}, + {Name: "crunch", Feature: &ARM.HasCRUNCH}, + {Name: "neon", Feature: &ARM.HasNEON}, + {Name: "idivt", Feature: &ARM.HasIDIVT}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "lpae", Feature: &ARM.HasLPAE}, + {Name: "evtstrm", Feature: &ARM.HasEVTSTRM}, + {Name: "aes", Feature: &ARM.HasAES}, + {Name: "crc32", Feature: &ARM.HasCRC32}, + } + +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 0000000000..5fc09e2935 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,191 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "runtime" + +// cacheLineSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. 
+const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "fp", Feature: &ARM64.HasFP}, + {Name: "asimd", Feature: &ARM64.HasASIMD}, + {Name: "evstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + {Name: "dit", Feature: &ARM64.HasDIT}, + {Name: "i8mm", Feature: &ARM64.HasI8MM}, + } +} + +func archInit() { + if runtime.GOOS == "freebsd" { + readARM64Registers() + } else { + // Most platforms don't seem to allow directly reading these registers. + doinit() + } +} + +// setMinimalFeatures fakes the minimal ARM64 features expected by +// TestARM64minimalFeatures. 
+func setMinimalFeatures() { + ARM64.HasASIMD = true + ARM64.HasFP = true +} + +func readARM64Registers() { + Initialized = true + + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) +} + +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + switch extractBits(isar1, 52, 55) { + case 1: + ARM64.HasI8MM = true + } + + // ID_AA64PFR0_EL1 + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } + + switch 
extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 0000000000..3b0450a06a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + MRS ID_AA64ISAR0_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + MRS ID_AA64ISAR1_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + MRS ID_AA64PFR0_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + MRS ID_AA64ZFR0_EL1, R0 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go new file mode 100644 index 0000000000..0b470744a0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64.go @@ -0,0 +1,67 @@ +// Copyright 2026 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && arm64 && gc + +package cpu + +func doinit() { + setMinimalFeatures() + + // The feature flags are explained in [Instruction Set Detection]. 
+ // There are some differences between MacOS versions: + // + // MacOS 11 and 12 do not have "hw.optional" sysctl values for some of the features. + // + // MacOS 13 changed some of the naming conventions to align with ARM Architecture Reference Manual. + // For example "hw.optional.armv8_2_sha512" became "hw.optional.arm.FEAT_SHA512". + // It currently checks both to stay compatible with MacOS 11 and 12. + // The old names also work with MacOS 13, however it's not clear whether + // they will continue working with future OS releases. + // + // Once MacOS 12 is no longer supported the old names can be removed. + // + // [Instruction Set Detection]: https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics + + // Encryption, hashing and checksum capabilities + + // For the following flags there are no MacOS 11 sysctl flags. + ARM64.HasAES = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_AES\x00")) + ARM64.HasPMULL = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_PMULL\x00")) + ARM64.HasSHA1 = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA1\x00")) + ARM64.HasSHA2 = true || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA256\x00")) + + ARM64.HasSHA3 = darwinSysctlEnabled([]byte("hw.optional.armv8_2_sha3\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA3\x00")) + ARM64.HasSHA512 = darwinSysctlEnabled([]byte("hw.optional.armv8_2_sha512\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SHA512\x00")) + + ARM64.HasCRC32 = darwinSysctlEnabled([]byte("hw.optional.armv8_crc32\x00")) + + // Atomic and memory ordering + ARM64.HasATOMICS = darwinSysctlEnabled([]byte("hw.optional.armv8_1_atomics\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_LSE\x00")) + ARM64.HasLRCPC = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_LRCPC\x00")) + + // SIMD and floating point capabilities + ARM64.HasFPHP = darwinSysctlEnabled([]byte("hw.optional.neon_fp16\x00")) || 
darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FP16\x00")) + ARM64.HasASIMDHP = darwinSysctlEnabled([]byte("hw.optional.neon_hpfp\x00")) || darwinSysctlEnabled([]byte("hw.optional.AdvSIMD_HPFPCvt\x00")) + ARM64.HasASIMDRDM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_RDM\x00")) + ARM64.HasASIMDDP = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DotProd\x00")) + ARM64.HasASIMDFHM = darwinSysctlEnabled([]byte("hw.optional.armv8_2_fhm\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FHM\x00")) + ARM64.HasI8MM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_I8MM\x00")) + + ARM64.HasJSCVT = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_JSCVT\x00")) + ARM64.HasFCMA = darwinSysctlEnabled([]byte("hw.optional.armv8_3_compnum\x00")) || darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_FCMA\x00")) + + // Miscellaneous + ARM64.HasDCPOP = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DPB\x00")) + ARM64.HasEVTSTRM = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_ECV\x00")) + ARM64.HasDIT = darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_DIT\x00")) + + // Not supported, but added for completeness + ARM64.HasCPUID = false + + ARM64.HasSM3 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SM3\x00")) + ARM64.HasSM4 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SM4\x00")) + ARM64.HasSVE = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SVE\x00")) + ARM64.HasSVE2 = false // darwinSysctlEnabled([]byte("hw.optional.arm.FEAT_SVE2\x00")) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go new file mode 100644 index 0000000000..37ecc66440 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_arm64_other.go @@ -0,0 +1,31 @@ +// Copyright 2026 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin && arm64 && !gc + +package cpu + +import "runtime" + +func doinit() { + setMinimalFeatures() + + ARM64.HasASIMD = true + ARM64.HasFP = true + + // Go already assumes these to be available because they were on the M1 + // and these are supported on all Apple arm64 chips. + ARM64.HasAES = true + ARM64.HasPMULL = true + ARM64.HasSHA1 = true + ARM64.HasSHA2 = true + + if runtime.GOOS != "ios" { + // Apple A7 processors do not support these, however + // M-series SoCs are at least armv8.4-a + ARM64.HasCRC32 = true // armv8.1 + ARM64.HasATOMICS = true // armv8.2 + ARM64.HasJSCVT = true // armv8.3, if HasFP + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 0000000000..b838cb9e95 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). 
See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. +func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 0000000000..6ac6e1efb2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 0000000000..c8ae6ddc15 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 0000000000..32a44514e2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +package cpu + +// cpuid is implemented in cpu_gc_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. 
+func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s new file mode 100644 index 0000000000..ce208ce6d6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +#include "textflag.h" + +// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), NOSPLIT, $0-24 + MOVL eaxArg+0(FP), AX + MOVL ecxArg+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv() (eax, edx uint32) +TEXT ·xgetbv(SB), NOSPLIT, $0-8 + MOVL $0, CX + XGETBV + MOVL AX, eax+0(FP) + MOVL DX, edx+4(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go new file mode 100644 index 0000000000..05913081ec --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } +func getzfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 0000000000..9526d2ce3a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c new file mode 100644 index 0000000000..3f73a05dcf --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +#include +#include +#include + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. 
+void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; +} + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 0000000000..170d21ddfd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 0000000000..743eb54354 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !amd64p32 && !arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 0000000000..2057006dce --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 0000000000..f1caf0f78e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,120 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "strings" + "syscall" +) + +// HWCAP/HWCAP2 bits. These are exposed by Linux. 
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + + hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 +) + +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. +func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + +func doinit() { + if err := readHWCAP(); err != nil { + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. + // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. 
+ if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go new file mode 100644 index 0000000000..4f34114329 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel. 
+const ( + hwcap_LOONGARCH_LSX = 1 << 4 + hwcap_LOONGARCH_LASX = 1 << 5 +) + +func doinit() { + // TODO: Features that require kernel support like LSX and LASX can + // be detected here once needed in std library or by the compiler. + Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX) + Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX) +} + +func hwcIsSet(hwc uint, val uint) bool { + return hwc&val != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 0000000000..4686c1d541 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 0000000000..a428dec9cd --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 0000000000..197188e67f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 0000000000..ad741536f3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,160 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. 
+// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. 
+// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_EXT_ZVBB = 0x20000 + riscv_HWPROBE_EXT_ZVBC = 0x40000 + riscv_HWPROBE_EXT_ZVKB = 0x80000 + riscv_HWPROBE_EXT_ZVKG = 0x100000 + riscv_HWPROBE_EXT_ZVKNED = 0x200000 + riscv_HWPROBE_EXT_ZVKNHB = 0x800000 + riscv_HWPROBE_EXT_ZVKSED = 0x1000000 + riscv_HWPROBE_EXT_ZVKSH = 0x2000000 + riscv_HWPROBE_EXT_ZVKT = 0x4000000 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. 
The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + RISCV64.HasZvbb = isSet(v, riscv_HWPROBE_EXT_ZVBB) + RISCV64.HasZvbc = isSet(v, riscv_HWPROBE_EXT_ZVBC) + RISCV64.HasZvkb = isSet(v, riscv_HWPROBE_EXT_ZVKB) + RISCV64.HasZvkg = isSet(v, riscv_HWPROBE_EXT_ZVKG) + RISCV64.HasZvkt = isSet(v, riscv_HWPROBE_EXT_ZVKT) + // Cryptography shorthand extensions + RISCV64.HasZvkn = isSet(v, riscv_HWPROBE_EXT_ZVKNED) && + isSet(v, riscv_HWPROBE_EXT_ZVKNHB) && RISCV64.HasZvkb && RISCV64.HasZvkt + RISCV64.HasZvknc = RISCV64.HasZvkn && RISCV64.HasZvbc + RISCV64.HasZvkng = RISCV64.HasZvkn && RISCV64.HasZvkg + RISCV64.HasZvks = isSet(v, riscv_HWPROBE_EXT_ZVKSED) && + isSet(v, riscv_HWPROBE_EXT_ZVKSH) && RISCV64.HasZvkb && RISCV64.HasZvkt + RISCV64.HasZvksc = RISCV64.HasZvks && RISCV64.HasZvbc + RISCV64.HasZvksg = RISCV64.HasZvks && RISCV64.HasZvkg + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. 
+ + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 0000000000..1517ac61d3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 0000000000..45ecb29ae7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package cpu + +const cacheLineSize = 64 + +// Bit fields for CPUCFG registers, Related reference documents: +// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg +const ( + // CPUCFG1 bits + cpucfg1_CRC32 = 1 << 25 + + // CPUCFG2 bits + cpucfg2_LAM_BH = 1 << 27 + cpucfg2_LAMCAS = 1 << 28 +) + +func initOptions() { + options = []option{ + {Name: "lsx", Feature: &Loong64.HasLSX}, + {Name: "lasx", Feature: &Loong64.HasLASX}, + {Name: "crc32", Feature: &Loong64.HasCRC32}, + {Name: "lam_bh", Feature: &Loong64.HasLAM_BH}, + {Name: "lamcas", Feature: &Loong64.HasLAMCAS}, + } + + // The CPUCFG data on Loong64 only reflects the hardware capabilities, + // not the kernel support status, so features such as LSX and LASX that + // require kernel support cannot be obtained from the CPUCFG data. 
+ // + // These features only require hardware capability support and do not + // require kernel specific support, so they can be obtained directly + // through CPUCFG + cfg1 := get_cpucfg(1) + cfg2 := get_cpucfg(2) + + Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32) + Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS) + Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH) +} + +func get_cpucfg(reg uint32) uint32 + +func cfgIsSet(cfg uint32, val uint32) bool { + return cfg&val != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/vendor/golang.org/x/sys/cpu/cpu_loong64.s new file mode 100644 index 0000000000..71cbaf1ce2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.s @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func get_cpucfg(reg uint32) uint32 +TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0 + MOVW reg+0(FP), R5 + // CPUCFG R5, R4 = 0x00006ca4 + WORD $0x00006ca4 + MOVW R4, ret+8(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 0000000000..fedb00cc4c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 0000000000..ffb4ec7eb3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 0000000000..ebfb3fc8e7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. 
+ mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. + for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 
*/ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go new file mode 100644 index 0000000000..85b64d5ccb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h. 
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 0000000000..054ba05d60 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 0000000000..e9ecf2a456 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 0000000000..53f814d7a6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !darwin && !linux && !netbsd && !openbsd && arm64 + +package cpu + +func doinit() { + setMinimalFeatures() +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 0000000000..5f8f2419ab --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux && (mips64 || mips64le) + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 0000000000..89608fba27 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 0000000000..5ab87808f7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 0000000000..a0fd7e2f75 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 0000000000..c14f12b149 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 0000000000..0f617aef54 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + // RISC-V Cryptography Extensions + {Name: "zvbb", Feature: &RISCV64.HasZvbb}, + {Name: "zvbc", Feature: &RISCV64.HasZvbc}, + {Name: "zvkb", Feature: &RISCV64.HasZvkb}, + {Name: "zvkg", Feature: &RISCV64.HasZvkg}, + {Name: "zvkt", Feature: &RISCV64.HasZvkt}, + {Name: "zvkn", Feature: &RISCV64.HasZvkn}, + {Name: "zvknc", Feature: &RISCV64.HasZvknc}, + {Name: "zvkng", Feature: &RISCV64.HasZvkng}, + {Name: "zvks", Feature: &RISCV64.HasZvks}, + {Name: "zvksc", Feature: &RISCV64.HasZvksc}, + {Name: "zvksg", Feature: &RISCV64.HasZvksg}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 0000000000..5881b8833f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. 
+type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. 
+type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
+ } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 0000000000..1fb4b70133 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), 
NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 0000000000..384787ea30 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 0000000000..f5723d4f7e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,236 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build 386 || amd64 || amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", 
Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + // From internal/cpu + const ( + // eax bits + cpuid_AVXVNNI = 1 << 4 + + // ecx bits + cpuid_SSE3 = 1 << 0 + cpuid_PCLMULQDQ = 1 << 1 + cpuid_AVX512VBMI = 1 << 1 + cpuid_AVX512VBMI2 = 1 << 6 + cpuid_SSSE3 = 1 << 9 + cpuid_AVX512GFNI = 1 << 8 + cpuid_AVX512VAES = 1 << 9 + cpuid_AVX512VNNI = 1 << 11 + cpuid_AVX512BITALG = 1 << 12 + cpuid_FMA = 1 << 12 + cpuid_AVX512VPOPCNTDQ = 1 << 14 + cpuid_SSE41 = 1 << 19 + cpuid_SSE42 = 1 << 20 + cpuid_POPCNT = 1 << 23 + cpuid_AES = 1 << 25 + cpuid_OSXSAVE = 1 << 27 + cpuid_AVX = 1 << 28 + + // "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0 + cpuid_BMI1 = 1 << 3 + cpuid_AVX2 = 1 << 5 + cpuid_BMI2 = 1 << 8 + cpuid_ERMS = 1 << 9 + cpuid_AVX512F = 1 << 16 + cpuid_AVX512DQ = 1 << 17 + cpuid_ADX = 1 << 19 + cpuid_AVX512CD = 1 << 28 + cpuid_SHA = 1 << 29 + cpuid_AVX512BW = 1 << 30 + cpuid_AVX512VL = 1 << 31 + + // "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0 + cpuid_AVX512_VBMI = 1 << 1 + cpuid_AVX512_VBMI2 = 1 << 6 + cpuid_GFNI = 1 << 8 + cpuid_AVX512VPCLMULQDQ = 1 << 10 + cpuid_AVX512_BITALG = 1 << 12 + + // edx bits + cpuid_FSRM = 1 << 4 + // edx bits for CPUID 0x80000001 + cpuid_RDTSCP = 1 << 27 + ) + // Additional constants not in internal/cpu + const ( + // eax=1: edx + cpuid_SSE2 = 1 << 26 + // eax=1: ecx + cpuid_CX16 = 1 << 13 + cpuid_RDRAND = 1 << 30 + // eax=7,ecx=0: ebx + cpuid_RDSEED = 1 << 18 + cpuid_AVX512IFMA = 1 << 21 + cpuid_AVX512PF = 1 << 26 + cpuid_AVX512ER = 1 << 27 + // eax=7,ecx=0: edx + cpuid_AVX5124VNNIW = 1 << 2 + cpuid_AVX5124FMAPS = 1 
<< 3 + cpuid_AMXBF16 = 1 << 22 + cpuid_AMXTile = 1 << 24 + cpuid_AMXInt8 = 1 << 25 + // eax=7,ecx=1: eax + cpuid_AVX512BF16 = 1 << 5 + cpuid_AVXIFMA = 1 << 23 + // eax=7,ecx=1: edx + cpuid_AVXVNNIInt8 = 1 << 4 + ) + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(edx1, cpuid_SSE2) + + X86.HasSSE3 = isSet(ecx1, cpuid_SSE3) + X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ) + X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3) + X86.HasFMA = isSet(ecx1, cpuid_FMA) + X86.HasCX16 = isSet(ecx1, cpuid_CX16) + X86.HasSSE41 = isSet(ecx1, cpuid_SSE41) + X86.HasSSE42 = isSet(ecx1, cpuid_SSE42) + X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT) + X86.HasAES = isSet(ecx1, cpuid_AES) + X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE) + X86.HasRDRAND = isSet(ecx1, cpuid_RDRAND) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2) + + if runtime.GOOS == "darwin" { + // Darwin requires special AVX512 checks, see cpu_darwin_x86.go + osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() + } else { + // Check if OPMASK and ZMM registers have OS support. 
+ osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7) + } + } + + X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX + + if maxID < 7 { + return + } + + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) + X86.HasBMI1 = isSet(ebx7, cpuid_BMI1) + X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX + X86.HasBMI2 = isSet(ebx7, cpuid_BMI2) + X86.HasERMS = isSet(ebx7, cpuid_ERMS) + X86.HasRDSEED = isSet(ebx7, cpuid_RDSEED) + X86.HasADX = isSet(ebx7, cpuid_ADX) + + X86.HasAVX512 = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + if X86.HasAVX512 { + X86.HasAVX512F = true + X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD) + X86.HasAVX512ER = isSet(ebx7, cpuid_AVX512ER) + X86.HasAVX512PF = isSet(ebx7, cpuid_AVX512PF) + X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL) + X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW) + X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ) + X86.HasAVX512IFMA = isSet(ebx7, cpuid_AVX512IFMA) + X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI) + X86.HasAVX5124VNNIW = isSet(edx7, cpuid_AVX5124VNNIW) + X86.HasAVX5124FMAPS = isSet(edx7, cpuid_AVX5124FMAPS) + X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ) + X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ) + X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI) + X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI) + X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES) + X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2) + X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG) + } + + X86.HasAMXTile = isSet(edx7, cpuid_AMXTile) + X86.HasAMXInt8 = isSet(edx7, cpuid_AMXInt8) + X86.HasAMXBF16 = isSet(edx7, cpuid_AMXBF16) + + // These features depend on the second level of extended features. 
+ if eax7 >= 1 { + eax71, _, _, edx71 := cpuid(7, 1) + if X86.HasAVX512 { + X86.HasAVX512BF16 = isSet(eax71, cpuid_AVX512BF16) + } + if X86.HasAVX { + X86.HasAVXIFMA = isSet(eax71, cpuid_AVXIFMA) + X86.HasAVXVNNI = isSet(eax71, cpuid_AVXVNNI) + X86.HasAVXVNNIInt8 = isSet(edx71, cpuid_AVXVNNIInt8) + } + } +} + +func isSet(hwc uint32, value uint32) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_zos.go b/vendor/golang.org/x/sys/cpu/cpu_zos.go new file mode 100644 index 0000000000..5f54683a22 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_zos.go @@ -0,0 +1,10 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func archInit() { + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go new file mode 100644 index 0000000000..ccb1b708ab --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go @@ -0,0 +1,25 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +func initS390Xbase() { + // get the facilities list + facilities := stfle() + + // mandatory + S390X.HasZARCH = facilities.Has(zarch) + S390X.HasSTFLE = facilities.Has(stflef) + S390X.HasLDISP = facilities.Has(ldisp) + S390X.HasEIMM = facilities.Has(eimm) + + // optional + S390X.HasETF3EH = facilities.Has(etf3eh) + S390X.HasDFP = facilities.Has(dfp) + S390X.HasMSA = facilities.Has(msa) + S390X.HasVX = facilities.Has(vx) + if S390X.HasVX { + S390X.HasVXE = facilities.Has(vxe) + } +} diff --git a/vendor/golang.org/x/sys/cpu/endian_big.go b/vendor/golang.org/x/sys/cpu/endian_big.go new file mode 100644 index 0000000000..7fe04b0a13 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/endian_big.go @@ -0,0 +1,10 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 + +package cpu + +// IsBigEndian records whether the GOARCH's byte order is big endian. +const IsBigEndian = true diff --git a/vendor/golang.org/x/sys/cpu/endian_little.go b/vendor/golang.org/x/sys/cpu/endian_little.go new file mode 100644 index 0000000000..48eccc4c79 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/endian_little.go @@ -0,0 +1,10 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm + +package cpu + +// IsBigEndian records whether the GOARCH's byte order is big endian. 
+const IsBigEndian = false diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go new file mode 100644 index 0000000000..34e49f955a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "os" +) + +const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + procAuxv = "/proc/self/auxv" + + uintSize = int(32 << (^uint(0) >> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func readHWCAP() error { + // For Go 1.21+, get auxv from the Go runtime. + if a := getAuxv(); len(a) > 0 { + for len(a) >= 2 { + tag, val := a[0], uint(a[1]) + a = a[2:] + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil + } + + buf, err := os.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. 
+ return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go new file mode 100644 index 0000000000..56a7e1a176 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := range len(rel) { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := range len(rel) { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 0000000000..4cd64c7042 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 0000000000..5f92ac9a2e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. 
+var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 0000000000..4c9788ea8e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 0000000000..1b9ccb091a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. 
+ +//go:build aix && gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 0000000000..e8b6cdbe9a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go new file mode 100644 index 0000000000..7b4e67ff9c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_arm64_gc.go @@ -0,0 +1,54 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy from internal/cpu and runtime to make sysctl calls. + +//go:build darwin && arm64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type Errno = syscall.Errno + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go new file mode 100644 index 0000000000..4d0888b0c0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go @@ -0,0 +1,98 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on Darwin without depending on x/sys/unix. + +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. + +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + 
uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go deleted file mode 100644 index 2ef36bbcf9..0000000000 --- a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package protodelim marshals and unmarshals varint size-delimited messages. -package protodelim - -import ( - "bufio" - "encoding/binary" - "fmt" - "io" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/proto" -) - -// MarshalOptions is a configurable varint size-delimited marshaler. -type MarshalOptions struct{ proto.MarshalOptions } - -// MarshalTo writes a varint size-delimited wire-format message to w. -// If w returns an error, MarshalTo returns it unchanged. 
-func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { - msgBytes, err := o.MarshalOptions.Marshal(m) - if err != nil { - return 0, err - } - - sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) - sizeWritten, err := w.Write(sizeBytes) - if err != nil { - return sizeWritten, err - } - msgWritten, err := w.Write(msgBytes) - if err != nil { - return sizeWritten + msgWritten, err - } - return sizeWritten + msgWritten, nil -} - -// MarshalTo writes a varint size-delimited wire-format message to w -// with the default options. -// -// See the documentation for [MarshalOptions.MarshalTo]. -func MarshalTo(w io.Writer, m proto.Message) (int, error) { - return MarshalOptions{}.MarshalTo(w, m) -} - -// UnmarshalOptions is a configurable varint size-delimited unmarshaler. -type UnmarshalOptions struct { - proto.UnmarshalOptions - - // MaxSize is the maximum size in wire-format bytes of a single message. - // Unmarshaling a message larger than MaxSize will return an error. - // A zero MaxSize will default to 4 MiB. - // Setting MaxSize to -1 disables the limit. - MaxSize int64 -} - -const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size - -// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size -// that is larger than its configured [UnmarshalOptions.MaxSize]. -type SizeTooLargeError struct { - // Size is the varint size of the message encountered - // that was larger than the provided MaxSize. - Size uint64 - - // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. - MaxSize uint64 -} - -func (e *SizeTooLargeError) Error() string { - return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) -} - -// Reader is the interface expected by [UnmarshalFrom]. -// It is implemented by *[bufio.Reader]. 
-type Reader interface { - io.Reader - io.ByteReader -} - -// UnmarshalFrom parses and consumes a varint size-delimited wire-format message -// from r. -// The provided message must be mutable (e.g., a non-nil pointer to a message). -// -// The error is [io.EOF] error only if no bytes are read. -// If an EOF happens after reading some but not all the bytes, -// UnmarshalFrom returns a non-io.EOF error. -// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, -// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. -func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { - var sizeArr [binary.MaxVarintLen64]byte - sizeBuf := sizeArr[:0] - for i := range sizeArr { - b, err := r.ReadByte() - if err != nil { - // Immediate EOF is unexpected. - if err == io.EOF && i != 0 { - break - } - return err - } - sizeBuf = append(sizeBuf, b) - if b < 0x80 { - break - } - } - size, n := protowire.ConsumeVarint(sizeBuf) - if n < 0 { - return protowire.ParseError(n) - } - - maxSize := o.MaxSize - if maxSize == 0 { - maxSize = defaultMaxSize - } - if maxSize != -1 && size > uint64(maxSize) { - return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") - } - - var b []byte - var err error - if br, ok := r.(*bufio.Reader); ok { - // Use the []byte from the bufio.Reader instead of having to allocate one. - // This reduces CPU usage and allocated bytes. - b, err = br.Peek(int(size)) - if err == nil { - defer br.Discard(int(size)) - } else { - b = nil - } - } - if b == nil { - b = make([]byte, size) - _, err = io.ReadFull(r, b) - } - - if err == io.EOF { - return io.ErrUnexpectedEOF - } - if err != nil { - return err - } - if err := o.Unmarshal(b, m); err != nil { - return err - } - return nil -} - -// UnmarshalFrom parses and consumes a varint size-delimited wire-format message -// from r with the default options. 
-// The provided message must be mutable (e.g., a non-nil pointer to a message). -// -// See the documentation for [UnmarshalOptions.UnmarshalFrom]. -func UnmarshalFrom(r Reader, m proto.Message) error { - return UnmarshalOptions{}.UnmarshalFrom(r, m) -} diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md deleted file mode 100644 index 08eb1babdd..0000000000 --- a/vendor/gopkg.in/yaml.v3/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.2, but preserves some behavior -from 1.1 for backwards compatibility. - -Specifically, as of v3 of the yaml package: - - - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being - decoded into a typed bool value. Otherwise they behave as a string. Booleans - in YAML 1.2 are _true/false_ only. - - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ - as specified in YAML 1.2, because most parsers still use the old format. - Octals in the _0o777_ format are supported though, so new files work. - - Does not support base-60 floats. These are gone from YAML 1.2, and were - actually never supported by this package as it's clearly a poor choice. - -and offers backwards -compatibility with YAML 1.1 in some cases. -1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. 
- -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v3*. - -To install it, run: - - go get gopkg.in/yaml.v3 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) - -API stability -------------- - -The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the MIT and Apache License 2.0 licenses. -Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v3" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! 
-b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go deleted file mode 100644 index 0173b6982e..0000000000 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ /dev/null @@ -1,1000 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *Node - anchors map[string]*Node - doneInit bool - textless bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.anchors = make(map[string]*Node) - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - // It's curious choice from the underlying API to generally return a - // positive result on success, but on this case return true in an error - // scenario. This was the source of bugs in the past (issue #666). 
- if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *Node, anchor []byte) { - if anchor != nil { - n.Anchor = string(anchor) - p.anchors[n.Anchor] = n - } -} - -func (p *parser) parse() *Node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - case yaml_TAIL_COMMENT_EVENT: - panic("internal error: unexpected tail comment event (please report)") - default: - panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) - } -} - -func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { - var style Style - if tag != "" && tag != "!" 
{ - tag = shortTag(tag) - style = TaggedStyle - } else if defaultTag != "" { - tag = defaultTag - } else if kind == ScalarNode { - tag, _ = resolve("", value) - } - n := &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - } - if !p.textless { - n.Line = p.event.start_mark.line + 1 - n.Column = p.event.start_mark.column + 1 - n.HeadComment = string(p.event.head_comment) - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - } - return n -} - -func (p *parser) parseChild(parent *Node) *Node { - child := p.parse() - parent.Content = append(parent.Content, child) - return child -} - -func (p *parser) document() *Node { - n := p.node(DocumentNode, "", "", "") - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - p.parseChild(n) - if p.peek() == yaml_DOCUMENT_END_EVENT { - n.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *Node { - n := p.node(AliasNode, "", "", string(p.event.anchor)) - n.Alias = p.anchors[n.Value] - if n.Alias == nil { - failf("unknown anchor '%s' referenced", n.Value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *Node { - var parsedStyle = p.event.scalar_style() - var nodeStyle Style - switch { - case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = DoubleQuotedStyle - case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = SingleQuotedStyle - case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: - nodeStyle = LiteralStyle - case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: - nodeStyle = FoldedStyle - } - var nodeValue = string(p.event.value) - var nodeTag = string(p.event.tag) - var defaultTag string - if nodeStyle == 0 { - if nodeValue == "<<" { - defaultTag = mergeTag - } - } else { - defaultTag = strTag - } - n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) - n.Style |= nodeStyle - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func 
(p *parser) sequence() *Node { - n := p.node(SequenceNode, seqTag, string(p.event.tag), "") - if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - p.parseChild(n) - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *Node { - n := p.node(MappingNode, mapTag, string(p.event.tag), "") - block := true - if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { - block = false - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - k := p.parseChild(n) - if block && k.FootComment != "" { - // Must be a foot comment for the prior value when being dedented. - if len(n.Content) > 2 { - n.Content[len(n.Content)-3].FootComment = k.FootComment - k.FootComment = "" - } - } - v := p.parseChild(n) - if k.FootComment == "" && v.FootComment != "" { - k.FootComment = v.FootComment - v.FootComment = "" - } - if p.peek() == yaml_TAIL_COMMENT_EVENT { - if k.FootComment == "" { - k.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_TAIL_COMMENT_EVENT) - } - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { - n.Content[len(n.Content)-2].FootComment = n.FootComment - n.FootComment = "" - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *Node - aliases map[*Node]bool - terrors []string - - stringMapType reflect.Type - generalMapType reflect.Type - - knownFields bool - uniqueKeys bool - decodeCount int - aliasCount int - aliasDepth int - - mergedFields map[interface{}]bool -} - -var ( - nodeType = reflect.TypeOf(Node{}) - durationType = reflect.TypeOf(time.Duration(0)) - stringMapType = reflect.TypeOf(map[string]interface{}{}) - generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = generalMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder() *decoder { - d := &decoder{ - stringMapType: stringMapType, - generalMapType: generalMapType, - uniqueKeys: true, - } - d.aliases = make(map[*Node]bool) - return d -} - -func (d *decoder) terror(n *Node, tag string, out reflect.Value) { - if n.Tag != "" { - tag = n.Tag - } - value := n.Value - if tag != seqTag && tag != mapTag { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { - err := u.UnmarshalYAML(n) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) 
- return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.ShortTag() == nullTag { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - outi := out.Addr().Interface() - if u, ok := outi.(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - if u, ok := outi.(obsoleteUnmarshaler); ok { - good = d.callObsoleteUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { - if n.ShortTag() == nullTag { - return reflect.Value{} - } - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func 
allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). - return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - if out.Type() == nodeType { - out.Set(reflect.ValueOf(n).Elem()) - return true - } - switch n.Kind { - case DocumentNode: - return d.document(n, out) - case AliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.Kind { - case ScalarNode: - good = d.scalar(n, out) - case MappingNode: - good = d.mapping(n, out) - case SequenceNode: - good = d.sequence(n, out) - case 0: - if n.IsZero() { - return d.null(out) - } - fallthrough - default: - failf("cannot decode node with unknown kind %d", n.Kind) - } - return good -} - -func (d *decoder) document(n *Node, out reflect.Value) (good bool) { - if len(n.Content) == 1 { - d.doc = n - d.unmarshal(n.Content[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.Value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.Alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) null(out reflect.Value) bool { - if out.CanAddr() { - switch out.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false -} - -func (d *decoder) scalar(n *Node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.indicatedString() { - tag = strTag - resolved = n.Value - } else { - tag, resolved = resolve(n.Tag, n.Value) - if tag == binaryTag { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - return d.null(out) - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == binaryTag { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. 
- text = []byte(n.Value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == binaryTag { - out.SetString(resolved.(string)) - return true - } - out.SetString(n.Value) - return true - case reflect.Interface: - out.Set(reflect.ValueOf(resolved)) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // This used to work in v2, but it's very unfriendly. - isDuration := out.Type() == durationType - - switch resolved := resolved.(type) { - case int: - if !isDuration && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !isDuration && !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - 
case string: - // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). - // It only works if explicitly attempting to unmarshal into a typed bool value. - switch resolved { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": - out.SetBool(true) - return true - case "n", "N", "no", "No", "NO", "off", "Off", "OFF": - out.SetBool(false) - return true - } - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - panic("yaml internal error: please report the issue") - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, seqTag, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.Content[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - if d.uniqueKeys { - nerrs := len(d.terrors) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - for j := i + 2; j < l; j += 2 { - nj := n.Content[j] - if ni.Kind == nj.Kind && ni.Value == nj.Value { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) - } - } - } - if len(d.terrors) > nerrs { - return false - } - } - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Map: - // okay - case reflect.Interface: - iface := out - if isStringMap(n) { - out = reflect.MakeMap(d.stringMapType) - } else { - out = reflect.MakeMap(d.generalMapType) - } - iface.Set(out) - default: - d.terror(n, mapTag, out) - return false - } - - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - stringMapType := d.stringMapType - generalMapType := d.generalMapType - if outt.Elem() == ifaceType { - if outt.Key().Kind() == reflect.String { - d.stringMapType = outt - } else if outt.Key() == ifaceType { - d.generalMapType = outt - } - } - - mergedFields := d.mergedFields - d.mergedFields = nil - - var mergeNode *Node - - mapIsNew := false - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - mapIsNew = true - } - for i := 0; i < l; i += 2 { - if isMerge(n.Content[i]) { - mergeNode = n.Content[i+1] - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.Content[i], k) { - if mergedFields != nil { - ki := k.Interface() - if mergedFields[ki] { - continue - } - 
mergedFields[ki] = true - } - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { - out.SetMapIndex(k, e) - } - } - } - - d.mergedFields = mergedFields - if mergeNode != nil { - d.merge(n, mergeNode, out) - } - - d.stringMapType = stringMapType - d.generalMapType = generalMapType - return true -} - -func isStringMap(n *Node) bool { - if n.Kind != MappingNode { - return false - } - l := len(n.Content) - for i := 0; i < l; i += 2 { - shortTag := n.Content[i].ShortTag() - if shortTag != strTag && shortTag != mergeTag { - return false - } - } - return true -} - -func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - elemType = inlineMap.Type().Elem() - } - - for _, index := range sinfo.InlineUnmarshalers { - field := d.fieldByIndex(n, out, index) - d.prepare(n, field) - } - - mergedFields := d.mergedFields - d.mergedFields = nil - var mergeNode *Node - var doneFields []bool - if d.uniqueKeys { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - name := settableValueOf("") - l := len(n.Content) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - if isMerge(ni) { - mergeNode = n.Content[i+1] - continue - } - if !d.unmarshal(ni, name) { - continue - } - sname := name.String() - if mergedFields != nil { - if mergedFields[sname] { - continue - } - mergedFields[sname] = true - } - if info, ok := sinfo.FieldsMap[sname]; ok { - if d.uniqueKeys { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", 
ni.Line, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = d.fieldByIndex(n, out, info.Inline) - } - d.unmarshal(n.Content[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.Content[i+1], value) - inlineMap.SetMapIndex(name, value) - } else if d.knownFields { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) - } - } - - d.mergedFields = mergedFields - if mergeNode != nil { - d.merge(n, mergeNode, out) - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { - mergedFields := d.mergedFields - if mergedFields == nil { - d.mergedFields = make(map[interface{}]bool) - for i := 0; i < len(parent.Content); i += 2 { - k := reflect.New(ifaceType).Elem() - if d.unmarshal(parent.Content[i], k) { - d.mergedFields[k.Interface()] = true - } - } - } - - switch merge.Kind { - case MappingNode: - d.unmarshal(merge, out) - case AliasNode: - if merge.Alias != nil && merge.Alias.Kind != MappingNode { - failWantMap() - } - d.unmarshal(merge, out) - case SequenceNode: - for i := 0; i < len(merge.Content); i++ { - ni := merge.Content[i] - if ni.Kind == AliasNode { - if ni.Alias != nil && ni.Alias.Kind != MappingNode { - failWantMap() - } - } else if ni.Kind != MappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } - - d.mergedFields = mergedFields -} - -func isMerge(n *Node) bool { - return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 42cbba483d..b5261f1d3c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -25,10 +25,7 @@ github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/pkg/process github.com/Microsoft/go-winio/tools/mkwinsyscall github.com/Microsoft/go-winio/vhd -# github.com/OneOfOne/xxhash v1.2.8 -## explicit; go 1.11 -github.com/OneOfOne/xxhash -# github.com/agnivade/levenshtein v1.2.0 +# github.com/agnivade/levenshtein v1.2.1 ## explicit; go 1.21 github.com/agnivade/levenshtein # github.com/akavel/rsrc v0.10.2 @@ -36,9 +33,6 @@ github.com/agnivade/levenshtein github.com/akavel/rsrc/binutil github.com/akavel/rsrc/coff github.com/akavel/rsrc/ico -# github.com/beorn7/perks v1.0.1 -## explicit; go 1.11 -github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 @@ -193,16 +187,6 @@ github.com/docker/go-units # github.com/fxamacker/cbor/v2 v2.9.0 ## explicit; go 1.20 github.com/fxamacker/cbor/v2 -# github.com/go-ini/ini v1.67.0 -## explicit -github.com/go-ini/ini -# github.com/go-logr/logr v1.4.3 -## explicit; go 1.18 -github.com/go-logr/logr -github.com/go-logr/logr/funcr -# github.com/go-logr/stdr v1.2.2 -## explicit; go 1.16 -github.com/go-logr/stdr # github.com/gobwas/glob v0.2.3 ## explicit github.com/gobwas/glob @@ -213,7 +197,7 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings -# github.com/goccy/go-json v0.10.3 +# github.com/goccy/go-json v0.10.5 ## explicit; go 1.19 github.com/goccy/go-json github.com/goccy/go-json/internal/decoder @@ -274,9 +258,6 @@ github.com/google/go-containerregistry/pkg/v1/types # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/gorilla/mux v1.8.1 -## explicit; go 1.20 -github.com/gorilla/mux # github.com/josephspurrier/goversioninfo v1.5.0 ## explicit; go 1.18 
github.com/josephspurrier/goversioninfo @@ -294,12 +275,25 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/lestrrat-go/backoff/v2 v2.0.8 ## explicit; go 1.16 github.com/lestrrat-go/backoff/v2 -# github.com/lestrrat-go/blackmagic v1.0.2 -## explicit; go 1.16 +# github.com/lestrrat-go/blackmagic v1.0.4 +## explicit; go 1.23 github.com/lestrrat-go/blackmagic +# github.com/lestrrat-go/dsig v1.0.0 +## explicit; go 1.23.0 +github.com/lestrrat-go/dsig +github.com/lestrrat-go/dsig/internal/ecutil +# github.com/lestrrat-go/dsig-secp256k1 v1.0.0 +## explicit; go 1.23.0 +github.com/lestrrat-go/dsig-secp256k1 # github.com/lestrrat-go/httpcc v1.0.1 ## explicit; go 1.16 github.com/lestrrat-go/httpcc +# github.com/lestrrat-go/httprc/v3 v3.0.2 +## explicit; go 1.23.0 +github.com/lestrrat-go/httprc/v3 +github.com/lestrrat-go/httprc/v3/errsink +github.com/lestrrat-go/httprc/v3/proxysink +github.com/lestrrat-go/httprc/v3/tracesink # github.com/lestrrat-go/iter v1.0.2 ## explicit; go 1.13 github.com/lestrrat-go/iter/arrayiter @@ -314,9 +308,42 @@ github.com/lestrrat-go/jwx/internal/pool github.com/lestrrat-go/jwx/jwa github.com/lestrrat-go/jwx/jwk github.com/lestrrat-go/jwx/x25519 +# github.com/lestrrat-go/jwx/v3 v3.0.13 +## explicit; go 1.24.0 +github.com/lestrrat-go/jwx/v3 +github.com/lestrrat-go/jwx/v3/cert +github.com/lestrrat-go/jwx/v3/internal/base64 +github.com/lestrrat-go/jwx/v3/internal/ecutil +github.com/lestrrat-go/jwx/v3/internal/json +github.com/lestrrat-go/jwx/v3/internal/jwxio +github.com/lestrrat-go/jwx/v3/internal/keyconv +github.com/lestrrat-go/jwx/v3/internal/pool +github.com/lestrrat-go/jwx/v3/internal/tokens +github.com/lestrrat-go/jwx/v3/jwa +github.com/lestrrat-go/jwx/v3/jwe +github.com/lestrrat-go/jwx/v3/jwe/internal/aescbc +github.com/lestrrat-go/jwx/v3/jwe/internal/cipher +github.com/lestrrat-go/jwx/v3/jwe/internal/concatkdf +github.com/lestrrat-go/jwx/v3/jwe/internal/content_crypt +github.com/lestrrat-go/jwx/v3/jwe/internal/keygen 
+github.com/lestrrat-go/jwx/v3/jwe/jwebb +github.com/lestrrat-go/jwx/v3/jwk +github.com/lestrrat-go/jwx/v3/jwk/ecdsa +github.com/lestrrat-go/jwx/v3/jwk/jwkbb +github.com/lestrrat-go/jwx/v3/jws +github.com/lestrrat-go/jwx/v3/jws/internal/keytype +github.com/lestrrat-go/jwx/v3/jws/jwsbb +github.com/lestrrat-go/jwx/v3/jws/legacy +github.com/lestrrat-go/jwx/v3/jwt +github.com/lestrrat-go/jwx/v3/jwt/internal/errors +github.com/lestrrat-go/jwx/v3/jwt/internal/types +github.com/lestrrat-go/jwx/v3/transform # github.com/lestrrat-go/option v1.0.1 ## explicit; go 1.16 github.com/lestrrat-go/option +# github.com/lestrrat-go/option/v2 v2.0.0 +## explicit; go 1.23 +github.com/lestrrat-go/option/v2 # github.com/linuxkit/virtsock v0.0.0-20241009230534-cb6a20cc0422 ## explicit; go 1.17 github.com/linuxkit/virtsock/pkg/vsock @@ -347,27 +374,17 @@ github.com/moby/sys/userns # github.com/mrunalp/fileutils v0.5.1 ## explicit; go 1.13 github.com/mrunalp/fileutils -# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 -## explicit -github.com/munnerz/goautoneg -# github.com/open-policy-agent/opa v0.70.0 -## explicit; go 1.21 +# github.com/open-policy-agent/opa v1.15.2 +## explicit; go 1.25.0 github.com/open-policy-agent/opa/ast -github.com/open-policy-agent/opa/ast/internal/scanner -github.com/open-policy-agent/opa/ast/internal/tokens github.com/open-policy-agent/opa/ast/json -github.com/open-policy-agent/opa/ast/location github.com/open-policy-agent/opa/bundle github.com/open-policy-agent/opa/capabilities -github.com/open-policy-agent/opa/config -github.com/open-policy-agent/opa/format -github.com/open-policy-agent/opa/hooks github.com/open-policy-agent/opa/internal/bundle github.com/open-policy-agent/opa/internal/cidr/merge github.com/open-policy-agent/opa/internal/compiler github.com/open-policy-agent/opa/internal/compiler/wasm github.com/open-policy-agent/opa/internal/compiler/wasm/opa -github.com/open-policy-agent/opa/internal/config 
github.com/open-policy-agent/opa/internal/debug github.com/open-policy-agent/opa/internal/deepcopy github.com/open-policy-agent/opa/internal/edittree @@ -376,19 +393,7 @@ github.com/open-policy-agent/opa/internal/file/archive github.com/open-policy-agent/opa/internal/file/url github.com/open-policy-agent/opa/internal/future github.com/open-policy-agent/opa/internal/gojsonschema -github.com/open-policy-agent/opa/internal/gqlparser/ast -github.com/open-policy-agent/opa/internal/gqlparser/gqlerror -github.com/open-policy-agent/opa/internal/gqlparser/lexer -github.com/open-policy-agent/opa/internal/gqlparser/parser -github.com/open-policy-agent/opa/internal/gqlparser/validator -github.com/open-policy-agent/opa/internal/gqlparser/validator/rules github.com/open-policy-agent/opa/internal/json/patch -github.com/open-policy-agent/opa/internal/jwx/buffer -github.com/open-policy-agent/opa/internal/jwx/jwa -github.com/open-policy-agent/opa/internal/jwx/jwk -github.com/open-policy-agent/opa/internal/jwx/jws -github.com/open-policy-agent/opa/internal/jwx/jws/sign -github.com/open-policy-agent/opa/internal/jwx/jws/verify github.com/open-policy-agent/opa/internal/lcss github.com/open-policy-agent/opa/internal/leb128 github.com/open-policy-agent/opa/internal/merge @@ -398,11 +403,8 @@ github.com/open-policy-agent/opa/internal/providers/aws/crypto github.com/open-policy-agent/opa/internal/providers/aws/v4 github.com/open-policy-agent/opa/internal/ref github.com/open-policy-agent/opa/internal/rego/opa -github.com/open-policy-agent/opa/internal/report -github.com/open-policy-agent/opa/internal/runtime/init github.com/open-policy-agent/opa/internal/semver github.com/open-policy-agent/opa/internal/strings -github.com/open-policy-agent/opa/internal/strvals github.com/open-policy-agent/opa/internal/uuid github.com/open-policy-agent/opa/internal/version github.com/open-policy-agent/opa/internal/wasm/constant @@ -413,33 +415,45 @@ github.com/open-policy-agent/opa/internal/wasm/opcode 
github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities github.com/open-policy-agent/opa/internal/wasm/types github.com/open-policy-agent/opa/internal/wasm/util -github.com/open-policy-agent/opa/ir -github.com/open-policy-agent/opa/keys github.com/open-policy-agent/opa/loader -github.com/open-policy-agent/opa/loader/extension -github.com/open-policy-agent/opa/loader/filter -github.com/open-policy-agent/opa/logging -github.com/open-policy-agent/opa/metrics -github.com/open-policy-agent/opa/plugins -github.com/open-policy-agent/opa/plugins/rest github.com/open-policy-agent/opa/rego -github.com/open-policy-agent/opa/resolver -github.com/open-policy-agent/opa/resolver/wasm -github.com/open-policy-agent/opa/schemas github.com/open-policy-agent/opa/storage github.com/open-policy-agent/opa/storage/inmem -github.com/open-policy-agent/opa/storage/internal/errors -github.com/open-policy-agent/opa/storage/internal/ptr github.com/open-policy-agent/opa/topdown -github.com/open-policy-agent/opa/topdown/builtins -github.com/open-policy-agent/opa/topdown/cache -github.com/open-policy-agent/opa/topdown/copypropagation github.com/open-policy-agent/opa/topdown/print -github.com/open-policy-agent/opa/tracing -github.com/open-policy-agent/opa/types -github.com/open-policy-agent/opa/util -github.com/open-policy-agent/opa/util/decoding -github.com/open-policy-agent/opa/version +github.com/open-policy-agent/opa/v1/ast +github.com/open-policy-agent/opa/v1/ast/internal/scanner +github.com/open-policy-agent/opa/v1/ast/internal/tokens +github.com/open-policy-agent/opa/v1/ast/json +github.com/open-policy-agent/opa/v1/ast/location +github.com/open-policy-agent/opa/v1/bundle +github.com/open-policy-agent/opa/v1/capabilities +github.com/open-policy-agent/opa/v1/format +github.com/open-policy-agent/opa/v1/ir +github.com/open-policy-agent/opa/v1/keys +github.com/open-policy-agent/opa/v1/loader +github.com/open-policy-agent/opa/v1/loader/extension 
+github.com/open-policy-agent/opa/v1/loader/filter +github.com/open-policy-agent/opa/v1/logging +github.com/open-policy-agent/opa/v1/metrics +github.com/open-policy-agent/opa/v1/rego +github.com/open-policy-agent/opa/v1/resolver +github.com/open-policy-agent/opa/v1/resolver/wasm +github.com/open-policy-agent/opa/v1/schemas +github.com/open-policy-agent/opa/v1/storage +github.com/open-policy-agent/opa/v1/storage/inmem +github.com/open-policy-agent/opa/v1/storage/internal/errors +github.com/open-policy-agent/opa/v1/storage/internal/ptr +github.com/open-policy-agent/opa/v1/topdown +github.com/open-policy-agent/opa/v1/topdown/builtins +github.com/open-policy-agent/opa/v1/topdown/cache +github.com/open-policy-agent/opa/v1/topdown/copypropagation +github.com/open-policy-agent/opa/v1/topdown/print +github.com/open-policy-agent/opa/v1/tracing +github.com/open-policy-agent/opa/v1/types +github.com/open-policy-agent/opa/v1/util +github.com/open-policy-agent/opa/v1/util/decoding +github.com/open-policy-agent/opa/v1/version # github.com/opencontainers/cgroups v0.0.6 ## explicit; go 1.23.0 github.com/opencontainers/cgroups @@ -493,23 +507,7 @@ github.com/pelletier/go-toml # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.23.2 -## explicit; go 1.23.0 -github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/internal -# github.com/prometheus/client_model v0.6.2 -## explicit; go 1.22.0 -github.com/prometheus/client_model/go -# github.com/prometheus/common v0.66.1 -## explicit; go 1.23.0 -github.com/prometheus/common/expfmt -github.com/prometheus/common/model -# github.com/prometheus/procfs v0.16.1 -## explicit; go 1.23.0 -github.com/prometheus/procfs -github.com/prometheus/procfs/internal/fs -github.com/prometheus/procfs/internal/util -# github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 +# github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 ## explicit 
github.com/rcrowley/go-metrics # github.com/russross/blackfriday/v2 v2.1.0 @@ -525,6 +523,15 @@ github.com/samber/lo/mutable # github.com/seccomp/libseccomp-golang v0.11.1 ## explicit; go 1.19 github.com/seccomp/libseccomp-golang +# github.com/segmentio/asm v1.2.1 +## explicit; go 1.18 +github.com/segmentio/asm/base64 +github.com/segmentio/asm/cpu +github.com/segmentio/asm/cpu/arm +github.com/segmentio/asm/cpu/arm64 +github.com/segmentio/asm/cpu/cpuid +github.com/segmentio/asm/cpu/x86 +github.com/segmentio/asm/internal/unsafebytes # github.com/sirupsen/logrus v1.9.4 ## explicit; go 1.17 github.com/sirupsen/logrus @@ -537,9 +544,22 @@ github.com/urfave/cli # github.com/urfave/cli/v2 v2.27.7 ## explicit; go 1.18 github.com/urfave/cli/v2 +# github.com/valyala/fastjson v1.6.7 +## explicit; go 1.12 +github.com/valyala/fastjson +github.com/valyala/fastjson/fastfloat # github.com/vbatts/tar-split v0.12.2 ## explicit; go 1.17 github.com/vbatts/tar-split/archive/tar +# github.com/vektah/gqlparser/v2 v2.5.32 +## explicit; go 1.22 +github.com/vektah/gqlparser/v2/ast +github.com/vektah/gqlparser/v2/gqlerror +github.com/vektah/gqlparser/v2/lexer +github.com/vektah/gqlparser/v2/parser +github.com/vektah/gqlparser/v2/validator +github.com/vektah/gqlparser/v2/validator/core +github.com/vektah/gqlparser/v2/validator/rules # github.com/veraison/go-cose v1.3.0 ## explicit; go 1.21 github.com/veraison/go-cose @@ -588,45 +608,10 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/auto/sdk v1.2.1 -## explicit; go 1.24.0 -go.opentelemetry.io/auto/sdk -go.opentelemetry.io/auto/sdk/internal/telemetry # go.opentelemetry.io/otel v1.43.0 ## explicit; go 1.25.0 -go.opentelemetry.io/otel -go.opentelemetry.io/otel/attribute -go.opentelemetry.io/otel/attribute/internal -go.opentelemetry.io/otel/attribute/internal/xxhash -go.opentelemetry.io/otel/baggage -go.opentelemetry.io/otel/codes 
-go.opentelemetry.io/otel/internal/baggage -go.opentelemetry.io/otel/internal/errorhandler -go.opentelemetry.io/otel/internal/global -go.opentelemetry.io/otel/propagation -go.opentelemetry.io/otel/semconv/v1.37.0 -go.opentelemetry.io/otel/semconv/v1.40.0 -go.opentelemetry.io/otel/semconv/v1.40.0/otelconv -# go.opentelemetry.io/otel/metric v1.43.0 -## explicit; go 1.25.0 -go.opentelemetry.io/otel/metric -go.opentelemetry.io/otel/metric/embedded -go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.43.0 +# go.opentelemetry.io/otel/sdk/metric v1.43.0 ## explicit; go 1.25.0 -go.opentelemetry.io/otel/sdk -go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal/x -go.opentelemetry.io/otel/sdk/resource -go.opentelemetry.io/otel/sdk/trace -go.opentelemetry.io/otel/sdk/trace/internal/env -go.opentelemetry.io/otel/sdk/trace/internal/observ -# go.opentelemetry.io/otel/trace v1.43.0 -## explicit; go 1.25.0 -go.opentelemetry.io/otel/trace -go.opentelemetry.io/otel/trace/embedded -go.opentelemetry.io/otel/trace/internal/telemetry -go.opentelemetry.io/otel/trace/noop # go.uber.org/mock v0.6.0 ## explicit; go 1.23.0 go.uber.org/mock/gomock @@ -635,9 +620,13 @@ go.uber.org/mock/mockgen/model # go.yaml.in/yaml/v2 v2.4.2 ## explicit; go 1.15 go.yaml.in/yaml/v2 +# go.yaml.in/yaml/v3 v3.0.4 +## explicit; go 1.16 +go.yaml.in/yaml/v3 # golang.org/x/crypto v0.50.0 ## explicit; go 1.25.0 golang.org/x/crypto/curve25519 +golang.org/x/crypto/pbkdf2 # golang.org/x/mod v0.35.0 ## explicit; go 1.25.0 golang.org/x/mod/internal/lazyregexp @@ -661,6 +650,7 @@ golang.org/x/net/trace golang.org/x/sync/errgroup # golang.org/x/sys v0.43.0 ## explicit; go 1.25.0 +golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry @@ -704,7 +694,7 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# 
google.golang.org/genproto/googleapis/rpc v0.0.0-20260120221211-b8f7ae30c516 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 ## explicit; go 1.24.0 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.80.0 @@ -779,7 +769,6 @@ google.golang.org/grpc/cmd/protoc-gen-go-grpc google.golang.org/protobuf/cmd/protoc-gen-go google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo google.golang.org/protobuf/compiler/protogen -google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -824,9 +813,6 @@ google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/pluginpb -# gopkg.in/yaml.v3 v3.0.1 -## explicit -gopkg.in/yaml.v3 # sigs.k8s.io/yaml v1.6.0 ## explicit; go 1.22 sigs.k8s.io/yaml